From c03aa16b78027c0adce663328ac3a28099547ada Mon Sep 17 00:00:00 2001 From: "linrunqi.lrq" Date: Tue, 15 Oct 2024 10:21:34 +0800 Subject: [PATCH] =?UTF-8?q?Merge=20opensource=20main=20=E6=9C=AC=E6=AC=A1?= =?UTF-8?q?=E4=BB=A3=E7=A0=81=E8=AF=84=E5=AE=A1=E4=B8=BB=E8=A6=81=E6=B6=89?= =?UTF-8?q?=E5=8F=8A=E6=B7=BB=E5=8A=A0=E6=96=B0=E7=9A=84=E4=BE=9D=E8=B5=96?= =?UTF-8?q?=E3=80=81=E5=BC=95=E5=85=A5=E6=96=B0=E7=9A=84=E5=8F=98=E9=87=8F?= =?UTF-8?q?=E3=80=81=E4=BF=AE=E6=94=B9=E9=94=81=E7=9A=84=E4=BD=BF=E7=94=A8?= =?UTF-8?q?=E3=80=81=E5=A2=9E=E5=8A=A0=E5=92=8C=E4=BF=AE=E6=94=B9=E5=87=BD?= =?UTF-8?q?=E6=95=B0=E5=8A=9F=E8=83=BD=E3=80=81=E8=B0=83=E6=95=B4=E9=85=8D?= =?UTF-8?q?=E7=BD=AE=E5=A4=84=E7=90=86=E9=80=BB=E8=BE=91=E3=80=81=E6=9B=B4?= =?UTF-8?q?=E6=96=B0=E6=B5=8B=E8=AF=95=E7=94=A8=E4=BE=8B=E4=BB=A5=E9=80=82?= =?UTF-8?q?=E5=BA=94=E4=BB=A3=E7=A0=81=E7=BB=93=E6=9E=84=E5=92=8C=E9=80=BB?= =?UTF-8?q?=E8=BE=91=E7=9A=84=E5=8F=98=E5=8C=96=EF=BC=8C=E4=BB=A5=E5=8F=8A?= =?UTF-8?q?=E4=B8=80=E7=B3=BB=E5=88=97=E7=9A=84=E4=BB=A3=E7=A0=81=E4=BC=98?= =?UTF-8?q?=E5=8C=96=E5=92=8C=E9=87=8D=E6=9E=84=EF=BC=8C=E7=89=B9=E5=88=AB?= =?UTF-8?q?=E6=98=AF=E5=A2=9E=E5=8A=A0=E4=BA=86=E5=AF=B9=E9=85=8D=E7=BD=AE?= =?UTF-8?q?=E7=AE=A1=E7=90=86=E5=92=8C=E6=8C=87=E6=A0=87=E7=BB=9F=E8=AE=A1?= =?UTF-8?q?=E7=9A=84=E5=A4=84=E7=90=86=EF=BC=8C=E5=8A=A0=E5=BC=BA=E4=BA=86?= =?UTF-8?q?=E6=A8=A1=E5=9D=97=E9=97=B4=E7=9A=84=E8=A7=A3=E8=80=A6=E5=92=8C?= =?UTF-8?q?=E4=BB=A3=E7=A0=81=E7=9A=84=E5=8F=AF=E7=BB=B4=E6=8A=A4=E6=80=A7?= =?UTF-8?q?=E3=80=82=20Link:=20https://code.alibaba-inc.com/sls/ilogtail/c?= =?UTF-8?q?odereview/18705573=20*=20add=20setpipelineforitems=20for=20proc?= =?UTF-8?q?ess=20queues=20(#1769)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add metric for runner (#1766) * fix deadlock in full drain mode on config update (#1776) * remove InputStream (#1777) * fix: do not use gtid if gtid is disabled (#1781) Fixed an issue where the input_canal plugin used GTID 
as the sync method even when gtid_enabled was set to false. This led to syncing from the beginning due to receiving an empty GTID set. Syncing from the beginning is usually undesirable as it consumes significant CPU resources on the server side and deviates from the default plugin behavior. This commit ensures that the plugin respects the gtid_enabled switch and the gtid_mode query result. * fix: gometric v2 not started and shared extensions (#1782) * fix metrics v2 type assertion and shared extension initialiazation * restore go mod * test: calculate incremental unittest coverage (#1783) * fix: given more time to fix ut failed of flusher_prometheus (#1784) * Flusher supports new metricstore endpoint, which optimizes time series data storage and query (#1731) * rename self monitor metrics (#1779) * c++ part * go part * fix go bug * rename cpp metrics * rename go metrics * change parameters name * delete unused metrics * change name * delete node id * change name * fix build * change name * 1. support honor_timestamps, 2. support honor_labels, apply global mTags to MetricEvent in relabel phase 3. 
optimization relabel, manage MetricEvent tags directly (#1742) * update: remove enable-compression config temporarily (#1785) * update: check pipeline queue before scrape, try again after 1 second if is not valid to push (#1757) * Add a default content key for the service_kafka plugin (#1754) * add K8s meta self metrics (#1765) * support k8s meta to collect more resources * feat: reload pipeline config independently (#1713) * feat: reload Go pipeline config independently * fix unittest * fix unittest * fix * fix * fix * fix * fix * unittest * fix * fix * fix * fix * fix * self telemetry * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * test: calculate incremental unittest coverage * fix * fix * fix * fix * fix conflict * fix log * feat: prometheus self monitor metrics (#1796) * feat:iLogtail renamed to LoongCollector and directory layout changes (#1791) ```plaintext / └── /opt/loongcollector/ ├── loongcollector ├── libPluginAdapter.so ├── libPluginBase.so ├── ca-bundle.crt ├── plugins │ └── custom plugins ├── dump (used exclusively by the service_http_server input plugin) ├── thirdparty │ ├── jvm │ └── telegraf ├── conf/ │ ├── scripts │ ├── apsara_log_conf.json │ ├── plugin_logger.xml │ ├── user_defined_id │ ├── authorization.json │ ├── pipelineconfig/ │ │ ├── local/ │ │ │ └── collect_stdout.json │ │ └── remote/ │ │ └── collect_file.json │ └── instanceconfig/ │ ├── local/ │ │ ├── ebpf.json │ │ └── loongcollector_config.json │ └── remote/ │ ├── region.json │ └── resource.json ├── data/ │ ├── file_check_point │ ├── exactly_once_checkpoint/ │ ├── go_plugin_checkpoint/ │ ├── docker_path_config.json │ ├── send_buffer_file_xxxxxxxxxxxx │ └── backtrace.dat ├── log/ │ ├── loongcollector.log │ ├── loongcollector.log.1 │ ├── go_plugin.log │ ├── go_plugin.log.1 │ ├── logger_initialization.log │ └── snapshot/ └── run/ ├── loongcollector.pid ├── inotify_watcher_dirs └── app_info.json ``` * Support concurrency isolation between different sender 
queues (#1786) * Merge remote-tracking branch 'upstream/main' into HEAD * Internal code adaptation to ConcurrencyLimiter changes --- .devcontainer/Dockerfile | 4 +- .github/workflows/build-core-ut.yaml | 4 +- .github/workflows/static-check.yaml | 2 +- CHANGELOG.md | 1 + Makefile | 26 +- core/CMakeLists.txt | 6 +- core/app_config/AppConfig.cpp | 360 ++++++++++++++++-- core/app_config/AppConfig.h | 39 +- core/application/Application.cpp | 45 ++- core/checkpoint/AdhocCheckpointManager.cpp | 13 +- core/checkpoint/AdhocCheckpointManager.h | 1 - core/checkpoint/CheckPointManager.cpp | 11 +- core/checkpoint/CheckpointManagerV2.cpp | 3 +- core/common/Constants.cpp | 2 + core/common/Constants.h | 2 + core/common/CrashBackTraceUtil.cpp | 15 +- core/common/FileSystemUtil.cpp | 11 - core/common/FileSystemUtil.h | 3 - core/common/LogtailCommonFlags.cpp | 11 +- core/common/LogtailCommonFlags.h | 6 + core/common/RuntimeUtil.cpp | 16 +- core/common/compression/Compressor.cpp | 24 +- core/common/compression/Compressor.h | 8 +- core/common/compression/CompressorFactory.cpp | 10 +- core/common/timer/Timer.cpp | 5 +- core/common/timer/Timer.h | 1 + core/config/ConfigDiff.h | 4 +- core/config/PipelineConfig.cpp | 40 -- core/config/provider/ConfigProvider.cpp | 8 +- core/config/watcher/ConfigWatcher.cpp | 59 +-- .../ContainerDiscoveryOptions.h | 2 +- core/ebpf/SourceManager.cpp | 1 + core/file_server/ConfigManager.cpp | 1 + core/file_server/EventDispatcher.cpp | 4 +- core/file_server/FileServer.cpp | 9 +- core/file_server/FileServer.h | 9 +- .../event_handler/EventHandler.cpp | 10 +- .../event_handler/HistoryFileImporter.cpp | 10 +- core/file_server/event_handler/LogInput.cpp | 33 +- core/file_server/event_handler/LogInput.h | 6 +- core/file_server/polling/PollingDirFile.cpp | 19 +- core/file_server/polling/PollingDirFile.h | 4 +- core/file_server/polling/PollingModify.cpp | 7 +- core/file_server/polling/PollingModify.h | 2 +- core/file_server/reader/LogFileReader.cpp | 26 +- 
core/file_server/reader/LogFileReader.h | 9 +- core/go_pipeline/CMakeLists.txt | 4 +- core/go_pipeline/LogtailPlugin.cpp | 135 +++++-- core/go_pipeline/LogtailPlugin.h | 43 +-- core/go_pipeline/LogtailPluginAdapter.cpp | 2 +- core/logger/Logger.cpp | 46 +-- core/logtail.cpp | 7 +- core/logtail_windows.cpp | 7 +- core/models/PipelineEventGroup.h | 1 - core/monitor/LogFileProfiler.cpp | 9 +- core/monitor/LogtailAlarm.cpp | 7 +- core/monitor/LogtailAlarm.h | 8 +- core/monitor/LogtailMetric.cpp | 40 +- core/monitor/LogtailMetric.h | 14 +- core/monitor/MetricConstants.cpp | 162 -------- core/monitor/MetricConstants.h | 155 -------- core/monitor/MetricExportor.cpp | 21 +- core/monitor/Monitor.cpp | 54 +-- core/monitor/Monitor.h | 2 - .../monitor/metric_constants/AgentMetrics.cpp | 41 ++ .../metric_constants/ComponentMetrics.cpp | 74 ++++ .../metric_constants/MetricConstants.h | 267 +++++++++++++ .../metric_constants/PipelineMetrics.cpp | 33 ++ .../metric_constants/PluginMetrics.cpp | 117 ++++++ .../metric_constants/RunnerMetrics.cpp | 65 ++++ .../network/sources/ebpf/EBPFWrapper.cpp | 11 +- .../network/sources/pcap/PCAPWrapper.cpp | 1 + core/options.cmake | 4 +- core/pipeline/Pipeline.cpp | 80 ++-- core/pipeline/Pipeline.h | 30 +- core/pipeline/PipelineManager.cpp | 139 ++----- core/pipeline/PipelineManager.h | 18 +- core/pipeline/batch/Batcher.h | 62 +-- core/pipeline/limiter/ConcurrencyLimiter.cpp | 64 +++- core/pipeline/limiter/ConcurrencyLimiter.h | 55 ++- core/pipeline/plugin/PluginRegistry.cpp | 7 +- .../plugin/instance/FlusherInstance.cpp | 14 +- .../plugin/instance/FlusherInstance.h | 4 +- .../plugin/instance/InputInstance.cpp | 2 +- .../pipeline/plugin/instance/PluginInstance.h | 8 +- .../plugin/instance/ProcessorInstance.cpp | 30 +- .../plugin/instance/ProcessorInstance.h | 10 +- core/pipeline/plugin/interface/Flusher.cpp | 27 +- core/pipeline/plugin/interface/Flusher.h | 8 +- core/pipeline/plugin/interface/Plugin.h | 24 +- 
core/pipeline/queue/BoundedProcessQueue.cpp | 23 +- core/pipeline/queue/BoundedProcessQueue.h | 6 +- core/pipeline/queue/BoundedQueueInterface.h | 2 +- .../queue/BoundedSenderQueueInterface.cpp | 35 +- .../queue/BoundedSenderQueueInterface.h | 20 +- core/pipeline/queue/CircularProcessQueue.cpp | 24 +- core/pipeline/queue/CircularProcessQueue.h | 3 +- .../queue/ExactlyOnceQueueManager.cpp | 30 +- core/pipeline/queue/ExactlyOnceQueueManager.h | 7 +- .../pipeline/queue/ExactlyOnceSenderQueue.cpp | 77 +++- core/pipeline/queue/ExactlyOnceSenderQueue.h | 7 +- core/pipeline/queue/ProcessQueueInterface.cpp | 2 +- core/pipeline/queue/ProcessQueueInterface.h | 8 +- core/pipeline/queue/ProcessQueueItem.h | 12 + core/pipeline/queue/ProcessQueueManager.cpp | 18 +- core/pipeline/queue/ProcessQueueManager.h | 4 +- core/pipeline/queue/QueueInterface.h | 24 +- core/pipeline/queue/SenderQueue.cpp | 92 +++-- core/pipeline/queue/SenderQueue.h | 8 +- core/pipeline/queue/SenderQueueItem.h | 37 +- core/pipeline/queue/SenderQueueManager.cpp | 49 ++- core/pipeline/queue/SenderQueueManager.h | 10 +- core/pipeline/route/Router.cpp | 14 +- core/pipeline/route/Router.h | 2 +- core/pipeline/serializer/SLSSerializer.cpp | 8 +- core/pipeline/serializer/Serializer.h | 40 +- .../flusher/blackhole/FlusherBlackHole.cpp | 2 +- core/plugin/flusher/sls/DiskBufferWriter.cpp | 25 +- core/plugin/flusher/sls/DiskBufferWriter.h | 1 - core/plugin/flusher/sls/FlusherSLS.cpp | 172 +++++++-- core/plugin/flusher/sls/FlusherSLS.h | 19 +- core/plugin/flusher/sls/SLSClientManager.cpp | 4 +- core/plugin/input/InputContainerStdio.cpp | 15 +- core/plugin/input/InputContainerStdio.h | 2 +- core/plugin/input/InputFile.cpp | 18 +- core/plugin/input/InputFile.h | 2 +- core/plugin/input/InputPrometheus.cpp | 3 +- core/plugin/input/InputPrometheus.h | 2 +- core/plugin/input/input.cmake | 7 - .../processor/ProcessorDesensitizeNative.cpp | 23 +- .../processor/ProcessorDesensitizeNative.h | 5 +- 
.../processor/ProcessorFilterNative.cpp | 9 +- core/plugin/processor/ProcessorFilterNative.h | 3 - .../processor/ProcessorParseApsaraNative.cpp | 26 +- .../processor/ProcessorParseApsaraNative.h | 11 +- .../ProcessorParseDelimiterNative.cpp | 33 +- .../processor/ProcessorParseDelimiterNative.h | 9 +- .../processor/ProcessorParseJsonNative.cpp | 22 +- .../processor/ProcessorParseJsonNative.h | 8 +- .../processor/ProcessorParseRegexNative.cpp | 22 +- .../processor/ProcessorParseRegexNative.h | 10 +- .../ProcessorParseTimestampNative.cpp | 24 +- .../processor/ProcessorParseTimestampNative.h | 11 +- core/plugin/processor/ProcessorSPL.cpp | 2 +- .../ProcessorMergeMultilineLogNative.cpp | 14 +- .../inner/ProcessorMergeMultilineLogNative.h | 4 +- .../ProcessorParseContainerLogNative.cpp | 28 +- .../inner/ProcessorParseContainerLogNative.h | 8 +- .../inner/ProcessorPromParseMetricNative.cpp | 19 +- .../inner/ProcessorPromParseMetricNative.h | 5 +- .../ProcessorPromRelabelMetricNative.cpp | 178 +++++---- .../inner/ProcessorPromRelabelMetricNative.h | 15 +- ...ProcessorSplitMultilineLogStringNative.cpp | 24 +- .../ProcessorSplitMultilineLogStringNative.h | 6 +- .../EnterpriseProfileSender.cpp | 2 +- core/prometheus/Constants.h | 9 +- core/prometheus/PromSelfMonitor.cpp | 71 ++++ core/prometheus/PromSelfMonitor.h | 38 ++ core/prometheus/PrometheusInputRunner.cpp | 93 ++++- core/prometheus/PrometheusInputRunner.h | 16 +- core/prometheus/Utils.cpp | 62 ++- core/prometheus/Utils.h | 3 + core/prometheus/async/PromFuture.cpp | 22 +- core/prometheus/async/PromFuture.h | 11 +- core/prometheus/async/PromHttpRequest.cpp | 15 +- core/prometheus/async/PromHttpRequest.h | 6 +- core/prometheus/labels/Labels.cpp | 166 +++----- core/prometheus/labels/Labels.h | 54 +-- core/prometheus/labels/Relabel.cpp | 202 +++++----- core/prometheus/labels/Relabel.h | 40 +- core/prometheus/labels/TextParser.cpp | 32 +- core/prometheus/labels/TextParser.h | 13 +- 
core/prometheus/schedulers/BaseScheduler.cpp | 8 +- core/prometheus/schedulers/BaseScheduler.h | 6 +- core/prometheus/schedulers/ScrapeConfig.cpp | 177 ++++----- core/prometheus/schedulers/ScrapeConfig.h | 12 +- .../prometheus/schedulers/ScrapeScheduler.cpp | 77 +++- core/prometheus/schedulers/ScrapeScheduler.h | 14 +- .../schedulers/TargetSubscriberScheduler.cpp | 64 +++- .../schedulers/TargetSubscriberScheduler.h | 11 +- core/protobuf/sls/logtail_buffer_meta.proto | 1 + core/protobuf/sls/sls_logs.proto | 6 + core/runner/FlusherRunner.cpp | 25 +- core/runner/FlusherRunner.h | 10 + core/runner/LogProcess.h | 67 ---- .../{LogProcess.cpp => ProcessorRunner.cpp} | 184 +++------ core/runner/ProcessorRunner.h | 69 ++++ core/runner/sink/http/HttpSink.cpp | 48 ++- core/runner/sink/http/HttpSink.h | 10 + core/sdk/Client.cpp | 57 ++- core/sdk/Client.h | 48 ++- core/sdk/Common.cpp | 1 + core/sdk/Common.h | 1 + core/sdk/CurlImp.cpp | 6 +- .../app_config/AppConfigUnittestLegal.cpp | 22 +- core/unittest/batch/BatcherUnittest.cpp | 32 +- .../batch/TimeoutFlushManagerUnittest.cpp | 2 +- .../AdhocCheckpointManagerUnittest.cpp | 2 +- .../checkpoint/CheckpointManagerUnittest.cpp | 2 +- .../CheckpointManagerV2Unittest.cpp | 2 +- .../compression/CompressorFactoryUnittest.cpp | 10 +- .../compression/CompressorUnittest.cpp | 14 +- core/unittest/config/ConfigMatchUnittest.cpp | 4 +- core/unittest/config/ConfigUpdateUnittest.cpp | 7 - .../unittest/config/ConfigUpdatorUnittest.cpp | 6 +- .../unittest/config/ConfigWatcherUnittest.cpp | 12 +- core/unittest/flusher/FlusherSLSUnittest.cpp | 80 ++-- .../input/InputContainerStdioUnittest.cpp | 14 +- core/unittest/input/InputFileUnittest.cpp | 24 +- .../input/InputPrometheusUnittest.cpp | 60 ++- .../monitor/PluginMetricManagerUnittest.cpp | 14 +- core/unittest/pipeline/CMakeLists.txt | 5 + .../pipeline/ConcurrencyLimiterUnittest.cpp | 100 +++++ core/unittest/pipeline/PipelineUnittest.cpp | 78 +++- core/unittest/plugin/CMakeLists.txt | 5 + 
.../plugin/FlusherInstanceUnittest.cpp | 10 +- core/unittest/plugin/FlusherUnittest.cpp | 86 +++++ .../unittest/plugin/InputInstanceUnittest.cpp | 8 +- core/unittest/plugin/PluginMock.h | 4 +- .../plugin/PluginRegistryUnittest.cpp | 12 +- .../plugin/ProcessorInstanceUnittest.cpp | 6 +- .../plugin/StaticFlusherCreatorUnittest.cpp | 4 +- .../plugin/StaticInputCreatorUnittest.cpp | 4 +- .../plugin/StaticProcessorCreatorUnittest.cpp | 4 +- .../processor/ParseContainerLogBenchmark.cpp | 4 +- .../ProcessorDesensitizeNativeUnittest.cpp | 6 +- .../ProcessorFilterNativeUnittest.cpp | 10 +- ...ocessorMergeMultilineLogNativeUnittest.cpp | 78 ++-- .../ProcessorParseApsaraNativeUnittest.cpp | 47 +-- ...ocessorParseContainerLogNativeUnittest.cpp | 34 +- .../ProcessorParseDelimiterNativeUnittest.cpp | 51 ++- .../ProcessorParseJsonNativeUnittest.cpp | 45 +-- .../ProcessorParseRegexNativeUnittest.cpp | 75 ++-- .../ProcessorParseTimestampNativeUnittest.cpp | 47 +-- ...ProcessorPromParseMetricNativeUnittest.cpp | 2 + ...ocessorPromRelabelMetricNativeUnittest.cpp | 59 ++- .../ProcessorSplitLogStringNativeUnittest.cpp | 2 +- ...rSplitMultilineLogStringNativeUnittest.cpp | 80 ++-- core/unittest/prometheus/CMakeLists.txt | 5 +- core/unittest/prometheus/LabelsUnittest.cpp | 113 +----- core/unittest/prometheus/PromAsynUnittest.cpp | 3 +- .../prometheus/PromSelfMonitorUnittest.cpp | 56 +++ .../PrometheusInputRunnerUnittest.cpp | 98 +++-- core/unittest/prometheus/RelabelUnittest.cpp | 357 ++++++++++------- .../prometheus/ScrapeConfigUnittest.cpp | 20 +- .../prometheus/ScrapeSchedulerUnittest.cpp | 56 ++- .../TargetSubscriberSchedulerUnittest.cpp | 11 +- .../prometheus/TextParserUnittest.cpp | 34 ++ core/unittest/prometheus/UtilsUnittest.cpp | 44 +++ .../queue/BoundedProcessQueueUnittest.cpp | 43 ++- .../queue/CircularProcessQueueUnittest.cpp | 49 ++- .../queue/ExactlyOnceQueueManagerUnittest.cpp | 78 +++- .../queue/ExactlyOnceSenderQueueUnittest.cpp | 40 +- 
.../queue/ProcessQueueManagerUnittest.cpp | 154 ++++++-- .../queue/SenderQueueManagerUnittest.cpp | 49 +-- core/unittest/queue/SenderQueueUnittest.cpp | 63 +-- core/unittest/reader/ForceReadUnittest.cpp | 1 + core/unittest/route/RouterUnittest.cpp | 10 +- .../unittest/sender/FlusherRunnerUnittest.cpp | 4 +- core/unittest/sender/SenderUnittest.cpp | 77 ++-- .../serializer/SLSSerializerUnittest.cpp | 2 +- .../serializer/SerializerUnittest.cpp | 16 +- core/unittest/spl/SplBenchmark.cpp | 2 +- core/unittest/spl/SplUnittest.cpp | 2 +- docker/Dockerfile.e2e-test | 6 +- docker/Dockerfile_build | 2 +- docker/Dockerfile_coverage | 2 +- docker/Dockerfile_development_part | 27 +- docker/Dockerfile_e2e | 27 +- docker/Dockerfile_production | 29 +- docker/Dockerfile_production_minimal | 28 +- ...config.json => loongcollector_config.json} | 0 ...config.json => loongcollector_config.json} | 0 go.mod | 2 +- go.sum | 4 +- pkg/config/global_config.go | 36 +- pkg/flags/flags.go | 2 + pkg/helper/docker_center.go | 2 +- pkg/helper/docker_cri_adapter.go | 2 +- pkg/helper/dumper.go | 9 +- pkg/helper/dumper_test.go | 6 +- pkg/helper/k8smeta/k8s_meta_cache.go | 11 + pkg/helper/k8smeta/k8s_meta_http_server.go | 32 +- pkg/helper/k8smeta/k8s_meta_manager.go | 137 +++++-- pkg/helper/log_helper.go | 6 +- pkg/helper/log_helper_test.go | 2 +- pkg/helper/self_metrics_agent_constants.go | 44 +++ pkg/helper/self_metrics_plugin_constants.go | 101 +++++ pkg/helper/self_metrics_runner_constants.go | 33 ++ pkg/helper/self_metrics_v2_imp.go | 38 ++ pkg/helper/self_metrics_vector_imp.go | 14 + pkg/logger/logger.go | 18 +- pkg/logger/logger_test.go | 16 +- ...{PluginAdapter.dll => GoPluginAdapter.dll} | Bin pkg/logtail/libGoPluginAdapter.so | Bin 0 -> 36000 bytes pkg/logtail/libPluginAdapter.so | Bin 20768 -> 0 bytes pkg/logtail/logtail.go | 4 +- pkg/pipeline/context.go | 21 - pkg/pipeline/plugin.go | 2 - pkg/pipeline/self_metrics.go | 1 + pkg/protocol/decoder/influxdb/decoder_test.go | 2 +- 
.../decoder/opentelemetry/decoder_test.go | 2 +- pkg/util/util.go | 9 - plugin_main/plugin_export.go | 139 +++---- plugin_main/plugin_http.go | 11 +- plugin_main/plugin_main.go | 10 +- plugin_main/plugin_main_test.go | 90 +++-- pluginmanager/always_online_manager.go | 120 ------ pluginmanager/always_online_manager_test.go | 71 ---- pluginmanager/checkpoint_manager.go | 58 +-- pluginmanager/checkpoint_manager_test.go | 32 +- pluginmanager/config_update_test.go | 83 ++-- pluginmanager/container_config_manager.go | 19 +- .../container_config_manager_test.go | 9 +- pluginmanager/context_imp.go | 2 +- pluginmanager/logstore_config.go | 208 ++++------ pluginmanager/logstore_config_test.go | 52 ++- pluginmanager/metric_export.go | 13 +- pluginmanager/plugin_manager.go | 179 +++++---- pluginmanager/plugin_manager_test.go | 20 +- pluginmanager/plugin_runner.go | 2 + pluginmanager/plugin_runner_v1.go | 21 +- pluginmanager/plugin_runner_v2.go | 27 +- pluginmanager/plugin_wrapper.go | 85 ++--- pluginmanager/plugin_wrapper_aggregator_v1.go | 33 +- pluginmanager/plugin_wrapper_aggregator_v2.go | 38 +- pluginmanager/plugin_wrapper_flusher_v1.go | 20 +- pluginmanager/plugin_wrapper_flusher_v2.go | 21 +- pluginmanager/plugin_wrapper_metric_v1.go | 65 ++-- pluginmanager/plugin_wrapper_metric_v2.go | 12 +- pluginmanager/plugin_wrapper_processor_v1.go | 14 +- pluginmanager/plugin_wrapper_processor_v2.go | 23 +- pluginmanager/plugin_wrapper_service_v1.go | 67 ++-- pluginmanager/plugin_wrapper_service_v2.go | 10 +- pluginmanager/self_telemetry_alarm.go | 2 + pluginmanager/self_telemetry_statistics.go | 2 + .../prometheus/flusher_prometheus_test.go | 3 + plugins/flusher/sls/flusher_sls.go | 8 +- plugins/input/canal/input_canal.go | 26 +- plugins/input/command/input_command.go | 2 +- .../docker/logmeta/metric_container_info.go | 8 +- .../docker/stdout/input_docker_stdout.go | 6 +- plugins/input/jmxfetch/jmxfetch.go | 2 +- plugins/input/kafka/input_kafka.go | 7 +- 
.../input/kubernetesmetav2/meta_collector.go | 157 ++++---- .../kubernetesmetav2/meta_collector_const.go | 22 +- .../kubernetesmetav2/meta_collector_core.go | 10 +- .../kubernetesmetav2/meta_collector_test.go | 48 +++ .../input/kubernetesmetav2/service_meta.go | 21 +- plugins/input/mysql/mysql.go | 4 +- .../opentelemetry/service_otlp_v1_test.go | 2 +- plugins/input/prometheus/input_prometheus.go | 4 +- plugins/input/rdb/rdb.go | 4 +- plugins/input/telegraf/input_telegraf.go | 4 +- plugins/processor/anchor/anchor.go | 2 +- .../droplastkey/processor_drop_last_key.go | 2 +- .../processor/encrypt/processor_encrypt.go | 10 - .../processor_fields_with_condition.go | 9 +- .../keyregex/processor_filter_key_regex.go | 14 +- .../filter/regex/processor_filter_regex.go | 14 +- plugins/processor/json/processor_json.go | 7 +- .../processor/pickkey/processor_pick_key.go | 11 +- .../ratelimit/processor_rate_limit.go | 11 +- .../ratelimit/processor_rate_limit_test.go | 5 - plugins/processor/regex/regex.go | 2 +- .../stringreplace/processor_string_replace.go | 2 +- plugins/test/common.go | 22 +- scripts/check_glibc.sh | 8 +- scripts/dist.sh | 20 +- scripts/docker_build.sh | 4 +- scripts/gen_build_scripts.sh | 20 +- ...l_control.sh => loongcollector_control.sh} | 62 +-- scripts/plugin_build.sh | 6 +- scripts/plugin_gocbuild.sh | 2 +- scripts/update_version.sh | 6 +- scripts/upgrade_adapter_lib.sh | 2 +- scripts/windows32_build.bat | 10 +- scripts/windows64_build.bat | 10 +- .../case.feature | 104 ----- .../case.feature | 76 ---- .../case.feature | 166 -------- .../block_holdon_resume/case.feature | 72 ---- .../input_container_stdio/case.feature | 2 +- .../case.feature | 2 +- .../input_docker_rawstdout/case.feature | 2 +- .../case.feature | 2 +- .../input_docker_stdout/case.feature | 2 +- .../case.feature | 2 +- .../load_same_block_config/case.feature | 154 -------- .../normal_holdon_resume/case.feature | 46 --- .../recover_holdon_resume/case.feature | 69 ---- 
test/engine/control/config.go | 2 +- test/engine/setup/dockercompose/compose.go | 21 +- test/engine/verify/sys_logtail_log.go | 4 +- tools/coverage-diff/main.py | 129 +++++-- 395 files changed, 6975 insertions(+), 5188 deletions(-) delete mode 100644 core/monitor/MetricConstants.cpp delete mode 100644 core/monitor/MetricConstants.h create mode 100644 core/monitor/metric_constants/AgentMetrics.cpp create mode 100644 core/monitor/metric_constants/ComponentMetrics.cpp create mode 100644 core/monitor/metric_constants/MetricConstants.h create mode 100644 core/monitor/metric_constants/PipelineMetrics.cpp create mode 100644 core/monitor/metric_constants/PluginMetrics.cpp create mode 100644 core/monitor/metric_constants/RunnerMetrics.cpp create mode 100644 core/prometheus/PromSelfMonitor.cpp create mode 100644 core/prometheus/PromSelfMonitor.h delete mode 100644 core/runner/LogProcess.h rename core/runner/{LogProcess.cpp => ProcessorRunner.cpp} (65%) create mode 100644 core/runner/ProcessorRunner.h create mode 100644 core/unittest/pipeline/ConcurrencyLimiterUnittest.cpp create mode 100644 core/unittest/plugin/FlusherUnittest.cpp create mode 100644 core/unittest/prometheus/PromSelfMonitorUnittest.cpp rename example_config/quick_start/{ilogtail_config.json => loongcollector_config.json} (100%) rename example_config/start_with_docker/{ilogtail_config.json => loongcollector_config.json} (100%) create mode 100644 pkg/helper/self_metrics_agent_constants.go create mode 100644 pkg/helper/self_metrics_plugin_constants.go create mode 100644 pkg/helper/self_metrics_runner_constants.go rename pkg/logtail/{PluginAdapter.dll => GoPluginAdapter.dll} (100%) create mode 100755 pkg/logtail/libGoPluginAdapter.so delete mode 100755 pkg/logtail/libPluginAdapter.so delete mode 100644 pluginmanager/always_online_manager.go delete mode 100644 pluginmanager/always_online_manager_test.go create mode 100644 plugins/input/kubernetesmetav2/meta_collector_test.go rename scripts/{ilogtail_control.sh => 
loongcollector_control.sh} (70%) delete mode 100644 test/e2e/test_cases/alwaysonline_noraml_config_exit_false/case.feature delete mode 100644 test/e2e/test_cases/alwaysonline_noraml_config_exit_true/case.feature delete mode 100644 test/e2e/test_cases/alwaysonline_noraml_config_resume/case.feature delete mode 100644 test/e2e/test_cases/block_holdon_resume/case.feature delete mode 100644 test/e2e/test_cases/load_same_block_config/case.feature delete mode 100644 test/e2e/test_cases/normal_holdon_resume/case.feature delete mode 100644 test/e2e/test_cases/recover_holdon_resume/case.feature diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 4cebf02c58..f8ad55e7c6 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -29,7 +29,7 @@ RUN wget http://mirrors.ustc.edu.cn/gnu/libc/glibc-2.18.tar.gz && \ rm -fr glibc-2.18* # install python3.8 -RUN cd /opt && curl -O https://cdn.npmmirror.com/binaries/python/3.8.12/Python-3.8.12.tgz && \ +RUN cd /opt && curl -O https://mirrors.aliyun.com/python-release/source/Python-3.8.12.tgz && \ tar -zxvf Python-3.8.12.tgz && cd Python-3.8.12 && \ mkdir /usr/local/python3 && \ ./configure --prefix=/usr/local/python3 && \ @@ -37,7 +37,7 @@ RUN cd /opt && curl -O https://cdn.npmmirror.com/binaries/python/3.8.12/Python-3 cp /usr/local/python3/bin/python3.8 /usr/bin/python3 # install gcovr RUN python3 -m pip install --upgrade pip -RUN cp /usr/local/python3/bin/pip3 /usr/bin/pip3 && pip3 install gcovr==7.0 +RUN cp /usr/local/python3/bin/pip3 /usr/bin/pip3 && pip3 config set global.index-url https://mirrors.aliyun.com/pypi/simple/ && pip3 install gcovr==7.0 RUN cp /usr/local/python3/bin/gcovr /usr/bin/gcovr # Create the user diff --git a/.github/workflows/build-core-ut.yaml b/.github/workflows/build-core-ut.yaml index 16b9ed8ee1..54dc3203d2 100644 --- a/.github/workflows/build-core-ut.yaml +++ b/.github/workflows/build-core-ut.yaml @@ -82,7 +82,7 @@ jobs: run: make unittest_core - name: Unit Test Coverage - 
run: docker build -t unittest_coverage -f ./docker/Dockerfile_coverage . && docker run -v $(pwd):$(pwd) unittest_coverage bash -c "cd $(pwd)/core && gcovr --root . --lcov coverage.lcov --txt coverage.txt -e \".*sdk.*\" -e \".*observer.*\" -e \".*protobuf.*\" -e \".*unittest.*\" -e \".*config_server.*\" -e \".*fuse.*\" -e \".*go_pipeline.*\"" + run: docker build -t unittest_coverage -f ./docker/Dockerfile_coverage . && docker run -v $(pwd):$(pwd) unittest_coverage bash -c "cd $(pwd)/core && gcovr --root . --json coverage.json --json-summary-pretty --json-summary summary.json -e \".*sdk.*\" -e \".*observer.*\" -e \".*logger.*\" -e \".*unittest.*\" -e \".*config_server.*\" -e \".*go_pipeline.*\" -e \".*application.*\" -e \".*protobuf.*\" -e \".*runner.*\"" - name: Setup Python3.10 uses: actions/setup-python@v5 @@ -90,7 +90,7 @@ jobs: python-version: "3.10" - name: Report code coverage - run: python3 tools/coverage-diff/main.py core/coverage.txt + run: python3 tools/coverage-diff/main.py --path core/coverage.json --summary core/summary.json result: runs-on: arc-runner-set-ilogtail diff --git a/.github/workflows/static-check.yaml b/.github/workflows/static-check.yaml index cbeaa8c8d4..db54d1b7a1 100644 --- a/.github/workflows/static-check.yaml +++ b/.github/workflows/static-check.yaml @@ -72,7 +72,7 @@ jobs: - name: Copy Lib if: matrix.runner == 'ubuntu' - run: sudo cp ./pkg/logtail/libPluginAdapter.so /usr/lib/ + run: sudo cp ./pkg/logtail/libGoPluginAdapter.so /usr/lib/ - name: Check License Header if: matrix.runner == 'ubuntu' diff --git a/CHANGELOG.md b/CHANGELOG.md index d8213df816..dc9bd559c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,3 +36,4 @@ your changes, such as: - [public] [both] [fix] do not read env config from exited containers ## [Unreleased] +- [inner] [both] [updated] Support SLS Metricstore output \ No newline at end of file diff --git a/Makefile b/Makefile index 722f547759..a2b2036178 100644 --- a/Makefile +++ b/Makefile @@ -13,10 +13,10 @@ 
# limitations under the License. .DEFAULT_GOAL := all -VERSION ?= 2.0.0 +VERSION ?= 0.0.1 DOCKER_PUSH ?= false -DOCKER_REPOSITORY ?= aliyun/ilogtail -BUILD_REPOSITORY ?= aliyun/ilogtail_build +DOCKER_REPOSITORY ?= aliyun/loongcollector +BUILD_REPOSITORY ?= aliyun/loongcollector_build GENERATED_HOME ?= generated_files PLUGINS_CONFIG_FILE ?= plugins.yml,external_plugins.yml GO_MOD_FILE ?= go.mod @@ -68,9 +68,9 @@ GO_BUILD_FLAGS = -v LICENSE_COVERAGE_FILE=license_coverage.txt OUT_DIR = output DIST_DIR = dist -PACKAGE_DIR = ilogtail-$(VERSION) +PACKAGE_DIR = loongcollector-$(VERSION) EXTERNAL_DIR = external -DIST_FILE = $(DIST_DIR)/ilogtail-$(VERSION).linux-$(ARCH).tar.gz +DIST_FILE = $(DIST_DIR)/loongcollector-$(VERSION).linux-$(ARCH).tar.gz .PHONY: tools tools: @@ -137,8 +137,8 @@ upgrade_adapter_lib: .PHONY: plugin_main plugin_main: clean ./scripts/plugin_build.sh mod default $(OUT_DIR) $(VERSION) $(PLUGINS_CONFIG_FILE) $(GO_MOD_FILE) - cp pkg/logtail/libPluginAdapter.so $(OUT_DIR)/libPluginAdapter.so - cp pkg/logtail/PluginAdapter.dll $(OUT_DIR)/PluginAdapter.dll + cp pkg/logtail/libGoPluginAdapter.so $(OUT_DIR)/libGoPluginAdapter.so + cp pkg/logtail/GoPluginAdapter.dll $(OUT_DIR)/GoPluginAdapter.dll .PHONY: plugin_local plugin_local: @@ -192,8 +192,8 @@ unittest_e2e_engine: clean gocdocker .PHONY: unittest_plugin unittest_plugin: clean import_plugins - cp pkg/logtail/libPluginAdapter.so ./plugin_main - cp pkg/logtail/PluginAdapter.dll ./plugin_main + cp pkg/logtail/libGoPluginAdapter.so ./plugin_main + cp pkg/logtail/GoPluginAdapter.dll ./plugin_main mv ./plugins/input/prometheus/input_prometheus.go ./plugins/input/prometheus/input_prometheus.go.bak go test $$(go list ./...|grep -Ev "telegraf|external|envconfig|(input\/prometheus)|(input\/syslog)"| grep -Ev "plugin_main|pluginmanager") -coverprofile .testCoverage.txt mv ./plugins/input/prometheus/input_prometheus.go.bak ./plugins/input/prometheus/input_prometheus.go @@ -205,9 +205,9 @@ unittest_core: .PHONY: 
unittest_pluginmanager unittest_pluginmanager: clean import_plugins - cp pkg/logtail/libPluginAdapter.so ./plugin_main - cp pkg/logtail/PluginAdapter.dll ./plugin_main - cp pkg/logtail/libPluginAdapter.so ./pluginmanager + cp pkg/logtail/libGoPluginAdapter.so ./plugin_main + cp pkg/logtail/GoPluginAdapter.dll ./plugin_main + cp pkg/logtail/libGoPluginAdapter.so ./pluginmanager mv ./plugins/input/prometheus/input_prometheus.go ./plugins/input/prometheus/input_prometheus.go.bak go test $$(go list ./...|grep -Ev "telegraf|external|envconfig"| grep -E "plugin_main|pluginmanager") -coverprofile .coretestCoverage.txt mv ./plugins/input/prometheus/input_prometheus.go.bak ./plugins/input/prometheus/input_prometheus.go @@ -223,7 +223,7 @@ dist: all ./scripts/dist.sh "$(OUT_DIR)" "$(DIST_DIR)" "$(PACKAGE_DIR)" $(DIST_FILE): - @echo 'ilogtail-$(VERSION) dist does not exist! Please download or run `make dist` first!' + @echo 'loongcollector-$(VERSION) dist does not exist! Please download or run `make dist` first!' @false .PHONY: docker diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index 39073d2271..8613450034 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -13,7 +13,7 @@ # limitations under the License. cmake_minimum_required(VERSION 3.22) -project(logtail) +project(loongcollector) include(CMakeDependentOption) @@ -113,7 +113,7 @@ include(${CMAKE_CURRENT_SOURCE_DIR}/plugin/flusher/links.cmake) # Subdirectories (modules). except for common, input, processor, flusher, observer, helper, spl, and provider. 
set(SUB_DIRECTORIES_LIST - application app_config checkpoint container_manager logger go_pipeline monitor profile_sender models + application app_config checkpoint container_manager logger go_pipeline monitor monitor/metric_constants profile_sender models config config/watcher pipeline pipeline/batch pipeline/limiter pipeline/plugin pipeline/plugin/creator pipeline/plugin/instance pipeline/plugin/interface pipeline/queue pipeline/route pipeline/serializer runner runner/sink/http @@ -125,7 +125,7 @@ set(SUB_DIRECTORIES_LIST ) if (LINUX) if (ENABLE_ENTERPRISE) - set(SUB_DIRECTORIES_LIST ${SUB_DIRECTORIES_LIST} shennong shennong/sdk streamlog aggregator) + set(SUB_DIRECTORIES_LIST ${SUB_DIRECTORIES_LIST} shennong shennong/sdk) endif() elseif(MSVC) if (ENABLE_ENTERPRISE) diff --git a/core/app_config/AppConfig.cpp b/core/app_config/AppConfig.cpp index e95ef285c0..e0390e5b29 100644 --- a/core/app_config/AppConfig.cpp +++ b/core/app_config/AppConfig.cpp @@ -15,18 +15,21 @@ #include "AppConfig.h" #include +#include +#include +#include "RuntimeUtil.h" #include "common/EnvUtil.h" #include "common/FileSystemUtil.h" #include "common/JsonUtil.h" #include "common/LogtailCommonFlags.h" #include "common/RuntimeUtil.h" #include "file_server/ConfigManager.h" +#include "file_server/reader/LogFileReader.h" #include "logger/Logger.h" #include "monitor/LogFileProfiler.h" #include "monitor/LogtailAlarm.h" #include "monitor/Monitor.h" -#include "file_server/reader/LogFileReader.h" #ifdef __ENTERPRISE__ #include "config/provider/EnterpriseConfigProvider.h" #endif @@ -98,8 +101,6 @@ DEFINE_FLAG_INT32(data_server_port, "", 80); // DEFINE_FLAG_STRING(alipay_zone, "", "ALIPAY_ZONE"); // DEFINE_FLAG_STRING(alipay_zone_env_name, "", ""); -DECLARE_FLAG_STRING(check_point_filename); - DECLARE_FLAG_INT32(polling_max_stat_count); DECLARE_FLAG_INT32(polling_max_stat_count_per_dir); DECLARE_FLAG_INT32(polling_max_stat_count_per_config); @@ -157,7 +158,308 @@ 
DEFINE_FLAG_STRING(loong_collector_operator_service, "loong collector operator s DEFINE_FLAG_INT32(loong_collector_operator_service_port, "loong collector operator service port", 8888); DEFINE_FLAG_STRING(_pod_name_, "agent pod name", ""); +DEFINE_FLAG_STRING(app_info_file, "", "app_info.json"); +DEFINE_FLAG_STRING(crash_stack_file_name, "crash stack back trace file name", "backtrace.dat"); +DEFINE_FLAG_STRING(local_event_data_file_name, "local event data file name", "local_event.json"); +DEFINE_FLAG_STRING(inotify_watcher_dirs_dump_filename, "", "inotify_watcher_dirs"); +DEFINE_FLAG_STRING(logtail_snapshot_dir, "snapshot dir on local disk", "snapshot"); +DEFINE_FLAG_STRING(logtail_profile_snapshot, "reader profile on local disk", "logtail_profile_snapshot"); +DEFINE_FLAG_STRING(ilogtail_config_env_name, "config file path", "ALIYUN_LOGTAIL_CONFIG"); + +#if defined(__linux__) +DEFINE_FLAG_STRING(adhoc_check_point_file_dir, "", "/tmp/logtail_adhoc_checkpoint"); +#elif defined(_MSC_VER) +DEFINE_FLAG_STRING(adhoc_check_point_file_dir, "", "C:\\LogtailData\\logtail_adhoc_checkpoint"); +#endif + +#if defined(__linux__) +DEFINE_FLAG_STRING(check_point_filename, "", "/tmp/logtail_check_point"); +#elif defined(_MSC_VER) +DEFINE_FLAG_STRING(check_point_filename, "", "C:\\LogtailData\\logtail_check_point"); +#endif + namespace logtail { + +void CreateAgentDir () { +#if defined(__RUN_LOGTAIL__) + return; +#endif + std::string processExecutionDir = GetProcessExecutionDir(); + Json::Value emptyJson; +#define PROCESSDIRFLAG(flag_name) \ + try { \ + const char* value = getenv(#flag_name); \ + if (value != NULL) { \ + STRING_FLAG(flag_name) = StringTo(value); \ + } \ + } catch (const exception& e) { \ + std::cout << "load config from env error, env_name:" << #flag_name << "\terror:" << e.what() << std::endl; \ + } \ + if (STRING_FLAG(flag_name).empty()) { \ + STRING_FLAG(flag_name) = processExecutionDir + PATH_SEPARATOR; \ + } else { \ + STRING_FLAG(flag_name) = 
AbsolutePath(STRING_FLAG(flag_name), processExecutionDir); \ + } \ + if (!CheckExistance(STRING_FLAG(flag_name))) { \ + if (Mkdirs(STRING_FLAG(flag_name))) { \ + std::cout << STRING_FLAG(flag_name) + " dir is not existing, create done" << std::endl; \ + } else { \ + std::cout << STRING_FLAG(flag_name) + " dir is not existing, create failed" << std::endl; \ + exit(0); \ + } \ + } + + PROCESSDIRFLAG(loongcollector_conf_dir); + PROCESSDIRFLAG(loongcollector_log_dir); + PROCESSDIRFLAG(loongcollector_data_dir); + PROCESSDIRFLAG(loongcollector_run_dir); + PROCESSDIRFLAG(loongcollector_third_party_dir); +} + +std::string GetAgentThirdPartyDir() { + static std::string dir; + if (!dir.empty()) { + return dir; + } +#if defined(__RUN_LOGTAIL__) + dir = AppConfig::GetInstance()->GetLoongcollectorConfDir(); +#else + dir = STRING_FLAG(loongcollector_third_party_dir) + PATH_SEPARATOR; +#endif + return dir; +} + +std::string GetAgentLogDir() { + static std::string dir; + if (!dir.empty()) { + return dir; + } +#if defined(__RUN_LOGTAIL__) || defined(APSARA_UNIT_TEST_MAIN) + dir = GetProcessExecutionDir(); +#else + dir = STRING_FLAG(loongcollector_log_dir) + PATH_SEPARATOR; +#endif + return dir; +} + +std::string GetAgentDataDir() { + static std::string dir; + if (!dir.empty()) { + return dir; + } +#if defined(__RUN_LOGTAIL__) || defined(APSARA_UNIT_TEST_MAIN) + dir = GetProcessExecutionDir(); +#else + dir = STRING_FLAG(loongcollector_data_dir) + PATH_SEPARATOR; +#endif + return dir; +} + +std::string GetAgentConfDir() { + static std::string dir; + if (!dir.empty()) { + return dir; + } +#if defined(__RUN_LOGTAIL__) || defined(APSARA_UNIT_TEST_MAIN) + dir = GetProcessExecutionDir(); +#else + dir = STRING_FLAG(loongcollector_conf_dir) + PATH_SEPARATOR; +#endif + return dir; +} + +std::string GetAgentRunDir() { + static std::string dir; + if (!dir.empty()) { + return dir; + } +#if defined(__RUN_LOGTAIL__) || defined(APSARA_UNIT_TEST_MAIN) + dir = GetProcessExecutionDir(); +#else + dir 
= STRING_FLAG(loongcollector_run_dir) + PATH_SEPARATOR; +#endif + return dir; +} + +std::string GetAgentDockerPathConfig() { + static std::string file_path; + if (!file_path.empty()) { + return file_path; + } +#if defined(__RUN_LOGTAIL__) + file_path = GetAgentDataDir() + STRING_FLAG(ilogtail_docker_file_path_config); +#else + file_path = GetAgentDataDir() + "docker_path_config.json"; +#endif + return file_path; +} + +std::string GetAgentConfDir(const ParseConfResult& res, const Json::Value& confJson) { + std::string newConfDir; +#if defined(__RUN_LOGTAIL__) + if (res == CONFIG_OK) { + // Should be loaded here because other parameters depend on it. + LoadStringParameter(newConfDir, confJson, "logtail_sys_conf_dir", "ALIYUN_LOGTAIL_SYS_CONF_DIR"); + } + if (newConfDir.empty()) { + newConfDir = STRING_FLAG(logtail_sys_conf_dir); + } +#else + newConfDir = GetAgentConfDir(); +#endif + return newConfDir; +} + +std::string GetAgentConfigFile() { +#if defined(__RUN_LOGTAIL__) + // load ilogtail_config.json + char* configEnv = getenv(STRING_FLAG(ilogtail_config_env_name).c_str()); + if (configEnv == NULL || strlen(configEnv) == 0) { + return STRING_FLAG(ilogtail_config); + } else { + return configEnv; + } +#else + return LOONGCOLLECTOR_CONFIG; +#endif +} + +std::string GetAgentAppInfoFile() { + static std::string file; + if (!file.empty()) { + return file; + } +#if defined(__RUN_LOGTAIL__) + file = GetAgentRunDir() + STRING_FLAG(app_info_file); +#else + file = GetAgentRunDir() + "app_info.json"; +#endif + return file; +} + +string GetAdhocCheckpointDirPath() { +#if defined(__RUN_LOGTAIL__) + return STRING_FLAG(adhoc_check_point_file_dir); +#else + return GetAgentDataDir() + "adhoc_checkpoint"; +#endif +} + +string GetCheckPointFileName() { +#if defined(__RUN_LOGTAIL__) + return STRING_FLAG(check_point_filename); +#else + return GetAgentDataDir() + "file_check_point"; +#endif +} + +string GetCrashStackFileName() { +#if defined(__RUN_LOGTAIL__) + return 
GetProcessExecutionDir() + STRING_FLAG(crash_stack_file_name); +#else + return GetAgentDataDir() + "backtrace.dat"; +#endif +} + +string GetLocalEventDataFileName() { +#if defined(__RUN_LOGTAIL__) + return STRING_FLAG(local_event_data_file_name); +#else + return AppConfig::GetInstance()->GetLoongcollectorConfDir() + "local_event.json"; +#endif +} + +string GetInotifyWatcherDirsDumpFileName() { +#if defined(__RUN_LOGTAIL__) + return GetProcessExecutionDir() + STRING_FLAG(inotify_watcher_dirs_dump_filename); +#else + return GetAgentRunDir() + "inotify_watcher_dirs"; +#endif +} + +string GetAgentLoggersPrefix() { +#if defined(__RUN_LOGTAIL__) + return "/apsara/sls/ilogtail"; +#else + return "/apsara/loongcollector"; +#endif +} + +string GetAgentLogName() { +#if defined(__RUN_LOGTAIL__) + return "ilogtail.LOG"; +#else + return "loongcollector.LOG"; +#endif +} + +string GetAgentSnapshotDir() { +#if defined(__RUN_LOGTAIL__) + return GetProcessExecutionDir() + STRING_FLAG(logtail_snapshot_dir); +#else + return GetAgentLogDir() + "snapshot"; +#endif +} + +string GetAgentProfileLogName() { +#if defined(__RUN_LOGTAIL__) + return "ilogtail_profile.LOG"; +#else + return "loongcollector_profile.LOG"; +#endif +} + +string GetAgentStatusLogName() { +#if defined(__RUN_LOGTAIL__) + return "ilogtail_status.LOG"; +#else + return "loongcollector_status.LOG"; +#endif +} + +string GetProfileSnapshotDumpFileName() { +#if defined(__RUN_LOGTAIL__) + return GetProcessExecutionDir() + STRING_FLAG(logtail_profile_snapshot); +#else + return GetAgentLogDir() + "loongcollector_profile_snapshot"; +#endif +} + + +string GetObserverEbpfHostPath() { +#if defined(__RUN_LOGTAIL__) + return STRING_FLAG(sls_observer_ebpf_host_path); +#else + return GetAgentDataDir(); +#endif +} + +string GetSendBufferFileNamePrefix(){ +#if defined(__RUN_LOGTAIL__) + return "logtail_buffer_file_"; +#else + return "send_buffer_file_"; +#endif +} + +string GetLegacyUserLocalConfigFilePath() { +#if defined(__RUN_LOGTAIL__) 
+ return AppConfig::GetInstance()->GetProcessExecutionDir(); +#else + return AppConfig::GetInstance()->GetLoongcollectorConfDir(); +#endif +} + +string GetExactlyOnceCheckpoint() { +#if defined(__RUN_LOGTAIL__) + auto fp = boost::filesystem::path(AppConfig::GetInstance()->GetLoongcollectorConfDir()); + return (fp / "checkpoint_v2").string(); +#else + auto fp = boost::filesystem::path(GetAgentDataDir()); + return (fp / "exactly_once_checkpoint").string(); +#endif +} + AppConfig::AppConfig() { LOG_INFO(sLogger, ("AppConfig AppConfig", "success")); mSendRandomSleep = BOOL_FLAG(enable_send_tps_smoothing); @@ -203,7 +505,7 @@ void AppConfig::LoadIncludeConfig(Json::Value& confJson) { // to config.d, be compatible with old default value. string dirPath = STRING_FLAG(default_include_config_path); if (!dirPath.empty() && dirPath[0] != '/') { - dirPath = mLogtailSysConfDir + dirPath + PATH_SEPARATOR; + dirPath = mLoongcollectorConfDir + dirPath + PATH_SEPARATOR; } if (confJson.isMember("include_config_path") && confJson["include_config_path"].isString()) { dirPath = confJson["include_config_path"].asString(); @@ -248,14 +550,14 @@ void AppConfig::LoadIncludeConfig(Json::Value& confJson) { } void AppConfig::LoadAppConfig(const std::string& ilogtailConfigFile) { - std::string processExecutionDir = GetProcessExecutionDir(); - mDockerFilePathConfig = processExecutionDir + STRING_FLAG(ilogtail_docker_file_path_config); + mDockerFilePathConfig = GetAgentDockerPathConfig(); Json::Value confJson(Json::objectValue); - std::string newSysConfDir; + std::string newConfDir; + ParseConfResult res = CONFIG_NOT_EXIST; if (!ilogtailConfigFile.empty()) { - ParseConfResult res = ParseConfig(ilogtailConfigFile, confJson); + res = ParseConfig(ilogtailConfigFile, confJson); #ifdef __ENTERPRISE__ if (res == CONFIG_NOT_EXIST) { @@ -270,8 +572,6 @@ void AppConfig::LoadAppConfig(const std::string& ilogtailConfigFile) { #endif if (res == CONFIG_OK) { - // Should be loaded here because other 
parameters depend on it. - LoadStringParameter(newSysConfDir, confJson, "logtail_sys_conf_dir", "ALIYUN_LOGTAIL_SYS_CONF_DIR"); } else { confJson.clear(); if (res == CONFIG_NOT_EXIST) { @@ -284,10 +584,8 @@ void AppConfig::LoadAppConfig(const std::string& ilogtailConfigFile) { } } - if (newSysConfDir.empty()) { - newSysConfDir = STRING_FLAG(logtail_sys_conf_dir); - } - SetLogtailSysConfDir(AbsolutePath(newSysConfDir, mProcessExecutionDir)); + newConfDir = GetAgentConfDir(res, confJson); + SetLoongcollectorConfDir(AbsolutePath(newConfDir, mProcessExecutionDir)); LoadIncludeConfig(confJson); string configJsonString = confJson.toStyledString(); @@ -557,7 +855,7 @@ void AppConfig::LoadResourceConf(const Json::Value& confJson) { else if (confJson.isMember("check_point_file_path") && confJson["check_point_file_path"].isString()) mCheckPointFilePath = confJson["check_point_file_path"].asString(); else - mCheckPointFilePath = STRING_FLAG(check_point_filename); + mCheckPointFilePath = GetCheckPointFileName(); LoadStringParameter(mCheckPointFilePath, confJson, NULL, // Only load from env. 
@@ -851,15 +1149,6 @@ bool AppConfig::CheckAndResetProxyAddress(const char* envKey, string& address) { } void AppConfig::LoadOtherConf(const Json::Value& confJson) { - // if (confJson.isMember("mapping_conf_path") && confJson["mapping_conf_path"].isString()) - // mMappingConfigPath = confJson["mapping_conf_path"].asString(); - // else - // mMappingConfigPath = STRING_FLAG(default_mapping_config_path); - - if (confJson.isMember("streamlog_open") && confJson["streamlog_open"].isBool()) { - mOpenStreamLog = confJson["streamlog_open"].asBool(); - } - { int32_t oasConnectTimeout = 0; if (LoadInt32Parameter( @@ -965,6 +1254,11 @@ void AppConfig::InitEnvMapping(const std::string& envStr, std::map sIgnoreFlagSet + = {"loongcollector_conf_dir", "loongcollector_log_dir", "loongcollector_data_dir", "loongcollector_run_dir"}; + if (sIgnoreFlagSet.find(flagName) != sIgnoreFlagSet.end()) { + return; + } GFLAGS_NAMESPACE::CommandLineFlagInfo info; bool rst = GetCommandLineFlagInfo(flagName.c_str(), &info); if (rst) { @@ -1173,14 +1467,14 @@ bool AppConfig::IsInInotifyBlackList(const std::string& path) const { // TODO: Use Boost instead. // boost::filesystem::directory_iterator end; // try { boost::filesystem::directory_iterator(path); } catch (...) 
{ // failed } // OK -void AppConfig::SetLogtailSysConfDir(const std::string& dirPath) { - mLogtailSysConfDir = dirPath; +void AppConfig::SetLoongcollectorConfDir(const std::string& dirPath) { + mLoongcollectorConfDir = dirPath; if (dirPath.back() != '/' || dirPath.back() != '\\') { - mLogtailSysConfDir += PATH_SEPARATOR; + mLoongcollectorConfDir += PATH_SEPARATOR; } - if (!CheckExistance(mLogtailSysConfDir)) { - if (Mkdir(mLogtailSysConfDir)) { + if (!CheckExistance(mLoongcollectorConfDir)) { + if (Mkdir(mLoongcollectorConfDir)) { LOG_INFO(sLogger, ("sys conf dir is not existing, create", "done")); } else { LOG_WARNING(sLogger, ("sys conf dir is not existing, create", "failed")); @@ -1193,15 +1487,15 @@ void AppConfig::SetLogtailSysConfDir(const std::string& dirPath) { int savedErrno = errno; LOG_WARNING(sLogger, ("open sys conf dir error", dirPath)("error", strerror(errno))); if (savedErrno == EACCES || savedErrno == ENOTDIR || savedErrno == ENOENT) { - mLogtailSysConfDir = GetProcessExecutionDir(); + mLoongcollectorConfDir = GetAgentConfDir(); } } else { closedir(dir); } #elif defined(_MSC_VER) - DWORD ret = GetFileAttributes(mLogtailSysConfDir.c_str()); + DWORD ret = GetFileAttributes(mLoongcollectorConfDir.c_str()); if (INVALID_FILE_ATTRIBUTES == ret) { - mLogtailSysConfDir = GetProcessExecutionDir(); + mLoongcollectorConfDir = GetAgentConfDir(); } #endif @@ -1220,7 +1514,7 @@ void AppConfig::SetLogtailSysConfDir(const std::string& dirPath) { // = AbsolutePath(STRING_FLAG(ilogtail_local_yaml_config_dir), mLogtailSysConfDir) + PATH_SEPARATOR; // mUserRemoteYamlConfigDirPath // = AbsolutePath(STRING_FLAG(ilogtail_remote_yaml_config_dir), mLogtailSysConfDir) + PATH_SEPARATOR; - LOG_INFO(sLogger, ("set logtail sys conf dir", mLogtailSysConfDir)); + LOG_INFO(sLogger, ("set loongcollector conf dir", mLoongcollectorConfDir)); } bool AppConfig::IsHostPathMatchBlacklist(const string& dirPath) const { diff --git a/core/app_config/AppConfig.h b/core/app_config/AppConfig.h 
index 69a129c068..b827d5a6a4 100644 --- a/core/app_config/AppConfig.h +++ b/core/app_config/AppConfig.h @@ -27,6 +27,31 @@ #include "protobuf/sls/sls_logs.pb.h" namespace logtail { +void CreateAgentDir(); + +std::string GetAgentLogDir(); +std::string GetAgentDataDir(); +std::string GetAgentConfDir(); +std::string GetAgentRunDir(); +std::string GetAgentThirdPartyDir(); + +std::string GetAgentConfigFile(); +std::string GetAgentAppInfoFile(); +std::string GetAdhocCheckpointDirPath(); +std::string GetCheckPointFileName(); +std::string GetCrashStackFileName(); +std::string GetLocalEventDataFileName(); +std::string GetInotifyWatcherDirsDumpFileName(); +std::string GetAgentLoggersPrefix(); +std::string GetAgentLogName(); +std::string GetAgentSnapshotDir(); +std::string GetAgentProfileLogName(); +std::string GetAgentStatusLogName(); +std::string GetProfileSnapshotDumpFileName(); +std::string GetObserverEbpfHostPath(); +std::string GetSendBufferFileNamePrefix(); +std::string GetLegacyUserLocalConfigFilePath(); +std::string GetExactlyOnceCheckpoint(); template class DoubleBuffer { @@ -49,7 +74,7 @@ class AppConfig { Json::Value mConfJson; mutable SpinLock mAppConfigLock; - // ilogtail_config.json content for rebuild + // loongcollector_config.json content for rebuild std::string mIlogtailConfigJson; // syslog @@ -57,7 +82,7 @@ class AppConfig { // uint32_t mStreamLogTcpPort; // uint32_t mStreamLogPoolSizeInMb; // uint32_t mStreamLogRcvLenPerCall; - bool mOpenStreamLog; + // bool mOpenStreamLog; // performance float mCpuUsageUpLimit; @@ -131,7 +156,7 @@ class AppConfig { // logtail will force quit, 7200s by default. int32_t mForceQuitReadTimeout; - std::string mLogtailSysConfDir; // MUST ends with path separator + std::string mLoongcollectorConfDir; // MUST ends with path separator // For such security case: logtail -> proxy server + firewall (domain rule). 
// By default, logtail will construct HTTP request URL by concating host IP with @@ -240,7 +265,7 @@ class AppConfig { void LoadEnvTags(); // LoadEnvResourceLimit loads resource limit from env config. - // Read values will replace corresponding configs in ilogtail_config.json. + // Read values will replace corresponding configs in loongcollector_config.json. void LoadEnvResourceLimit(); // logtail is in purage container mode when STRING_FLAG(ilogtail_user_defined_id_env_name) exist and /logtail_host @@ -285,7 +310,7 @@ class AppConfig { // uint32_t GetStreamLogRcvLenPerCall() const { return mStreamLogRcvLenPerCall; } - bool GetOpenStreamLog() const { return mOpenStreamLog; } + // bool GetOpenStreamLog() const { return mOpenStreamLog; } std::string GetIlogtailConfigJson() { ScopedSpinLock lock(mAppConfigLock); @@ -379,9 +404,9 @@ class AppConfig { // const std::string& GetAlipayZone() const { return mAlipayZone; } // If @dirPath is not accessible, GetProcessExecutionDir will be set. - void SetLogtailSysConfDir(const std::string& dirPath); + void SetLoongcollectorConfDir(const std::string& dirPath); - const std::string& GetLogtailSysConfDir() const { return mLogtailSysConfDir; } + const std::string& GetLoongcollectorConfDir() const { return mLoongcollectorConfDir; } inline bool IsHostIPReplacePolicyEnabled() const { return mEnableHostIPReplace; } diff --git a/core/application/Application.cpp b/core/application/Application.cpp index 3ed86d1e65..a8f081504c 100644 --- a/core/application/Application.cpp +++ b/core/application/Application.cpp @@ -32,13 +32,11 @@ #include "common/version.h" #include "config/ConfigDiff.h" #include "config/watcher/ConfigWatcher.h" -#include "file_server/EventDispatcher.h" -#include "file_server/event_handler/LogInput.h" #include "file_server/ConfigManager.h" +#include "file_server/EventDispatcher.h" #include "file_server/FileServer.h" -#include "plugin/flusher/sls/DiskBufferWriter.h" +#include "file_server/event_handler/LogInput.h" #include 
"go_pipeline/LogtailPlugin.h" -#include "plugin/input/InputFeedbackInterfaceRegistry.h" #include "logger/Logger.h" #include "monitor/LogFileProfiler.h" #include "monitor/MetricExportor.h" @@ -46,10 +44,12 @@ #include "pipeline/InstanceConfigManager.h" #include "pipeline/PipelineManager.h" #include "pipeline/plugin/PluginRegistry.h" -#include "runner/LogProcess.h" #include "pipeline/queue/ExactlyOnceQueueManager.h" #include "pipeline/queue/SenderQueueManager.h" +#include "plugin/flusher/sls/DiskBufferWriter.h" +#include "plugin/input/InputFeedbackInterfaceRegistry.h" #include "runner/FlusherRunner.h" +#include "runner/ProcessorRunner.h" #include "runner/sink/http/HttpSink.h" #ifdef __ENTERPRISE__ #include "config/provider/EnterpriseConfigProvider.h" @@ -57,15 +57,12 @@ #if defined(__linux__) && !defined(__ANDROID__) #include "common/LinuxDaemonUtil.h" #include "shennong/ShennongManager.h" -#include "streamlog/StreamLogManager.h" #endif #else #include "provider/Provider.h" #endif DEFINE_FLAG_BOOL(ilogtail_disable_core, "disable core in worker process", true); -DEFINE_FLAG_STRING(ilogtail_config_env_name, "config file path", "ALIYUN_LOGTAIL_CONFIG"); -DEFINE_FLAG_STRING(app_info_file, "", "app_info.json"); DEFINE_FLAG_INT32(file_tags_update_interval, "second", 1); DEFINE_FLAG_INT32(config_scan_interval, "seconds", 10); DEFINE_FLAG_INT32(profiling_check_interval, "seconds", 60); @@ -121,13 +118,7 @@ void Application::Init() { AppConfig::GetInstance()->SetWorkingDir(GetProcessExecutionDir()); } - // load ilogtail_config.json - char* configEnv = getenv(STRING_FLAG(ilogtail_config_env_name).c_str()); - if (configEnv == NULL || strlen(configEnv) == 0) { - AppConfig::GetInstance()->LoadAppConfig(STRING_FLAG(ilogtail_config)); - } else { - AppConfig::GetInstance()->LoadAppConfig(configEnv); - } + AppConfig::GetInstance()->LoadAppConfig(GetAgentConfigFile()); // Initialize basic information: IP, hostname, etc. 
LogFileProfiler::GetInstance(); @@ -177,9 +168,9 @@ void Application::Init() { appInfoJson["UUID"] = Json::Value(Application::GetInstance()->GetUUID()); appInfoJson["instance_id"] = Json::Value(Application::GetInstance()->GetInstanceId()); #ifdef __ENTERPRISE__ - appInfoJson["logtail_version"] = Json::Value(ILOGTAIL_VERSION); + appInfoJson["loongcollector_version"] = Json::Value(ILOGTAIL_VERSION); #else - appInfoJson["logtail_version"] = Json::Value(string(ILOGTAIL_VERSION) + " Community Edition"); + appInfoJson["loongcollector_version"] = Json::Value(string(ILOGTAIL_VERSION) + " Community Edition"); appInfoJson["git_hash"] = Json::Value(ILOGTAIL_GIT_HASH); appInfoJson["build_date"] = Json::Value(ILOGTAIL_BUILD_DATE); #endif @@ -195,7 +186,7 @@ void Application::Init() { appInfoJson["os"] = Json::Value(LogFileProfiler::mOsDetail); appInfoJson["update_time"] = GetTimeStamp(time(NULL), "%Y-%m-%d %H:%M:%S"); string appInfo = appInfoJson.toStyledString(); - OverwriteFile(GetProcessExecutionDir() + STRING_FLAG(app_info_file), appInfo); + OverwriteFile(GetAgentAppInfoFile(), appInfo); LOG_INFO(sLogger, ("app info", appInfo)); } @@ -213,12 +204,12 @@ void Application::Start() { // GCOVR_EXCL_START { // add local config dir filesystem::path localConfigPath - = filesystem::path(AppConfig::GetInstance()->GetLogtailSysConfDir()) / "config" / "local"; + = filesystem::path(AppConfig::GetInstance()->GetLoongcollectorConfDir()) / "pipeline_config" / "local"; error_code ec; filesystem::create_directories(localConfigPath, ec); if (ec) { LOG_WARNING(sLogger, - ("failed to create dir for local pipelineconfig", + ("failed to create dir for local pipeline_config", "manual creation may be required")("error code", ec.value())("error msg", ec.message())); } ConfigWatcher::GetInstance()->AddPipelineSource(localConfigPath.string()); @@ -226,12 +217,12 @@ void Application::Start() { // GCOVR_EXCL_START { // add local config dir filesystem::path localConfigPath - = 
= filesystem::path(AppConfig::GetInstance()->GetLogtailSysConfDir()) / "instanceconfig" / "local"; + = filesystem::path(AppConfig::GetInstance()->GetLoongcollectorConfDir()) / "instance_config" / "local"; error_code ec; filesystem::create_directories(localConfigPath, ec); if (ec) { LOG_WARNING(sLogger, - ("failed to create dir for local instanceconfig", + ("failed to create dir for local instance_config", "manual creation may be required")("error code", ec.value())("error msg", ec.message())); } ConfigWatcher::GetInstance()->AddInstanceSource(localConfigPath.string()); @@ -270,7 +261,14 @@ void Application::Start() { // GCOVR_EXCL_START LogtailPlugin::GetInstance()->LoadPluginBase(); } - LogProcess::GetInstance()->Start(); + const char* deployMode = getenv("DEPLOY_MODE"); + const char* enableK8sMeta = getenv("ENABLE_KUBERNETES_META"); + if (deployMode != NULL && strlen(deployMode) > 0 && strcmp(deployMode, "singleton") == 0 + && enableK8sMeta != NULL && strcmp(enableK8sMeta, "true") == 0) { + LogtailPlugin::GetInstance()->LoadPluginBase(); + } + + ProcessorRunner::GetInstance()->Init(); time_t curTime = 0, lastProfilingCheckTime = 0, lastConfigCheckTime = 0, lastUpdateMetricTime = 0, lastCheckTagsTime = 0, lastQueueGCTime = 0; @@ -377,6 +375,7 @@ void Application::Exit() { LogtailMonitor::GetInstance()->Stop(); LoongCollectorMonitor::GetInstance()->Stop(); LogtailAlarm::GetInstance()->Stop(); + LogtailPlugin::GetInstance()->StopBuiltInModules(); // from now on, alarm should not be used. FlusherRunner::GetInstance()->Stop(); diff --git a/core/checkpoint/AdhocCheckpointManager.cpp b/core/checkpoint/AdhocCheckpointManager.cpp index 7b31530b39..9b64c0f03d 100644 --- a/core/checkpoint/AdhocCheckpointManager.cpp +++ b/core/checkpoint/AdhocCheckpointManager.cpp @@ -14,6 +14,7 @@ * limitations under the License.
*/ +#include "app_config/AppConfig.h" #include "AdhocCheckpointManager.h" #include "common/FileSystemUtil.h" #include "common/Flags.h" @@ -24,14 +25,6 @@ DEFINE_FLAG_INT32(adhoc_checkpoint_dump_thread_wait_interval, "microseconds", 5 * 1000); -// TODO: Change to AppConfig::GetInstance()->GetLogtailSysConfDir() -// /etc/ilogtail + /checkpoint/logtail_adhoc_checkpoint/${jobName} -#if defined(__linux__) -DEFINE_FLAG_STRING(adhoc_check_point_file_dir, "", "/tmp/logtail_adhoc_checkpoint"); -#elif defined(_MSC_VER) -DEFINE_FLAG_STRING(adhoc_check_point_file_dir, "", "C:\\LogtailData\\logtail_adhoc_checkpoint"); -#endif - namespace logtail { AdhocJobCheckpointPtr AdhocCheckpointManager::GetAdhocJobCheckpoint(const std::string& jobName) { @@ -182,8 +175,4 @@ std::string AdhocCheckpointManager::GetJobCheckpointPath(const std::string& jobN return path; } -std::string AdhocCheckpointManager::GetAdhocCheckpointDirPath() { - return STRING_FLAG(adhoc_check_point_file_dir); -} - } // namespace logtail \ No newline at end of file diff --git a/core/checkpoint/AdhocCheckpointManager.h b/core/checkpoint/AdhocCheckpointManager.h index 617aa47ece..22443b9cc0 100644 --- a/core/checkpoint/AdhocCheckpointManager.h +++ b/core/checkpoint/AdhocCheckpointManager.h @@ -47,7 +47,6 @@ class AdhocCheckpointManager { UpdateAdhocFileCheckpoint(const std::string& jobName, AdhocFileKey* fileKey, AdhocFileCheckpointPtr fileCheckpoint); void DeleteAdhocJobCheckpoint(const std::string& jobName); - std::string GetAdhocCheckpointDirPath(); std::string GetJobCheckpointPath(const std::string& jobName); }; diff --git a/core/checkpoint/CheckPointManager.cpp b/core/checkpoint/CheckPointManager.cpp index fdec823c3b..6d69990782 100644 --- a/core/checkpoint/CheckPointManager.cpp +++ b/core/checkpoint/CheckPointManager.cpp @@ -31,11 +31,8 @@ #include "monitor/LogtailAlarm.h" using namespace std; -#if defined(__linux__) -DEFINE_FLAG_STRING(check_point_filename, "", "/tmp/logtail_check_point"); -#elif 
defined(_MSC_VER) -DEFINE_FLAG_STRING(check_point_filename, "", "C:\\LogtailData\\logtail_check_point"); -#endif +DECLARE_FLAG_STRING(check_point_filename); + DEFINE_FLAG_INT32(file_check_point_time_out, "seconds", 300); DEFINE_FLAG_INT32(mem_check_point_time_out, "seconds", 7200); DEFINE_FLAG_INT32(check_point_check_interval, "default 15 min", 14 * 60); @@ -127,8 +124,8 @@ void CheckPointManager::LoadCheckPoint() { ParseConfResult cptRes = ParseConfig(AppConfig::GetInstance()->GetCheckPointFilePath(), root); // if new checkpoint file not exist, check old checkpoint file. if (cptRes == CONFIG_NOT_EXIST - && AppConfig::GetInstance()->GetCheckPointFilePath() != STRING_FLAG(check_point_filename)) { - cptRes = ParseConfig(STRING_FLAG(check_point_filename), root); + && AppConfig::GetInstance()->GetCheckPointFilePath() != GetCheckPointFileName()) { + cptRes = ParseConfig(GetCheckPointFileName(), root); } if (cptRes != CONFIG_OK) { if (cptRes == CONFIG_NOT_EXIST) diff --git a/core/checkpoint/CheckpointManagerV2.cpp b/core/checkpoint/CheckpointManagerV2.cpp index a05702cb86..76f58f4c76 100644 --- a/core/checkpoint/CheckpointManagerV2.cpp +++ b/core/checkpoint/CheckpointManagerV2.cpp @@ -36,8 +36,7 @@ namespace logtail { namespace detail { std::string getDatabasePath() { - auto fp = boost::filesystem::path(AppConfig::GetInstance()->GetLogtailSysConfDir()); - return (fp / "checkpoint_v2").string(); + return GetExactlyOnceCheckpoint(); } // Log error locally and send alarm. 
diff --git a/core/common/Constants.cpp b/core/common/Constants.cpp index ca3b599e75..4113c30c33 100644 --- a/core/common/Constants.cpp +++ b/core/common/Constants.cpp @@ -65,4 +65,6 @@ const std::string EXPIRE_DAY = "expire_day"; const std::string DEFAULT_CONTENT_KEY = "content"; const std::string DEFAULT_REG = "(.*)"; +const std::string LOONGCOLLECTOR_CONFIG = "loongcollector_config.json"; + } // namespace logtail \ No newline at end of file diff --git a/core/common/Constants.h b/core/common/Constants.h index 7010a7d61a..f94b0875a7 100644 --- a/core/common/Constants.h +++ b/core/common/Constants.h @@ -66,4 +66,6 @@ extern const std::string EXPIRE_DAY; extern const std::string DEFAULT_CONTENT_KEY; //"content" extern const std::string DEFAULT_REG; //"(.*)" +extern const std::string LOONGCOLLECTOR_CONFIG; + } // namespace logtail \ No newline at end of file diff --git a/core/common/CrashBackTraceUtil.cpp b/core/common/CrashBackTraceUtil.cpp index 22abf99151..24fbd0882b 100644 --- a/core/common/CrashBackTraceUtil.cpp +++ b/core/common/CrashBackTraceUtil.cpp @@ -13,8 +13,9 @@ // limitations under the License. 
#include "CrashBackTraceUtil.h" -#include + #include +#include #if defined(__ANDROID__) #elif defined(__linux__) #define UNW_LOCAL_ONLY @@ -24,11 +25,11 @@ #include #include #endif -#include "logger/Logger.h" -#include "RuntimeUtil.h" #include "Flags.h" +#include "RuntimeUtil.h" +#include "app_config/AppConfig.h" +#include "logger/Logger.h" -DEFINE_FLAG_STRING(crash_stack_file_name, "crash stack back trace file name", "backtrace.dat"); namespace logtail { @@ -101,7 +102,7 @@ bool MinidumpCallbackFunc(const wchar_t* dump_path, MDRawAssertionInfo* assertion, bool succeeded) { printf("MinidumpCallbackFunc is called\n"); - auto trgFilePath = GetProcessExecutionDir() + STRING_FLAG(crash_stack_file_name); + auto trgFilePath = GetCrashStackFileName(); if (0 == _access(trgFilePath.c_str(), 0)) { if (remove(trgFilePath.c_str()) != 0) { printf("Remove existing target file %s failed: %d", trgFilePath.c_str(), errno); @@ -126,7 +127,7 @@ bool MinidumpCallbackFunc(const wchar_t* dump_path, void InitCrashBackTrace() { #if defined(__ANDROID__) #elif defined(__linux__) - g_crashBackTraceFilePtr = fopen((GetProcessExecutionDir() + STRING_FLAG(crash_stack_file_name)).c_str(), "w"); + g_crashBackTraceFilePtr = fopen((GetCrashStackFileName()).c_str(), "w"); if (g_crashBackTraceFilePtr == NULL) { APSARA_LOG_ERROR(sLogger, ("unable to open stack back trace file", strerror(errno))); return; @@ -143,7 +144,7 @@ void InitCrashBackTrace() { } std::string GetCrashBackTrace() { - auto stackFilePath = GetProcessExecutionDir() + STRING_FLAG(crash_stack_file_name); + auto stackFilePath = GetCrashStackFileName(); FILE* pStackFile = fopen(stackFilePath.c_str(), "rb"); if (pStackFile == NULL) { return ""; diff --git a/core/common/FileSystemUtil.cpp b/core/common/FileSystemUtil.cpp index 5d59f13024..39ef57c067 100644 --- a/core/common/FileSystemUtil.cpp +++ b/core/common/FileSystemUtil.cpp @@ -348,17 +348,6 @@ bool IsValidSuffix(const std::string& filename) { return true; } -bool 
IsEmptyConfigJSONFile(const std::string& filePath) { - auto fullFilePath = IsRelativePath(filePath) ? GetProcessExecutionDir() + filePath : filePath; - std::ifstream in; - in.open(fullFilePath.c_str()); - if (!in) { - return false; - } - in.seekg(0, std::ios::end); - return 0 == in.tellg(); -} - namespace fsutil { Dir::Dir(const std::string& dirPath) : mDirPath(dirPath) { diff --git a/core/common/FileSystemUtil.h b/core/common/FileSystemUtil.h index c065b3818a..3ad1b046ed 100644 --- a/core/common/FileSystemUtil.h +++ b/core/common/FileSystemUtil.h @@ -113,9 +113,6 @@ FILE* FileAppendOpen(const char* filePath, const char* mode = "a"); // Logtail will ignore files with special suffix. bool IsValidSuffix(const std::string& filename); -// Return true if filePath is existing and empty, otherwise false. -bool IsEmptyConfigJSONFile(const std::string& filePath); - std::string GetFdPath(int fd); #ifdef _MSC_VER diff --git a/core/common/LogtailCommonFlags.cpp b/core/common/LogtailCommonFlags.cpp index c0aa75a941..b2cea76fda 100644 --- a/core/common/LogtailCommonFlags.cpp +++ b/core/common/LogtailCommonFlags.cpp @@ -81,7 +81,7 @@ // app config DEFINE_FLAG_STRING(ilogtail_config, "set dataserver & configserver address; (optional)set cpu,mem,bufflerfile,buffermap and etc.", - "ilogtail_config.json"); + "loongcollector_config.json"); DEFINE_FLAG_BOOL(enable_full_drain_mode, "", false); DEFINE_FLAG_INT32(cpu_limit_num, "cpu violate limit num before shutdown", 10); DEFINE_FLAG_INT32(mem_limit_num, "memory violate limit num before shutdown", 10); @@ -122,4 +122,11 @@ DEFINE_FLAG_INT32(timeout_interval, "the time interval that an inactive dir bein DEFINE_FLAG_STRING(default_container_host_path, "", "C:\\logtail_host"); #else DEFINE_FLAG_STRING(default_container_host_path, "", "/logtail_host"); -#endif \ No newline at end of file +#endif + +// dir +DEFINE_FLAG_STRING(loongcollector_conf_dir, "loongcollector config dir", "conf"); +DEFINE_FLAG_STRING(loongcollector_log_dir, 
"loongcollector log dir", "log"); +DEFINE_FLAG_STRING(loongcollector_data_dir, "loongcollector data dir", "data"); +DEFINE_FLAG_STRING(loongcollector_run_dir, "loongcollector run dir", "run"); +DEFINE_FLAG_STRING(loongcollector_third_party_dir, "loongcollector third party dir", "thirdparty"); \ No newline at end of file diff --git a/core/common/LogtailCommonFlags.h b/core/common/LogtailCommonFlags.h index 54bac8018b..2c980742a0 100644 --- a/core/common/LogtailCommonFlags.h +++ b/core/common/LogtailCommonFlags.h @@ -55,3 +55,9 @@ DECLARE_FLAG_BOOL(enable_root_path_collection); DECLARE_FLAG_INT32(timeout_interval); DECLARE_FLAG_STRING(default_container_host_path); + +DECLARE_FLAG_STRING(loongcollector_conf_dir); +DECLARE_FLAG_STRING(loongcollector_log_dir); +DECLARE_FLAG_STRING(loongcollector_data_dir); +DECLARE_FLAG_STRING(loongcollector_run_dir); +DECLARE_FLAG_STRING(loongcollector_third_party_dir); \ No newline at end of file diff --git a/core/common/RuntimeUtil.cpp b/core/common/RuntimeUtil.cpp index a038cb58b1..91ea4632a9 100644 --- a/core/common/RuntimeUtil.cpp +++ b/core/common/RuntimeUtil.cpp @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include "app_config/AppConfig.h" #include "RuntimeUtil.h" #if defined(__linux__) #include @@ -71,11 +72,21 @@ std::string GetBinaryName(void) { #endif } -// only ilogtail_config.json will be rebuild from memory +// only loongcollector_config.json will be rebuild from memory bool RebuildExecutionDir(const std::string& ilogtailConfigJson, std::string& errorMessage, const std::string& executionDir) { - std::string path = executionDir.empty() ? 
GetProcessExecutionDir() : executionDir; + std::string path = GetAgentDataDir(); + if (CheckExistance(path)) + return true; + if (!Mkdirs(path)) { + std::stringstream ss; + ss << "create data dir failed, errno is " << errno; + errorMessage = ss.str(); + return false; + } + #if defined(__RUN_LOGTAIL__) + path = executionDir.empty() ? GetProcessExecutionDir() : executionDir; if (CheckExistance(path)) return true; if (!Mkdir(path)) { @@ -98,6 +109,7 @@ bool RebuildExecutionDir(const std::string& ilogtailConfigJson, fwrite(ilogtailConfigJson.c_str(), 1, ilogtailConfigJson.size(), pFile); fclose(pFile); + #endif return true; } diff --git a/core/common/compression/Compressor.cpp b/core/common/compression/Compressor.cpp index 39a7af4b57..c5800016d9 100644 --- a/core/common/compression/Compressor.cpp +++ b/core/common/compression/Compressor.cpp @@ -16,7 +16,7 @@ #include -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" using namespace std; @@ -25,18 +25,18 @@ namespace logtail { void Compressor::SetMetricRecordRef(MetricLabels&& labels, DynamicMetricLabels&& dynamicLabels) { WriteMetrics::GetInstance()->PrepareMetricsRecordRef( mMetricsRecordRef, std::move(labels), std::move(dynamicLabels)); - mInItemsCnt = mMetricsRecordRef.CreateCounter(METRIC_IN_ITEMS_CNT); - mInItemSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_IN_ITEM_SIZE_BYTES); - mOutItemsCnt = mMetricsRecordRef.CreateCounter(METRIC_OUT_ITEMS_CNT); - mOutItemSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_OUT_ITEM_SIZE_BYTES); - mDiscardedItemsCnt = mMetricsRecordRef.CreateCounter("discarded_items_cnt"); - mDiscardedItemSizeBytes = mMetricsRecordRef.CreateCounter("discarded_item_size_bytes"); - mTotalDelayMs = mMetricsRecordRef.CreateCounter(METRIC_TOTAL_DELAY_MS); + mInItemsTotal = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_IN_ITEMS_TOTAL); + mInItemSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_IN_SIZE_BYTES); + mOutItemsTotal = 
mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_OUT_ITEMS_TOTAL); + mOutItemSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_OUT_SIZE_BYTES); + mTotalProcessMs = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_TOTAL_PROCESS_TIME_MS); + mDiscardedItemsTotal = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_DISCARDED_ITEMS_TOTAL); + mDiscardedItemSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_DISCARDED_ITEMS_SIZE_BYTES); } bool Compressor::DoCompress(const string& input, string& output, string& errorMsg) { if (mMetricsRecordRef != nullptr) { - mInItemsCnt->Add(1); + mInItemsTotal->Add(1); mInItemSizeBytes->Add(input.size()); } @@ -44,12 +44,12 @@ bool Compressor::DoCompress(const string& input, string& output, string& errorMs auto res = Compress(input, output, errorMsg); if (mMetricsRecordRef != nullptr) { - mTotalDelayMs->Add(chrono::duration_cast(chrono::system_clock::now() - before).count()); + mTotalProcessMs->Add(chrono::duration_cast(chrono::system_clock::now() - before).count()); if (res) { - mOutItemsCnt->Add(1); + mOutItemsTotal->Add(1); mOutItemSizeBytes->Add(output.size()); } else { - mDiscardedItemsCnt->Add(1); + mDiscardedItemsTotal->Add(1); mDiscardedItemSizeBytes->Add(input.size()); } } diff --git a/core/common/compression/Compressor.h b/core/common/compression/Compressor.h index 0c32358ca5..0a2e490d30 100644 --- a/core/common/compression/Compressor.h +++ b/core/common/compression/Compressor.h @@ -40,13 +40,13 @@ class Compressor { protected: mutable MetricsRecordRef mMetricsRecordRef; - CounterPtr mInItemsCnt; + CounterPtr mInItemsTotal; CounterPtr mInItemSizeBytes; - CounterPtr mOutItemsCnt; + CounterPtr mOutItemsTotal; CounterPtr mOutItemSizeBytes; - CounterPtr mDiscardedItemsCnt; + CounterPtr mDiscardedItemsTotal; CounterPtr mDiscardedItemSizeBytes; - CounterPtr mTotalDelayMs; + CounterPtr mTotalProcessMs; private: virtual bool Compress(const std::string& input, std::string& output, std::string& errorMsg) = 0; diff 
--git a/core/common/compression/CompressorFactory.cpp b/core/common/compression/CompressorFactory.cpp index 0e3b27197b..bb1e0f3885 100644 --- a/core/common/compression/CompressorFactory.cpp +++ b/core/common/compression/CompressorFactory.cpp @@ -15,7 +15,7 @@ #include "common/compression/CompressorFactory.h" #include "common/ParamExtractor.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "common/compression/LZ4Compressor.h" #include "common/compression/ZstdCompressor.h" @@ -61,10 +61,10 @@ unique_ptr CompressorFactory::Create(const Json::Value& config, } else { compressor = Create(defaultType); } - compressor->SetMetricRecordRef({{METRIC_LABEL_PROJECT, ctx.GetProjectName()}, - {METRIC_LABEL_CONFIG_NAME, ctx.GetConfigName()}, - {METRIC_LABEL_KEY_COMPONENT_NAME, "compressor"}, - {METRIC_LABEL_KEY_FLUSHER_NODE_ID, flusherId}}); + compressor->SetMetricRecordRef({{METRIC_LABEL_KEY_PROJECT, ctx.GetProjectName()}, + {METRIC_LABEL_KEY_PIPELINE_NAME, ctx.GetConfigName()}, + {METRIC_LABEL_KEY_COMPONENT_NAME, METRIC_LABEL_VALUE_COMPONENT_NAME_COMPRESSOR}, + {METRIC_LABEL_KEY_FLUSHER_PLUGIN_ID, flusherId}}); return compressor; } diff --git a/core/common/timer/Timer.cpp b/core/common/timer/Timer.cpp index ed6af6ba14..a7316943a5 100644 --- a/core/common/timer/Timer.cpp +++ b/core/common/timer/Timer.cpp @@ -66,12 +66,15 @@ void Timer::Run() { mCV.wait_for(threadLock, timeout); break; } else { + auto e = std::move(const_cast&>(mQueue.top())); + mQueue.pop(); + queueLock.unlock(); if (!e->IsValid()) { LOG_INFO(sLogger, ("invalid timer event", "task is cancelled")); } else { e->Execute(); } - mQueue.pop(); + queueLock.lock(); } } } diff --git a/core/common/timer/Timer.h b/core/common/timer/Timer.h index 1d6adf2c60..6825e8443d 100644 --- a/core/common/timer/Timer.h +++ b/core/common/timer/Timer.h @@ -52,6 +52,7 @@ class Timer { #ifdef APSARA_UNIT_TEST_MAIN friend class TimerUnittest; + friend class ScrapeSchedulerUnittest; 
#endif }; diff --git a/core/config/ConfigDiff.h b/core/config/ConfigDiff.h index 76571ee22c..8dc3ed2d26 100644 --- a/core/config/ConfigDiff.h +++ b/core/config/ConfigDiff.h @@ -19,8 +19,8 @@ #include #include -#include "config/PipelineConfig.h" #include "config/InstanceConfig.h" +#include "config/PipelineConfig.h" namespace logtail { @@ -29,7 +29,6 @@ class PipelineConfigDiff { std::vector mAdded; std::vector mModified; std::vector mRemoved; - std::vector mUnchanged; // 过渡使用,仅供插件系统用 bool IsEmpty() { return mRemoved.empty() && mAdded.empty() && mModified.empty(); } }; @@ -38,7 +37,6 @@ class InstanceConfigDiff { std::vector mAdded; std::vector mModified; std::vector mRemoved; - std::vector mUnchanged; // 过渡使用,仅供插件系统用 bool IsEmpty() { return mRemoved.empty() && mAdded.empty() && mModified.empty(); } }; diff --git a/core/config/PipelineConfig.cpp b/core/config/PipelineConfig.cpp index 83f3c581b2..b44d05df87 100644 --- a/core/config/PipelineConfig.cpp +++ b/core/config/PipelineConfig.cpp @@ -140,9 +140,6 @@ bool PipelineConfig::Parse() { // extensions module parsing will rely on their results. 
bool hasObserverInput = false; bool hasFileInput = false; -#ifdef __ENTERPRISE__ - bool hasStreamInput = false; -#endif key = "inputs"; itr = mDetail->find(key.c_str(), key.c_str() + key.size()); if (!itr) { @@ -244,21 +241,10 @@ bool PipelineConfig::Parse() { hasObserverInput = true; } else if (pluginType == "input_file" || pluginType == "input_container_stdio") { hasFileInput = true; -#ifdef __ENTERPRISE__ - } else if (pluginType == "input_stream") { - if (!AppConfig::GetInstance()->GetOpenStreamLog()) { - PARAM_ERROR_RETURN( - sLogger, alarm, "stream log is not enabled", noModule, mName, mProject, mLogstore, mRegion); - } - hasStreamInput = true; -#endif } } // TODO: remove these special restrictions bool hasSpecialInput = hasObserverInput || hasFileInput; -#ifdef __ENTERPRISE__ - hasSpecialInput = hasSpecialInput || hasStreamInput; -#endif if (hasSpecialInput && (*mDetail)["inputs"].size() > 1) { PARAM_ERROR_RETURN(sLogger, alarm, @@ -283,19 +269,6 @@ bool PipelineConfig::Parse() { mLogstore, mRegion); } -#ifdef __ENTERPRISE__ - // TODO: remove these special restrictions - if (hasStreamInput && !itr->empty()) { - PARAM_ERROR_RETURN(sLogger, - alarm, - "processor plugins coexist with input_stream", - noModule, - mName, - mProject, - mLogstore, - mRegion); - } -#endif bool isCurrentPluginNative = true; for (Json::Value::ArrayIndex i = 0; i < itr->size(); ++i) { const Json::Value& plugin = (*itr)[i]; @@ -520,19 +493,6 @@ bool PipelineConfig::Parse() { PARAM_ERROR_RETURN( sLogger, alarm, "unsupported flusher plugin", pluginType, mName, mProject, mLogstore, mRegion); } -#ifdef __ENTERPRISE__ - // TODO: remove these special restrictions - if (hasStreamInput && pluginType != "flusher_sls") { - PARAM_ERROR_RETURN(sLogger, - alarm, - "flusher plugins other than flusher_sls coexist with input_stream", - noModule, - mName, - mProject, - mLogstore, - mRegion); - } -#endif mFlushers.push_back(&plugin); } // TODO: remove these special restrictions diff --git 
a/core/config/provider/ConfigProvider.cpp b/core/config/provider/ConfigProvider.cpp index 44c2accbe4..be4709dc7a 100644 --- a/core/config/provider/ConfigProvider.cpp +++ b/core/config/provider/ConfigProvider.cpp @@ -23,12 +23,12 @@ namespace logtail { void ConfigProvider::Init(const string& dir) { // default path: /etc/ilogtail/config/${dir} - mPipelineSourceDir.assign(AppConfig::GetInstance()->GetLogtailSysConfDir()); - mPipelineSourceDir /= "config"; + mPipelineSourceDir.assign(AppConfig::GetInstance()->GetLoongcollectorConfDir()); + mPipelineSourceDir /= "pipeline_config"; mPipelineSourceDir /= dir; - mInstanceSourceDir.assign(AppConfig::GetInstance()->GetLogtailSysConfDir()); - mInstanceSourceDir /= "instanceconfig"; + mInstanceSourceDir.assign(AppConfig::GetInstance()->GetLoongcollectorConfDir()); + mInstanceSourceDir /= "instance_config"; mInstanceSourceDir /= dir; error_code ec; diff --git a/core/config/watcher/ConfigWatcher.cpp b/core/config/watcher/ConfigWatcher.cpp index 816a33a371..67c1633ad5 100644 --- a/core/config/watcher/ConfigWatcher.cpp +++ b/core/config/watcher/ConfigWatcher.cpp @@ -19,8 +19,8 @@ #include #include "logger/Logger.h" -#include "pipeline/PipelineManager.h" #include "pipeline/InstanceConfigManager.h" +#include "pipeline/PipelineManager.h" using namespace std; @@ -51,12 +51,15 @@ ConfigDiffType ConfigWatcher::CheckConfigDiff( continue; } if (!filesystem::exists(s)) { - LOG_WARNING(sLogger, ("config dir path not existed", "skip current object")("dir path", dir.string())("configType", configType)); + LOG_WARNING(sLogger, + ("config dir path not existed", "skip current object")("dir path", dir.string())("configType", + configType)); continue; } if (!filesystem::is_directory(s)) { LOG_WARNING(sLogger, - ("config dir path is not a directory", "skip current object")("dir path", dir.string())("configType", configType)); + ("config dir path is not a directory", + "skip current object")("dir path", dir.string())("configType", configType)); 
continue; } for (auto const& entry : filesystem::directory_iterator(dir, ec)) { @@ -72,13 +75,15 @@ ConfigDiffType ConfigWatcher::CheckConfigDiff( const string& configName = path.stem().string(); const string& filepath = path.string(); if (!filesystem::is_regular_file(entry.status(ec))) { - LOG_DEBUG(sLogger, ("config file is not a regular file", "skip current object")("filepath", filepath)("configType", configType)); + LOG_DEBUG(sLogger, + ("config file is not a regular file", + "skip current object")("filepath", filepath)("configType", configType)); continue; } if (configSet.find(configName) != configSet.end()) { - LOG_WARNING( - sLogger, - ("more than 1 config with the same name is found", "skip current config")("filepath", filepath)("configType", configType)); + LOG_WARNING(sLogger, + ("more than 1 config with the same name is found", + "skip current config")("filepath", filepath)("configType", configType)); continue; } configSet.insert(configName); @@ -93,12 +98,16 @@ ConfigDiffType ConfigWatcher::CheckConfigDiff( continue; } if (!IsConfigEnabled(configName, *detail)) { - LOG_INFO(sLogger, ("new config found and disabled", "skip current object")("config", configName)("configType", configType)); + LOG_INFO(sLogger, + ("new config found and disabled", + "skip current object")("config", configName)("configType", configType)); continue; } ConfigType config(configName, std::move(detail)); if (!config.Parse()) { - LOG_ERROR(sLogger, ("new config found but invalid", "skip current object")("config", configName)("configType", configType)); + LOG_ERROR(sLogger, + ("new config found but invalid", + "skip current object")("config", configName)("configType", configType)); LogtailAlarm::GetInstance()->SendAlarm(CATEGORY_CONFIG_ALARM, "new config found but invalid: skip current object, config: " + configName + ", configType: " + configType, @@ -108,29 +117,27 @@ ConfigDiffType ConfigWatcher::CheckConfigDiff( continue; } diff.mAdded.push_back(std::move(config)); - LOG_INFO( - 
sLogger, - ("new config found and passed topology check", "prepare to build config")("config", configName)("configType", configType)); + LOG_INFO(sLogger, + ("new config found and passed topology check", + "prepare to build config")("config", configName)("configType", configType)); } else if (iter->second.first != size || iter->second.second != mTime) { // for config currently running, we leave it untouched if new config is invalid fileInfoMap[filepath] = make_pair(size, mTime); unique_ptr detail = make_unique(); if (!LoadConfigDetailFromFile(path, *detail)) { - if (configManager->FindConfigByName(configName)) { - diff.mUnchanged.push_back(configName); - } continue; } if (!IsConfigEnabled(configName, *detail)) { if (configManager->FindConfigByName(configName)) { diff.mRemoved.push_back(configName); - LOG_INFO(sLogger, - ("existing valid config modified and disabled", - "prepare to stop current running config")("config", configName)("configType", configType)); + LOG_INFO( + sLogger, + ("existing valid config modified and disabled", + "prepare to stop current running config")("config", configName)("configType", configType)); } else { LOG_INFO(sLogger, - ("existing invalid config modified and disabled", "skip current object")("config", - configName)("configType", configType)); + ("existing invalid config modified and disabled", + "skip current object")("config", configName)("configType", configType)); } continue; } @@ -157,7 +164,6 @@ ConfigDiffType ConfigWatcher::CheckConfigDiff( } else if (*detail != p->GetConfig()) { ConfigType config(configName, std::move(detail)); if (!config.Parse()) { - diff.mUnchanged.push_back(configName); LOG_ERROR(sLogger, ("existing valid config modified and becomes invalid", "keep current config running")("config", configName)("configType", configType)); @@ -175,15 +181,11 @@ ConfigDiffType ConfigWatcher::CheckConfigDiff( ("existing valid config modified and passed topology check", "prepare to rebuild config")("config", 
configName)("configType", configType)); } else { - diff.mUnchanged.push_back(configName); LOG_DEBUG(sLogger, - ("existing valid config file modified, but no change found", "skip current object")("configType", configType)); + ("existing valid config file modified, but no change found", + "skip current object")("configType", configType)); } } else { - // 为了插件系统过渡使用 - if (configManager->FindConfigByName(configName)) { - diff.mUnchanged.push_back(configName); - } LOG_DEBUG(sLogger, ("existing config file unchanged", "skip current object")("configType", configType)); } } @@ -192,7 +194,8 @@ ConfigDiffType ConfigWatcher::CheckConfigDiff( if (configSet.find(name) == configSet.end()) { diff.mRemoved.push_back(name); LOG_INFO(sLogger, - ("existing valid config is removed", "prepare to stop current running config")("config", name)("configType", configType)); + ("existing valid config is removed", + "prepare to stop current running config")("config", name)("configType", configType)); } } for (const auto& item : fileInfoMap) { diff --git a/core/container_manager/ContainerDiscoveryOptions.h b/core/container_manager/ContainerDiscoveryOptions.h index 633a99caa4..1b1b06ccaf 100644 --- a/core/container_manager/ContainerDiscoveryOptions.h +++ b/core/container_manager/ContainerDiscoveryOptions.h @@ -52,7 +52,7 @@ struct ContainerDiscoveryOptions { bool Init(const Json::Value& config, const PipelineContext& ctx, const std::string& pluginType); void GenerateContainerMetaFetchingGoPipeline(Json::Value& res, const FileDiscoveryOptions* fileDiscovery = nullptr, - const PluginInstance::PluginMeta pluginMeta = {"0", "0", "0"}) const; + const PluginInstance::PluginMeta pluginMeta = {"0"}) const; }; using ContainerDiscoveryConfig = std::pair; diff --git a/core/ebpf/SourceManager.cpp b/core/ebpf/SourceManager.cpp index 4f79e1149d..be4b1c0acc 100644 --- a/core/ebpf/SourceManager.cpp +++ b/core/ebpf/SourceManager.cpp @@ -63,6 +63,7 @@ void SourceManager::Init() { mHostIp = GetHostIp(); mHostName 
= GetHostName(); mHostPathPrefix = STRING_FLAG(default_container_host_path); + // load ebpf lib mBinaryPath = GetProcessExecutionDir(); mFullLibName = "lib" + m_lib_name_ + ".so"; for (auto& x : mRunning) { diff --git a/core/file_server/ConfigManager.cpp b/core/file_server/ConfigManager.cpp index 00161105e1..fca041e21a 100644 --- a/core/file_server/ConfigManager.cpp +++ b/core/file_server/ConfigManager.cpp @@ -101,6 +101,7 @@ DEFINE_FLAG_INT32(docker_config_update_interval, "interval between docker config namespace logtail { +// ParseConfResult ParseConfig(const std::string& configName, Json::Value& jsonRoot) { // Get full path, if it is a relative path, prepend process execution dir. std::string fullPath = configName; diff --git a/core/file_server/EventDispatcher.cpp b/core/file_server/EventDispatcher.cpp index fe41226b51..dbbd80cd11 100644 --- a/core/file_server/EventDispatcher.cpp +++ b/core/file_server/EventDispatcher.cpp @@ -13,6 +13,7 @@ // limitations under the License. #include "EventDispatcher.h" +#include "Flags.h" #if defined(__linux__) #include #include @@ -83,7 +84,6 @@ DEFINE_FLAG_INT32(existed_file_active_timeout, 120); DEFINE_FLAG_INT32(checkpoint_find_max_cache_size, "", 100000); DEFINE_FLAG_INT32(max_watch_dir_count, "", 100 * 1000); -DEFINE_FLAG_STRING(inotify_watcher_dirs_dump_filename, "", "inotify_watcher_dirs"); DEFINE_FLAG_INT32(default_max_inotify_watch_num, "the max allowed inotify watch dir number", 3000); namespace logtail { @@ -718,7 +718,7 @@ void EventDispatcher::RemoveOneToOneMapEntry(int wd) { } void EventDispatcher::DumpInotifyWatcherDirs() { - string filename = GetProcessExecutionDir() + STRING_FLAG(inotify_watcher_dirs_dump_filename); + string filename = GetInotifyWatcherDirsDumpFileName(); FILE* pFile = fopen(filename.c_str(), "w"); if (pFile == NULL) { LOG_WARNING(sLogger, ("open file (dump inotify watcher dirs) failed", filename)("errno", errno)); diff --git a/core/file_server/FileServer.cpp b/core/file_server/FileServer.cpp 
index 49b1953b72..2e37967007 100644 --- a/core/file_server/FileServer.cpp +++ b/core/file_server/FileServer.cpp @@ -18,12 +18,12 @@ #include "common/Flags.h" #include "common/StringTools.h" #include "common/TimeUtil.h" +#include "file_server/ConfigManager.h" #include "file_server/EventDispatcher.h" #include "file_server/event_handler/LogInput.h" -#include "file_server/ConfigManager.h" -#include "plugin/input/InputFile.h" #include "file_server/polling/PollingDirFile.h" #include "file_server/polling/PollingModify.h" +#include "plugin/input/InputFile.h" DEFINE_FLAG_BOOL(enable_polling_discovery, "", true); @@ -31,6 +31,11 @@ using namespace std; namespace logtail { +FileServer::FileServer() { + WriteMetrics::GetInstance()->PrepareMetricsRecordRef(mMetricsRecordRef, + {{METRIC_LABEL_KEY_RUNNER_NAME, METRIC_LABEL_VALUE_RUNNER_NAME_FILE_SERVER}}); +} + // 启动文件服务,包括加载配置、处理检查点、注册事件等 void FileServer::Start() { ConfigManager::GetInstance()->LoadDockerConfig(); diff --git a/core/file_server/FileServer.h b/core/file_server/FileServer.h index 7daee73fc5..d77b6a13a1 100644 --- a/core/file_server/FileServer.h +++ b/core/file_server/FileServer.h @@ -23,9 +23,11 @@ #include "common/Lock.h" #include "file_server/FileDiscoveryOptions.h" #include "file_server/MultilineOptions.h" +#include "file_server/reader/FileReaderOptions.h" +#include "monitor/LogtailMetric.h" #include "monitor/PluginMetricManager.h" #include "pipeline/PipelineContext.h" -#include "file_server/reader/FileReaderOptions.h" + namespace logtail { @@ -78,6 +80,7 @@ class FileServer { // for reader, event_handler ... 
ReentrantMetricsRecordRef GetOrCreateReentrantMetricsRecordRef(const std::string& name, MetricLabels& labels); void ReleaseReentrantMetricsRecordRef(const std::string& name, MetricLabels& labels); + MetricsRecordRef& GetMetricsRecordRef() { return mMetricsRecordRef; } // 过渡使用 void Resume(bool isConfigUpdate = true); @@ -88,7 +91,7 @@ class FileServer { void RemoveExactlyOnceConcurrency(const std::string& name); private: - FileServer() = default; + FileServer(); ~FileServer() = default; void PauseInner(); @@ -102,6 +105,8 @@ class FileServer { std::unordered_map mPipelineNamePluginMetricManagersMap; // 过渡使用 std::unordered_map mPipelineNameEOConcurrencyMap; + + mutable MetricsRecordRef mMetricsRecordRef; }; } // namespace logtail diff --git a/core/file_server/event_handler/EventHandler.cpp b/core/file_server/event_handler/EventHandler.cpp index 22fdbe41ba..4c9774b373 100644 --- a/core/file_server/event_handler/EventHandler.cpp +++ b/core/file_server/event_handler/EventHandler.cpp @@ -18,20 +18,20 @@ #include #include -#include "file_server/event_handler/LogInput.h" #include "app_config/AppConfig.h" #include "common/FileSystemUtil.h" #include "common/RuntimeUtil.h" #include "common/StringTools.h" #include "common/TimeUtil.h" -#include "file_server/EventDispatcher.h" -#include "file_server/event/BlockEventManager.h" #include "file_server/ConfigManager.h" +#include "file_server/EventDispatcher.h" #include "file_server/FileServer.h" +#include "file_server/event/BlockEventManager.h" +#include "file_server/event_handler/LogInput.h" #include "logger/Logger.h" #include "monitor/LogtailAlarm.h" -#include "runner/LogProcess.h" #include "pipeline/queue/ProcessQueueManager.h" +#include "runner/ProcessorRunner.h" using namespace std; using namespace sls_logs; @@ -1090,7 +1090,7 @@ int32_t ModifyHandler::PushLogToProcessor(LogFileReaderPtr reader, LogBuffer* lo time(NULL)); PipelineEventGroup group = LogFileReader::GenerateEventGroup(reader, logBuffer); - while 
(!LogProcess::GetInstance()->PushBuffer(reader->GetQueueKey(), 0, std::move(group))) // 10ms + while (!ProcessorRunner::GetInstance()->PushQueue(reader->GetQueueKey(), 0, std::move(group))) // 10ms { ++pushRetry; if (pushRetry % 10 == 0) diff --git a/core/file_server/event_handler/HistoryFileImporter.cpp b/core/file_server/event_handler/HistoryFileImporter.cpp index 95dbc89cd7..24c8bbfbae 100644 --- a/core/file_server/event_handler/HistoryFileImporter.cpp +++ b/core/file_server/event_handler/HistoryFileImporter.cpp @@ -20,10 +20,10 @@ #include "common/Thread.h" #include "common/TimeUtil.h" #include "file_server/ConfigManager.h" +#include "file_server/reader/LogFileReader.h" #include "logger/Logger.h" -#include "runner/LogProcess.h" #include "pipeline/queue/ProcessQueueManager.h" -#include "file_server/reader/LogFileReader.h" +#include "runner/ProcessorRunner.h" namespace logtail { @@ -51,7 +51,7 @@ void HistoryFileImporter::Run() { } void HistoryFileImporter::LoadCheckPoint() { - std::string historyDataPath = GetProcessExecutionDir() + "history_file_checkpoint"; + std::string historyDataPath = GetAgentDataDir() + "history_file_checkpoint"; FILE* readPtr = fopen(historyDataPath.c_str(), "r"); if (readPtr != NULL) { fclose(readPtr); @@ -59,7 +59,7 @@ void HistoryFileImporter::LoadCheckPoint() { } void HistoryFileImporter::ProcessEvent(const HistoryFileEvent& event, const std::vector& fileNames) { - static LogProcess* logProcess = LogProcess::GetInstance(); + static ProcessorRunner* logProcess = ProcessorRunner::GetInstance(); LOG_INFO(sLogger, ("begin load history files, count", fileNames.size())("file list", ToString(fileNames))); for (size_t i = 0; i < fileNames.size(); ++i) { @@ -116,7 +116,7 @@ void HistoryFileImporter::ProcessEvent(const HistoryFileEvent& event, const std: // TODO: currently only 1 input is allowed, so we assume 0 here. It should be the actual input seq after // refactorization. 
- logProcess->PushBuffer(readerSharePtr->GetQueueKey(), 0, std::move(group), 100000000); + logProcess->PushQueue(readerSharePtr->GetQueueKey(), 0, std::move(group), 100000000); } else { // when ReadLog return false, retry once if (doneFlag) { diff --git a/core/file_server/event_handler/LogInput.cpp b/core/file_server/event_handler/LogInput.cpp index a5ea529bec..ca1b5616e8 100644 --- a/core/file_server/event_handler/LogInput.cpp +++ b/core/file_server/event_handler/LogInput.cpp @@ -16,8 +16,6 @@ #include -#include "file_server/event_handler/EventHandler.h" -#include "file_server/event_handler/HistoryFileImporter.h" #include "app_config/AppConfig.h" #include "application/Application.h" #include "checkpoint/CheckPointManager.h" @@ -27,18 +25,20 @@ #include "common/RuntimeUtil.h" #include "common/StringTools.h" #include "common/TimeUtil.h" +#include "file_server/ConfigManager.h" #include "file_server/EventDispatcher.h" #include "file_server/event/BlockEventManager.h" -#include "file_server/ConfigManager.h" -#include "logger/Logger.h" -#include "monitor/LogtailAlarm.h" -#include "monitor/Monitor.h" +#include "file_server/event_handler/EventHandler.h" +#include "file_server/event_handler/HistoryFileImporter.h" #include "file_server/polling/PollingCache.h" #include "file_server/polling/PollingDirFile.h" #include "file_server/polling/PollingEventQueue.h" #include "file_server/polling/PollingModify.h" #include "file_server/reader/GloablFileDescriptorManager.h" #include "file_server/reader/LogFileReader.h" +#include "logger/Logger.h" +#include "monitor/LogtailAlarm.h" +#include "monitor/Monitor.h" #ifdef __ENTERPRISE__ #include "config/provider/EnterpriseConfigProvider.h" #endif @@ -54,7 +54,6 @@ DEFINE_FLAG_INT32(check_handler_timeout_interval, "seconds", 180); DEFINE_FLAG_INT32(dump_inotify_watcher_interval, "seconds", 180); DEFINE_FLAG_INT32(clear_config_match_interval, "seconds", 600); DEFINE_FLAG_INT32(check_block_event_interval, "seconds", 1); 
-DEFINE_FLAG_STRING(local_event_data_file_name, "local event data file name", "local_event.json"); DEFINE_FLAG_INT32(read_local_event_interval, "seconds", 60); DEFINE_FLAG_BOOL(force_close_file_on_container_stopped, "whether close file handler immediately when associate container stopped", @@ -88,9 +87,11 @@ void LogInput::Start() { mInteruptFlag = false; + mLastRunTime = FileServer::GetInstance()->GetMetricsRecordRef().CreateIntGauge(METRIC_RUNNER_LAST_RUN_TIME); mAgentOpenFdTotal = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_OPEN_FD_TOTAL); - mAgentRegisterHandlerTotal - = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_REGISTER_HANDLER_TOTAL); + mRegisterdHandlersTotal = FileServer::GetInstance()->GetMetricsRecordRef().CreateIntGauge(METRIC_RUNNER_FILE_WATCHED_DIRS_TOTAL); + mActiveReadersTotal = FileServer::GetInstance()->GetMetricsRecordRef().CreateIntGauge(METRIC_RUNNER_FILE_ACTIVE_READERS_TOTAL); + mEnableFileIncludedByMultiConfigs = FileServer::GetInstance()->GetMetricsRecordRef().CreateIntGauge(METRIC_RUNNER_FILE_ENABLE_FILE_INCLUDED_BY_MULTI_CONFIGS_FLAG); new Thread([this]() { ProcessLoop(); }); } @@ -104,7 +105,7 @@ void LogInput::Resume() { void LogInput::HoldOn() { LOG_INFO(sLogger, ("event handle daemon pause", "starts")); - if (BOOL_FLAG(enable_full_drain_mode)) { + if (BOOL_FLAG(enable_full_drain_mode) && Application::GetInstance()->IsExiting()) { unique_lock lock(mThreadRunningMux); mStopCV.wait(lock, [this]() { return mInteruptFlag; }); } else { @@ -199,15 +200,15 @@ void LogInput::FlowControl() { bool LogInput::ReadLocalEvents() { Json::Value localEventJson; // will contains the root value after parsing. 
- ParseConfResult loadRes = ParseConfig(STRING_FLAG(local_event_data_file_name), localEventJson); - LOG_DEBUG(sLogger, ("load local events", STRING_FLAG(local_event_data_file_name))("result", loadRes)); + ParseConfResult loadRes = ParseConfig(GetLocalEventDataFileName(), localEventJson); + LOG_DEBUG(sLogger, ("load local events", GetLocalEventDataFileName())("result", loadRes)); if (loadRes != CONFIG_OK || !localEventJson.isArray()) { return false; } // set discard old data flag, so that history data will not be dropped. BOOL_FLAG(ilogtail_discard_old_data) = false; LOG_INFO(sLogger, - ("load local events", STRING_FLAG(local_event_data_file_name))("event count", localEventJson.size())); + ("load local events", GetLocalEventDataFileName())("event count", localEventJson.size())); for (Json::ValueIterator iter = localEventJson.begin(); iter != localEventJson.end(); ++iter) { const Json::Value& eventItem = *iter; if (!eventItem.isObject()) { @@ -290,7 +291,7 @@ bool LogInput::ReadLocalEvents() { // after process event, clear the local file - FILE* pFile = fopen((GetProcessExecutionDir() + STRING_FLAG(local_event_data_file_name)).c_str(), "w"); + FILE* pFile = fopen(GetLocalEventDataFileName().c_str(), "w"); if (pFile != NULL) { fclose(pFile); } @@ -342,6 +343,7 @@ void LogInput::ProcessEvent(EventDispatcher* dispatcher, Event* ev) { void LogInput::UpdateCriticalMetric(int32_t curTime) { LogtailMonitor::GetInstance()->UpdateMetric("last_read_event_time", GetTimeStamp(mLastReadEventTime, "%Y-%m-%d %H:%M:%S")); + mLastRunTime->Set(mLastReadEventTime.load()); LogtailMonitor::GetInstance()->UpdateMetric("event_tps", 1.0 * mEventProcessCount / (curTime - mLastUpdateMetricTime)); @@ -350,8 +352,9 @@ void LogInput::UpdateCriticalMetric(int32_t curTime) { mAgentOpenFdTotal->Set(openFdTotal); size_t handlerCount = EventDispatcher::GetInstance()->GetHandlerCount(); LogtailMonitor::GetInstance()->UpdateMetric("register_handler", handlerCount); - 
mAgentRegisterHandlerTotal->Set(handlerCount); + mRegisterdHandlersTotal->Set(handlerCount); LogtailMonitor::GetInstance()->UpdateMetric("reader_count", CheckPointManager::Instance()->GetReaderCount()); + mActiveReadersTotal->Set(CheckPointManager::Instance()->GetReaderCount()); LogtailMonitor::GetInstance()->UpdateMetric("multi_config", AppConfig::GetInstance()->IsAcceptMultiConfig()); mEventProcessCount = 0; } diff --git a/core/file_server/event_handler/LogInput.h b/core/file_server/event_handler/LogInput.h index 21217086a3..e2921eec2c 100644 --- a/core/file_server/event_handler/LogInput.h +++ b/core/file_server/event_handler/LogInput.h @@ -79,8 +79,12 @@ class LogInput : public LogRunnable { volatile bool mIdleFlag; int32_t mEventProcessCount; int32_t mLastUpdateMetricTime; + + IntGaugePtr mLastRunTime; IntGaugePtr mAgentOpenFdTotal; - IntGaugePtr mAgentRegisterHandlerTotal; + IntGaugePtr mRegisterdHandlersTotal; + IntGaugePtr mActiveReadersTotal; + IntGaugePtr mEnableFileIncludedByMultiConfigs; std::atomic_int mLastReadEventTime{0}; mutable std::mutex mThreadRunningMux; diff --git a/core/file_server/polling/PollingDirFile.cpp b/core/file_server/polling/PollingDirFile.cpp index 2bc809de7c..4897624817 100644 --- a/core/file_server/polling/PollingDirFile.cpp +++ b/core/file_server/polling/PollingDirFile.cpp @@ -21,8 +21,6 @@ #endif #include -#include "file_server/polling/PollingEventQueue.h" -#include "file_server/polling/PollingModify.h" #include "app_config/AppConfig.h" #include "common/ErrorUtil.h" #include "common/FileSystemUtil.h" @@ -30,10 +28,13 @@ #include "common/StringTools.h" #include "common/TimeUtil.h" #include "file_server/ConfigManager.h" -#include "file_server/event/Event.h" #include "file_server/FileServer.h" +#include "file_server/event/Event.h" +#include "file_server/polling/PollingEventQueue.h" +#include "file_server/polling/PollingModify.h" #include "logger/Logger.h" #include "monitor/LogtailAlarm.h" +#include 
"monitor/metric_constants/MetricConstants.h" // Control the check frequency to call ClearUnavailableFileAndDir. DEFINE_FLAG_INT32(check_not_exist_file_dir_round, "clear not exist file dir cache, round", 20); @@ -69,10 +70,10 @@ static const int64_t NANO_CONVERTING = 1000000000; void PollingDirFile::Start() { ClearCache(); mAgentConfigTotal = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_PIPELINE_CONFIG_TOTAL); - mAgentPollingDirCacheSizeTotal - = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_POLLING_DIR_CACHE_SIZE_TOTAL); - mAgentPollingFileCacheSizeTotal - = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_POLLING_FILE_CACHE_SIZE_TOTAL); + mPollingDirCacheSize + = FileServer::GetInstance()->GetMetricsRecordRef().CreateIntGauge(METRIC_RUNNER_FILE_POLLING_DIR_CACHE_SIZE); + mPollingFileCacheSize + = FileServer::GetInstance()->GetMetricsRecordRef().CreateIntGauge(METRIC_RUNNER_FILE_POLLING_FILE_CACHE_SIZE); mRuningFlag = true; mThreadPtr = CreateThread([this]() { Polling(); }); } @@ -157,10 +158,10 @@ void PollingDirFile::Polling() { ScopedSpinLock lock(mCacheLock); size_t pollingDirCacheSize = mDirCacheMap.size(); LogtailMonitor::GetInstance()->UpdateMetric("polling_dir_cache", pollingDirCacheSize); - mAgentPollingDirCacheSizeTotal->Set(pollingDirCacheSize); + mPollingDirCacheSize->Set(pollingDirCacheSize); size_t pollingFileCacheSize = mFileCacheMap.size(); LogtailMonitor::GetInstance()->UpdateMetric("polling_file_cache", pollingFileCacheSize); - mAgentPollingFileCacheSizeTotal->Set(pollingFileCacheSize); + mPollingFileCacheSize->Set(pollingFileCacheSize); } // Iterate all normal configs, make sure stat count will not exceed limit. 
diff --git a/core/file_server/polling/PollingDirFile.h b/core/file_server/polling/PollingDirFile.h index 40a4ebfb8b..3e381f80fa 100644 --- a/core/file_server/polling/PollingDirFile.h +++ b/core/file_server/polling/PollingDirFile.h @@ -137,8 +137,8 @@ class PollingDirFile : public LogRunnable { uint64_t mCurrentRound; IntGaugePtr mAgentConfigTotal; - IntGaugePtr mAgentPollingDirCacheSizeTotal; - IntGaugePtr mAgentPollingFileCacheSizeTotal; + IntGaugePtr mPollingDirCacheSize; + IntGaugePtr mPollingFileCacheSize; #ifdef APSARA_UNIT_TEST_MAIN friend class PollingUnittest; diff --git a/core/file_server/polling/PollingModify.cpp b/core/file_server/polling/PollingModify.cpp index 6dac3afad4..0a940cd4a3 100644 --- a/core/file_server/polling/PollingModify.cpp +++ b/core/file_server/polling/PollingModify.cpp @@ -24,9 +24,11 @@ #include "common/Flags.h" #include "common/StringTools.h" #include "common/TimeUtil.h" +#include "file_server/FileServer.h" #include "file_server/event/Event.h" #include "logger/Logger.h" #include "monitor/LogtailAlarm.h" +#include "monitor/metric_constants/MetricConstants.h" using namespace std; @@ -47,7 +49,8 @@ PollingModify::~PollingModify() { void PollingModify::Start() { ClearCache(); - mAgentPollingModifySizeTotal = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_POLLING_MODIFY_SIZE_TOTAL); + mPollingModifySize + = FileServer::GetInstance()->GetMetricsRecordRef().CreateIntGauge(METRIC_RUNNER_FILE_POLLING_MODIFY_CACHE_SIZE); mRuningFlag = true; mThreadPtr = CreateThread([this]() { Polling(); }); @@ -251,7 +254,7 @@ void PollingModify::Polling() { int32_t statCount = 0; size_t pollingModifySizeTotal = mModifyCacheMap.size(); LogtailMonitor::GetInstance()->UpdateMetric("polling_modify_size", pollingModifySizeTotal); - mAgentPollingModifySizeTotal->Set(pollingModifySizeTotal); + mPollingModifySize->Set(pollingModifySizeTotal); for (auto iter = mModifyCacheMap.begin(); iter != mModifyCacheMap.end(); ++iter) { if (!mRuningFlag || 
mHoldOnFlag) break; diff --git a/core/file_server/polling/PollingModify.h b/core/file_server/polling/PollingModify.h index c82337a898..5e3738a92a 100644 --- a/core/file_server/polling/PollingModify.h +++ b/core/file_server/polling/PollingModify.h @@ -101,7 +101,7 @@ class PollingModify : public LogRunnable { ModifyCheckCacheMap mModifyCacheMap; - IntGaugePtr mAgentPollingModifySizeTotal; + IntGaugePtr mPollingModifySize; #ifdef APSARA_UNIT_TEST_MAIN friend class PollingUnittest; diff --git a/core/file_server/reader/LogFileReader.cpp b/core/file_server/reader/LogFileReader.cpp index 499fe63658..2f849b5fcb 100644 --- a/core/file_server/reader/LogFileReader.cpp +++ b/core/file_server/reader/LogFileReader.cpp @@ -48,7 +48,7 @@ #include "logger/Logger.h" #include "monitor/LogFileProfiler.h" #include "monitor/LogtailAlarm.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/queue/ExactlyOnceQueueManager.h" #include "pipeline/queue/ProcessQueueManager.h" #include "pipeline/queue/QueueKeyManager.h" @@ -203,9 +203,9 @@ LogFileReader::LogFileReader(const std::string& hostLogPathDir, void LogFileReader::SetMetrics() { mMetricInited = false; - mMetricLabels = {{METRIC_LABEL_FILE_NAME, GetConvertedPath()}, - {METRIC_LABEL_FILE_DEV, std::to_string(GetDevInode().dev)}, - {METRIC_LABEL_FILE_INODE, std::to_string(GetDevInode().inode)}}; + mMetricLabels = {{METRIC_LABEL_KEY_FILE_NAME, GetConvertedPath()}, + {METRIC_LABEL_KEY_FILE_DEV, std::to_string(GetDevInode().dev)}, + {METRIC_LABEL_KEY_FILE_INODE, std::to_string(GetDevInode().inode)}}; mMetricsRecordRef = FileServer::GetInstance()->GetOrCreateReentrantMetricsRecordRef(GetConfigName(), mMetricLabels); if (mMetricsRecordRef == nullptr) { LOG_ERROR(sLogger, @@ -213,10 +213,11 @@ void LogFileReader::SetMetrics() { return; } - mInputRecordsSizeBytesCounter = mMetricsRecordRef->GetCounter(METRIC_INPUT_RECORDS_SIZE_BYTES); - mInputReadTotalCounter = 
mMetricsRecordRef->GetCounter(METRIC_INPUT_READ_TOTAL); - mInputFileSizeBytesGauge = mMetricsRecordRef->GetIntGauge(METRIC_INPUT_FILE_SIZE_BYTES); - mInputFileOffsetBytesGauge = mMetricsRecordRef->GetIntGauge(METRIC_INPUT_FILE_OFFSET_BYTES); + mOutEventsTotal = mMetricsRecordRef->GetCounter(METRIC_PLUGIN_OUT_EVENTS_TOTAL); + mOutEventGroupsTotal = mMetricsRecordRef->GetCounter(METRIC_PLUGIN_OUT_EVENT_GROUPS_TOTAL); + mOutSizeBytes = mMetricsRecordRef->GetCounter(METRIC_PLUGIN_OUT_SIZE_BYTES); + mSourceSizeBytes = mMetricsRecordRef->GetIntGauge(METRIC_PLUGIN_SOURCE_SIZE_BYTES); + mSourceReadOffsetBytes = mMetricsRecordRef->GetIntGauge(METRIC_PLUGIN_SOURCE_READ_OFFSET_BYTES); mMetricInited = true; } @@ -2133,10 +2134,11 @@ std::unique_ptr LogFileReader::CreateFlushTimeoutEvent() { void LogFileReader::ReportMetrics(uint64_t readSize) { if (mMetricInited) { - mInputReadTotalCounter->Add(1); - mInputRecordsSizeBytesCounter->Add(readSize); - mInputFileOffsetBytesGauge->Set(GetLastFilePos()); - mInputFileSizeBytesGauge->Set(GetFileSize()); + mOutEventsTotal->Add(1); + mOutEventGroupsTotal->Add(1); + mOutSizeBytes->Add(readSize); + mSourceReadOffsetBytes->Set(GetLastFilePos()); + mSourceSizeBytes->Set(GetFileSize()); } } diff --git a/core/file_server/reader/LogFileReader.h b/core/file_server/reader/LogFileReader.h index cbca3ef1ab..e58d6e5279 100644 --- a/core/file_server/reader/LogFileReader.h +++ b/core/file_server/reader/LogFileReader.h @@ -537,10 +537,11 @@ class LogFileReader { MetricLabels mMetricLabels; bool mMetricInited; ReentrantMetricsRecordRef mMetricsRecordRef; - CounterPtr mInputRecordsSizeBytesCounter; - CounterPtr mInputReadTotalCounter; - IntGaugePtr mInputFileSizeBytesGauge; - IntGaugePtr mInputFileOffsetBytesGauge; + CounterPtr mOutEventsTotal; + CounterPtr mOutEventGroupsTotal; + CounterPtr mOutSizeBytes; + IntGaugePtr mSourceSizeBytes; + IntGaugePtr mSourceReadOffsetBytes; private: bool mHasReadContainerBom = false; diff --git 
a/core/go_pipeline/CMakeLists.txt b/core/go_pipeline/CMakeLists.txt index a3e5b332bd..c37baa6da1 100644 --- a/core/go_pipeline/CMakeLists.txt +++ b/core/go_pipeline/CMakeLists.txt @@ -16,9 +16,9 @@ cmake_minimum_required(VERSION 3.22) project(go_pipeline) if (LINUX OR MSVC) - add_library(PluginAdapter SHARED LogtailPluginAdapter.h LogtailPluginAdapter.cpp) + add_library(GoPluginAdapter SHARED LogtailPluginAdapter.h LogtailPluginAdapter.cpp) if (LINUX) - target_link_libraries(PluginAdapter dl) + target_link_libraries(GoPluginAdapter dl) else () add_definitions(-DPLUGIN_ADAPTER_EXPORTS) endif () diff --git a/core/go_pipeline/LogtailPlugin.cpp b/core/go_pipeline/LogtailPlugin.cpp index ccff0763fc..5040db50b1 100644 --- a/core/go_pipeline/LogtailPlugin.cpp +++ b/core/go_pipeline/LogtailPlugin.cpp @@ -45,11 +45,12 @@ LogtailPlugin* LogtailPlugin::s_instance = NULL; LogtailPlugin::LogtailPlugin() { mPluginAdapterPtr = NULL; mPluginBasePtr = NULL; - mLoadConfigFun = NULL; - mHoldOnFun = NULL; - mResumeFun = NULL; + mLoadPipelineFun = NULL; + mUnloadPipelineFun = NULL; + mStopAllPipelinesFun = NULL; + mStopFun = NULL; + mStartFun = NULL; mLoadGlobalConfigFun = NULL; - mProcessRawLogFun = NULL; mPluginValid = false; mPluginAlarmConfig.mLogstore = "logtail_alarm"; mPluginAlarmConfig.mAliuid = STRING_FLAG(logtail_profile_aliuid); @@ -61,7 +62,10 @@ LogtailPlugin::LogtailPlugin() { mPluginContainerConfig.mAliuid = STRING_FLAG(logtail_profile_aliuid); mPluginContainerConfig.mCompressor = CompressorFactory::GetInstance()->Create(CompressType::ZSTD); - mPluginCfg["LogtailSysConfDir"] = AppConfig::GetInstance()->GetLogtailSysConfDir(); + mPluginCfg["LoongcollectorConfDir"] = AppConfig::GetInstance()->GetLoongcollectorConfDir(); + mPluginCfg["LoongcollectorLogDir"] = GetAgentLogDir(); + mPluginCfg["LoongcollectorDataDir"] = GetAgentDataDir(); + mPluginCfg["LoongcollectorThirdPartyDir"] = GetAgentThirdPartyDir(); mPluginCfg["HostIP"] = LogFileProfiler::mIpAddr; 
mPluginCfg["Hostname"] = LogFileProfiler::mHostname; mPluginCfg["EnableContainerdUpperDirDetect"] = BOOL_FLAG(enable_containerd_upper_dir_detect); @@ -83,7 +87,7 @@ bool LogtailPlugin::LoadPipeline(const std::string& pipelineName, LoadPluginBase(); } - if (mPluginValid && mLoadConfigFun != NULL) { + if (mPluginValid && mLoadPipelineFun != NULL) { GoString goProject; GoString goLogstore; GoString goConfigName; @@ -99,31 +103,77 @@ bool LogtailPlugin::LoadPipeline(const std::string& pipelineName, goLogstore.p = logstore.c_str(); long long goLogStoreKey = static_cast(logstoreKey); - return mLoadConfigFun(goProject, goLogstore, goConfigName, goLogStoreKey, goPluginConfig) == 0; + return mLoadPipelineFun(goProject, goLogstore, goConfigName, goLogStoreKey, goPluginConfig) == 0; } return false; } -void LogtailPlugin::HoldOn(bool exitFlag) { - if (mPluginValid && mHoldOnFun != NULL) { - LOG_INFO(sLogger, ("Go pipelines pause", "starts")); - auto holdOnStart = GetCurrentTimeInMilliSeconds(); - mHoldOnFun(exitFlag ? 1 : 0); - auto holdOnCost = GetCurrentTimeInMilliSeconds() - holdOnStart; - LOG_INFO(sLogger, ("Go pipelines pause", "succeeded")("cost", ToString(holdOnCost) + "ms")); - if (holdOnCost >= 60 * 1000) { +bool LogtailPlugin::UnloadPipeline(const std::string& pipelineName) { + if (!mPluginValid) { + LOG_ERROR(sLogger, ("UnloadPipeline", "plugin not valid")); + return false; + } + + if (mPluginValid && mUnloadPipelineFun != NULL) { + GoString goConfigName; + + goConfigName.n = pipelineName.size(); + goConfigName.p = pipelineName.c_str(); + + return mUnloadPipelineFun(goConfigName) == 0; + } + + return false; +} + +void LogtailPlugin::StopAllPipelines(bool withInputFlag) { + if (mPluginValid && mStopAllPipelinesFun != NULL) { + LOG_INFO(sLogger, ("Go pipelines stop all", "starts")); + auto stopAllStart = GetCurrentTimeInMilliSeconds(); + mStopAllPipelinesFun(withInputFlag ? 
1 : 0); + auto stopAllCost = GetCurrentTimeInMilliSeconds() - stopAllStart; + LOG_INFO(sLogger, ("Go pipelines stop all", "succeeded")("cost", ToString(stopAllCost) + "ms")); + if (stopAllCost >= 10 * 1000) { LogtailAlarm::GetInstance()->SendAlarm(HOLD_ON_TOO_SLOW_ALARM, - "Pausing Go pipelines took " + ToString(holdOnCost) + "ms"); + "Stopping all Go pipelines took " + ToString(stopAllCost) + "ms"); + } + } +} + +void LogtailPlugin::Stop(const std::string& configName, bool removedFlag) { + if (mPluginValid && mStopFun != NULL) { + LOG_INFO(sLogger, ("Go pipelines stop", "starts")("config", configName)); + auto stopStart = GetCurrentTimeInMilliSeconds(); + GoString goConfigName; + goConfigName.n = configName.size(); + goConfigName.p = configName.c_str(); + mStopFun(goConfigName, removedFlag ? 1 : 0); + auto stopCost = GetCurrentTimeInMilliSeconds() - stopStart; + LOG_INFO(sLogger, ("Go pipelines stop", "succeeded")("config", configName)("cost", ToString(stopCost) + "ms")); + if (stopCost >= 10 * 1000) { + LogtailAlarm::GetInstance()->SendAlarm( + HOLD_ON_TOO_SLOW_ALARM, "Stopping Go pipeline " + configName + " took " + ToString(stopCost) + "ms"); } } } -void LogtailPlugin::Resume() { - if (mPluginValid && mResumeFun != NULL) { - LOG_INFO(sLogger, ("Go pipelines resume", "starts")); - mResumeFun(); - LOG_INFO(sLogger, ("Go pipelines resume", "succeeded")); +void LogtailPlugin::StopBuiltInModules() { + if (mPluginValid && mStopFun != NULL) { + LOG_INFO(sLogger, ("Go pipelines stop built-in", "starts")); + mStopBuiltInModulesFun(); + LOG_INFO(sLogger, ("Go pipelines stop built-in", "succeeded")); + } +} + +void LogtailPlugin::Start(const std::string& configName) { + if (mPluginValid && mStartFun != NULL) { + LOG_INFO(sLogger, ("Go pipelines start", "starts")("config name", configName)); + GoString goConfigName; + goConfigName.n = configName.size(); + goConfigName.p = configName.c_str(); + mStartFun(goConfigName); + LOG_INFO(sLogger, ("Go pipelines start", 
"succeeded")("config name", configName)); } } @@ -260,7 +310,8 @@ bool LogtailPlugin::LoadPluginBase() { if (mPluginAdapterPtr == NULL) { DynamicLibLoader loader; std::string error; - if (!loader.LoadDynLib("PluginAdapter", error, AppConfig::GetInstance()->GetWorkingDir())) { + // load plugin adapter + if (!loader.LoadDynLib("GoPluginAdapter", error, AppConfig::GetInstance()->GetWorkingDir())) { LOG_ERROR(sLogger, ("open adapter lib error, Message", error)); return mPluginValid; } @@ -277,7 +328,7 @@ bool LogtailPlugin::LoadPluginBase() { } LOG_INFO(sLogger, ("valid plugin adapter version, version", version)); - // Be compatible with old libPluginAdapter.so, V2 -> V1. + // Be compatible with old libGoPluginAdapter.so, V2 -> V1. auto registerV2Fun = (RegisterLogtailCallBackV2)loader.LoadMethod("RegisterLogtailCallBackV2", error); if (error.empty()) { registerV2Fun(LogtailPlugin::IsValidToSend, @@ -304,7 +355,8 @@ bool LogtailPlugin::LoadPluginBase() { if (mPluginBasePtr == NULL) { DynamicLibLoader loader; std::string error; - if (!loader.LoadDynLib("PluginBase", error, AppConfig::GetInstance()->GetWorkingDir())) { + // load plugin base + if (!loader.LoadDynLib("GoPluginBase", error, AppConfig::GetInstance()->GetWorkingDir())) { LOG_ERROR(sLogger, ("open plugin base dl error, Message", error)); return mPluginValid; } @@ -326,39 +378,40 @@ bool LogtailPlugin::LoadPluginBase() { LOG_ERROR(sLogger, ("load LoadGlobalConfig error, Message", error)); return mPluginValid; } - // 加载单个配置,目前应该是Resume的时候,全量加载一次 - mLoadConfigFun = (LoadConfigFun)loader.LoadMethod("LoadConfig", error); + // 加载单个配置 + mLoadPipelineFun = (LoadPipelineFun)loader.LoadMethod("LoadPipeline", error); if (!error.empty()) { - LOG_ERROR(sLogger, ("load LoadConfig error, Message", error)); + LOG_ERROR(sLogger, ("load LoadPipelineFun error, Message", error)); return mPluginValid; } - // 更新配置,目前应该没有调用点 - mUnloadConfigFun = (UnloadConfigFun)loader.LoadMethod("UnloadConfig", error); + // 卸载单个配置 + 
mUnloadPipelineFun = (UnloadPipelineFun)loader.LoadMethod("UnloadPipeline", error); if (!error.empty()) { - LOG_ERROR(sLogger, ("load UnloadConfig error, Message", error)); + LOG_ERROR(sLogger, ("load UnloadPipelineFun error, Message", error)); return mPluginValid; } - // 插件暂停 - mHoldOnFun = (HoldOnFun)loader.LoadMethod("HoldOn", error); + // 停止所有插件 + mStopAllPipelinesFun = (StopAllPipelinesFun)loader.LoadMethod("StopAllPipelines", error); if (!error.empty()) { - LOG_ERROR(sLogger, ("load HoldOn error, Message", error)); + LOG_ERROR(sLogger, ("load StopAllPipelines error, Message", error)); return mPluginValid; } - // 插件恢复 - mResumeFun = (ResumeFun)loader.LoadMethod("Resume", error); + // 停止单个插件 + mStopFun = (StopFun)loader.LoadMethod("Stop", error); if (!error.empty()) { - LOG_ERROR(sLogger, ("load Resume error, Message", error)); + LOG_ERROR(sLogger, ("load Stop error, Message", error)); return mPluginValid; } - // C++传递原始二进制数据到golang插件,v1和v2的区别:是否传递tag - mProcessRawLogFun = (ProcessRawLogFun)loader.LoadMethod("ProcessRawLog", error); + // 停止内置功能 + mStopBuiltInModulesFun = (StopBuiltInModulesFun)loader.LoadMethod("StopBuiltInModules", error); if (!error.empty()) { - LOG_ERROR(sLogger, ("load ProcessRawLog error, Message", error)); + LOG_ERROR(sLogger, ("load StopBuiltInModules error, Message", error)); return mPluginValid; } - mProcessRawLogV2Fun = (ProcessRawLogV2Fun)loader.LoadMethod("ProcessRawLogV2", error); + // 插件恢复 + mStartFun = (StartFun)loader.LoadMethod("Start", error); if (!error.empty()) { - LOG_ERROR(sLogger, ("load ProcessRawLogV2 error, Message", error)); + LOG_ERROR(sLogger, ("load Start error, Message", error)); return mPluginValid; } // C++获取容器信息的 diff --git a/core/go_pipeline/LogtailPlugin.h b/core/go_pipeline/LogtailPlugin.h index 5ed3d22242..120573bff6 100644 --- a/core/go_pipeline/LogtailPlugin.h +++ b/core/go_pipeline/LogtailPlugin.h @@ -132,12 +132,12 @@ struct K8sContainerMeta { // Methods export by plugin. 
typedef GoInt (*LoadGlobalConfigFun)(GoString); -typedef GoInt (*LoadConfigFun)(GoString p, GoString l, GoString c, GoInt64 k, GoString p2); -typedef GoInt (*UnloadConfigFun)(GoString p, GoString l, GoString c); -typedef GoInt (*ProcessRawLogFun)(GoString c, GoSlice l, GoString p, GoString t); -typedef GoInt (*ProcessRawLogV2Fun)(GoString c, GoSlice l, GoString p, GoString t, GoSlice tags); -typedef void (*HoldOnFun)(GoInt); -typedef void (*ResumeFun)(); +typedef GoInt (*LoadPipelineFun)(GoString p, GoString l, GoString c, GoInt64 k, GoString p2); +typedef GoInt (*UnloadPipelineFun)(GoString c); +typedef void (*StopAllPipelinesFun)(GoInt); +typedef void (*StopFun)(GoString, GoInt); +typedef void (*StopBuiltInModulesFun)(); +typedef void (*StartFun)(GoString); typedef GoInt (*InitPluginBaseFun)(); typedef GoInt (*InitPluginBaseV2Fun)(GoString cfg); typedef GoInt (*ProcessLogsFun)(GoString c, GoSlice l, GoString p, GoString t, GoSlice tags); @@ -207,29 +207,20 @@ class LogtailPlugin { } bool LoadPluginBase(); - // void LoadConfig(); bool LoadPipeline(const std::string& pipelineName, const std::string& pipeline, const std::string& project = "", const std::string& logstore = "", const std::string& region = "", logtail::QueueKey logstoreKey = 0); - void HoldOn(bool exitFlag); - void Resume(); + bool UnloadPipeline(const std::string& pipelineName); + void StopAllPipelines(bool withInputFlag); + void Stop(const std::string& configName, bool removingFlag); + void StopBuiltInModules(); + void Start(const std::string& configName); bool IsPluginOpened() { return mPluginValid; } - // void ProcessRawLog(const std::string& configName, - // logtail::StringView rawLog, - // const std::string& packId, - // const std::string& topic); - - // void ProcessRawLogV2(const std::string& configName, - // logtail::StringView rawLog, - // const std::string& packId, - // const std::string& topic, - // const std::string& tags); - void ProcessLog(const std::string& configName, sls_logs::Log& 
log, const std::string& packId, @@ -271,12 +262,12 @@ class LogtailPlugin { void* mPluginAdapterPtr; LoadGlobalConfigFun mLoadGlobalConfigFun; - LoadConfigFun mLoadConfigFun; - UnloadConfigFun mUnloadConfigFun; - HoldOnFun mHoldOnFun; - ResumeFun mResumeFun; - ProcessRawLogFun mProcessRawLogFun; - ProcessRawLogV2Fun mProcessRawLogV2Fun; + LoadPipelineFun mLoadPipelineFun; + UnloadPipelineFun mUnloadPipelineFun; + StopAllPipelinesFun mStopAllPipelinesFun; + StopFun mStopFun; + StopBuiltInModulesFun mStopBuiltInModulesFun; + StartFun mStartFun; volatile bool mPluginValid; logtail::FlusherSLS mPluginAlarmConfig; logtail::FlusherSLS mPluginProfileConfig; diff --git a/core/go_pipeline/LogtailPluginAdapter.cpp b/core/go_pipeline/LogtailPluginAdapter.cpp index 8f2906fc6f..a5a5f9e48a 100644 --- a/core/go_pipeline/LogtailPluginAdapter.cpp +++ b/core/go_pipeline/LogtailPluginAdapter.cpp @@ -21,7 +21,7 @@ SendPbV2Fun gAdapterSendPbV2Fun = NULL; PluginCtlCmdFun gPluginCtlCmdFun = NULL; void RegisterLogtailCallBack(IsValidToSendFun checkFun, SendPbFun sendFun, PluginCtlCmdFun cmdFun) { - fprintf(stderr, "[PluginAdapter] register fun %p %p %p\n", checkFun, sendFun, cmdFun); + fprintf(stderr, "[GoPluginAdapter] register fun %p %p %p\n", checkFun, sendFun, cmdFun); gAdapterIsValidToSendFun = checkFun; gAdapterSendPbFun = sendFun; gPluginCtlCmdFun = cmdFun; diff --git a/core/logger/Logger.cpp b/core/logger/Logger.cpp index da5bbd1f2b..c754d5b37b 100644 --- a/core/logger/Logger.cpp +++ b/core/logger/Logger.cpp @@ -13,21 +13,23 @@ // limitations under the License. 
#include "Logger.h" -#include + #include #include #include #include + +#include +#include + +#include "app_config/AppConfig.h" +#include "common/ErrorUtil.h" #include "common/ExceptionBase.h" -#include "common/RuntimeUtil.h" -#include "common/StringTools.h" #include "common/FileSystemUtil.h" #include "common/Flags.h" -#include "common/ErrorUtil.h" -#include "common/FileSystemUtil.h" -#include +#include "common/RuntimeUtil.h" +#include "common/StringTools.h" -DEFINE_FLAG_STRING(logtail_snapshot_dir, "snapshot dir on local disk", "snapshot"); DEFINE_FLAG_BOOL(logtail_async_logger_enable, "", true); DEFINE_FLAG_INT32(logtail_async_logger_queue_size, "", 1024); DEFINE_FLAG_INT32(logtail_async_logger_thread_num, "", 1); @@ -89,9 +91,8 @@ Logger::Logger() { INT32_FLAG(logtail_async_logger_thread_num)); } - auto execDir = GetProcessExecutionDir(); - mInnerLogger.open(execDir + "logger_initialization.log"); - LoadConfig(execDir + "apsara_log_conf.json"); + mInnerLogger.open(GetAgentLogDir() + "logger_initialization.log"); + LoadConfig(GetAgentConfDir() + "apsara_log_conf.json"); mInnerLogger.close(); } @@ -106,8 +107,9 @@ void Logger::LogMsg(const std::string& msg) { } void Logger::InitGlobalLoggers() { - if (!sLogger) - sLogger = GetLogger("/apsara/sls/ilogtail"); + if (!sLogger) { + sLogger = GetLogger(GetAgentLoggersPrefix()); + } } Logger::logger Logger::CreateLogger(const std::string& loggerName, @@ -124,7 +126,7 @@ Logger::logger Logger::CreateLogger(const std::string& loggerName, if (!absoluteFilePath.empty() && absoluteFilePath[0] != '/') #endif { - absoluteFilePath = GetProcessExecutionDir() + absoluteFilePath; + absoluteFilePath = GetAgentLogDir() + absoluteFilePath; } LogMsg("Path of logger named " + loggerName + ": " + absoluteFilePath); @@ -171,7 +173,7 @@ Logger::logger Logger::GetLogger(const std::string& loggerName) { // // Attributes for the logger, (sink_name, log_level). // "AsyncFileSink": "WARNING" // }, -// "/apsara/sls/ilogtail": { // Another logger. 
+// "/apsara/loongcollector": { // Another logger. // // .... // } // }, @@ -323,7 +325,7 @@ void Logger::LoadConfig(const std::string& filePath) { spdlog::register_logger(logger); logger->set_pattern(DEFAULT_PATTERN); - if (name == "/apsara/sls/ilogtail" && !aliyun_logtail_log_level.empty()) { + if (name == GetAgentLoggersPrefix() && !aliyun_logtail_log_level.empty()) { logger->set_level(envLogLevel); logger->flush_on(envLogLevel); } else { @@ -404,25 +406,25 @@ void Logger::LoadDefaultConfig(std::map& loggerCfgs, if (sinkCfgs.find("AsyncFileSink") != sinkCfgs.end()) return; sinkCfgs.insert({"AsyncFileSink", - SinkConfig{"AsyncFile", 10, 20000000, 300, GetProcessExecutionDir() + "ilogtail.LOG", "Gzip"}}); + SinkConfig{"AsyncFile", 10, 20000000, 300, GetAgentLogDir() + GetAgentLogName(), "Gzip"}}); } void Logger::LoadAllDefaultConfigs(std::map& loggerCfgs, std::map& sinkCfgs) { LoadDefaultConfig(loggerCfgs, sinkCfgs); - loggerCfgs.insert({"/apsara/sls/ilogtail", LoggerConfig{"AsyncFileSink", level::info}}); - loggerCfgs.insert({"/apsara/sls/ilogtail/profile", LoggerConfig{"AsyncFileSinkProfile", level::info}}); - loggerCfgs.insert({"/apsara/sls/ilogtail/status", LoggerConfig{"AsyncFileSinkStatus", level::info}}); + loggerCfgs.insert({GetAgentLoggersPrefix(), LoggerConfig{"AsyncFileSink", level::info}}); + loggerCfgs.insert({GetAgentLoggersPrefix() + "/profile", LoggerConfig{"AsyncFileSinkProfile", level::info}}); + loggerCfgs.insert({GetAgentLoggersPrefix() + "/status", LoggerConfig{"AsyncFileSinkStatus", level::info}}); - std::string dirPath = GetProcessExecutionDir() + STRING_FLAG(logtail_snapshot_dir); + std::string dirPath = GetAgentSnapshotDir(); if (!Mkdir(dirPath)) { LogMsg(std::string("Create snapshot dir error ") + dirPath + ", error" + ErrnoToString(GetErrno())); } sinkCfgs.insert( - {"AsyncFileSinkProfile", SinkConfig{"AsyncFile", 61, 1, 1, dirPath + PATH_SEPARATOR + "ilogtail_profile.LOG"}}); + {"AsyncFileSinkProfile", SinkConfig{"AsyncFile", 61, 1, 1, 
dirPath + PATH_SEPARATOR + GetAgentProfileLogName()}}); sinkCfgs.insert( - {"AsyncFileSinkStatus", SinkConfig{"AsyncFile", 61, 1, 1, dirPath + PATH_SEPARATOR + "ilogtail_status.LOG"}}); + {"AsyncFileSinkStatus", SinkConfig{"AsyncFile", 61, 1, 1, dirPath + PATH_SEPARATOR + GetAgentStatusLogName()}}); } void Logger::EnsureSnapshotDirExist(std::map& sinkCfgs) { diff --git a/core/logtail.cpp b/core/logtail.cpp index 0164239022..52e7e7db9c 100644 --- a/core/logtail.cpp +++ b/core/logtail.cpp @@ -16,6 +16,7 @@ #include #include +#include "app_config/AppConfig.h" #include "application/Application.h" #include "common/ErrorUtil.h" #include "common/Flags.h" @@ -73,10 +74,6 @@ void enable_core(void) { static void overwrite_community_edition_flags() { // support run in installation dir on default - STRING_FLAG(logtail_sys_conf_dir) = "."; - STRING_FLAG(check_point_filename) = "checkpoint/logtail_check_point"; - STRING_FLAG(default_buffer_file_path) = "checkpoint"; - STRING_FLAG(ilogtail_docker_file_path_config) = "checkpoint/docker_path_config.json"; STRING_FLAG(metrics_report_method) = ""; INT32_FLAG(data_server_port) = 443; BOOL_FLAG(enable_env_ref_in_config) = true; @@ -86,6 +83,8 @@ static void overwrite_community_edition_flags() { // Main routine of worker process. 
void do_worker_process() { + CreateAgentDir(); + Logger::Instance().InitGlobalLoggers(); struct sigaction sigtermSig; diff --git a/core/logtail_windows.cpp b/core/logtail_windows.cpp index b3d2ec50cc..6ff1bf28e6 100644 --- a/core/logtail_windows.cpp +++ b/core/logtail_windows.cpp @@ -16,6 +16,7 @@ #include #include +#include "app_config/AppConfig.h" #include "application/Application.h" #include "common/Flags.h" #include "logger/Logger.h" @@ -35,10 +36,6 @@ DECLARE_FLAG_BOOL(enable_containerd_upper_dir_detect); static void overwrite_community_edition_flags() { // support run in installation dir on default - STRING_FLAG(logtail_sys_conf_dir) = "."; - STRING_FLAG(check_point_filename) = "checkpoint/logtail_check_point"; - STRING_FLAG(default_buffer_file_path) = "checkpoint"; - STRING_FLAG(ilogtail_docker_file_path_config) = "checkpoint/docker_path_config.json"; STRING_FLAG(metrics_report_method) = ""; INT32_FLAG(data_server_port) = 443; BOOL_FLAG(enable_env_ref_in_config) = true; @@ -47,6 +44,8 @@ static void overwrite_community_edition_flags() { } void do_worker_process() { + CreateAgentDir(); + Logger::Instance().InitGlobalLoggers(); // Initialize Winsock. 
diff --git a/core/models/PipelineEventGroup.h b/core/models/PipelineEventGroup.h index a64325940e..a83be6e61d 100644 --- a/core/models/PipelineEventGroup.h +++ b/core/models/PipelineEventGroup.h @@ -55,7 +55,6 @@ enum class EventGroupMetaKey { PROMETHEUS_SCRAPE_RESPONSE_SIZE, PROMETHEUS_SAMPLES_SCRAPED, PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC, - PROMETHEUS_INSTANCE, PROMETHEUS_UP_STATE, SOURCE_ID diff --git a/core/monitor/LogFileProfiler.cpp b/core/monitor/LogFileProfiler.cpp index 76068f2f63..9b72bb0462 100644 --- a/core/monitor/LogFileProfiler.cpp +++ b/core/monitor/LogFileProfiler.cpp @@ -31,7 +31,6 @@ #include "pipeline/queue/QueueKeyManager.h" DEFINE_FLAG_INT32(profile_data_send_interval, "interval of send LogFile/DomainSocket profile data, seconds", 600); -DEFINE_FLAG_STRING(logtail_profile_snapshot, "reader profile on local disk", "logtail_profile_snapshot"); using namespace std; using namespace sls_logs; @@ -51,8 +50,8 @@ LogFileProfiler::LogFileProfiler() { srand(time(NULL)); mSendInterval = INT32_FLAG(profile_data_send_interval); mLastSendTime = time(NULL) - (rand() % (mSendInterval / 10)) * 10; - mDumpFileName = GetProcessExecutionDir() + STRING_FLAG(logtail_profile_snapshot); - mBakDumpFileName = GetProcessExecutionDir() + STRING_FLAG(logtail_profile_snapshot) + "_bak"; + mDumpFileName = GetProfileSnapshotDumpFileName(); + mBakDumpFileName = GetProfileSnapshotDumpFileName() + "_bak"; mHostname = GetHostName(); #if defined(_MSC_VER) @@ -94,7 +93,7 @@ bool LogFileProfiler::GetProfileData(LogGroup& logGroup, LogStoreStatistic* stat contentPtr->set_value(statistic->mHostLogPath); } contentPtr = logPtr->add_contents(); - contentPtr->set_key("logtail_version"); + contentPtr->set_key("loongcollector_version"); contentPtr->set_value(ILOGTAIL_VERSION); contentPtr = logPtr->add_contents(); contentPtr->set_key("source_ip"); @@ -463,7 +462,7 @@ void LogFileProfiler::DumpToLocal(int32_t curTime, bool forceSend, Json::Value& ("rename profile snapshot fail, file", 
mDumpFileName)("error", ErrnoToString(GetErrno()))); } - static auto gProfileLogger = Logger::Instance().GetLogger("/apsara/sls/ilogtail/profile"); + static auto gProfileLogger = Logger::Instance().GetLogger(GetAgentLoggersPrefix() + "/profile"); LOG_INFO(gProfileLogger, ("\n", styledRoot)); } diff --git a/core/monitor/LogtailAlarm.cpp b/core/monitor/LogtailAlarm.cpp index cde2320161..267c0518fd 100644 --- a/core/monitor/LogtailAlarm.cpp +++ b/core/monitor/LogtailAlarm.cpp @@ -22,10 +22,10 @@ #include "common/Thread.h" #include "common/TimeUtil.h" #include "common/version.h" -#include "protobuf/sls/sls_logs.pb.h" -#include "provider/Provider.h" #include "pipeline/queue/QueueKeyManager.h" #include "pipeline/queue/SenderQueueManager.h" +#include "protobuf/sls/sls_logs.pb.h" +#include "provider/Provider.h" DEFINE_FLAG_INT32(logtail_alarm_interval, "the interval of two same type alarm message", 30); DEFINE_FLAG_INT32(logtail_low_level_alarm_speed, "the speed(count/second) which logtail's low level alarm allow", 100); @@ -103,6 +103,9 @@ LogtailAlarm::LogtailAlarm() { mMessageType[OBSERVER_RUNTIME_ALARM] = "OBSERVER_RUNTIME_ALARM"; mMessageType[OBSERVER_STOP_ALARM] = "OBSERVER_STOP_ALARM"; mMessageType[INVALID_CONTAINER_PATH_ALARM] = "INVALID_CONTAINER_PATH_ALARM"; + mMessageType[COMPRESS_FAIL_ALARM] = "COMPRESS_FAIL_ALARM"; + mMessageType[SERIALIZE_FAIL_ALARM] = "SERIALIZE_FAIL_ALARM"; + mMessageType[RELABEL_METRIC_FAIL_ALARM] = "RELABEL_METRIC_FAIL_ALARM"; } void LogtailAlarm::Init() { diff --git a/core/monitor/LogtailAlarm.h b/core/monitor/LogtailAlarm.h index 19e36a8571..4dc88a8b5d 100644 --- a/core/monitor/LogtailAlarm.h +++ b/core/monitor/LogtailAlarm.h @@ -96,10 +96,10 @@ enum LogtailAlarmType { OBSERVER_RUNTIME_ALARM = 62, OBSERVER_STOP_ALARM = 63, INVALID_CONTAINER_PATH_ALARM = 64, - ALL_LOGTAIL_ALARM_NUM = 65, - COMPRESS_FAIL_ALARM = 66, - SERIALIZE_FAIL_ALARM = 67, - RELABEL_METRIC_FAIL_ALARM = 68 + COMPRESS_FAIL_ALARM = 65, + SERIALIZE_FAIL_ALARM = 66, + 
RELABEL_METRIC_FAIL_ALARM = 67, + ALL_LOGTAIL_ALARM_NUM = 68 }; struct LogtailAlarmMessage { diff --git a/core/monitor/LogtailMetric.cpp b/core/monitor/LogtailMetric.cpp index d6ddc5e768..525bccf5b2 100644 --- a/core/monitor/LogtailMetric.cpp +++ b/core/monitor/LogtailMetric.cpp @@ -25,6 +25,9 @@ using namespace sls_logs; namespace logtail { +const std::string LABEL_PREFIX = "label."; +const std::string VALUE_PREFIX = "value."; + MetricsRecord::MetricsRecord(MetricLabelsPtr labels, DynamicMetricLabelsPtr dynamicLabels) : mLabels(labels), mDynamicLabels(dynamicLabels), mDeleted(false) { } @@ -205,34 +208,11 @@ WriteMetrics::~WriteMetrics() { Clear(); } -void WriteMetrics::PreparePluginCommonLabels(const std::string& projectName, - const std::string& logstoreName, - const std::string& region, - const std::string& configName, - const std::string& pluginType, - const std::string& pluginID, - const std::string& nodeID, - const std::string& childNodeID, - MetricLabels& labels) { - labels.emplace_back(std::make_pair(METRIC_LABEL_PROJECT, projectName)); - labels.emplace_back(std::make_pair(METRIC_LABEL_LOGSTORE, logstoreName)); - labels.emplace_back(std::make_pair(METRIC_LABEL_REGION, region)); - labels.emplace_back(std::make_pair(METRIC_LABEL_CONFIG_NAME, configName)); - labels.emplace_back(std::make_pair(METRIC_LABEL_PLUGIN_NAME, pluginType)); - labels.emplace_back(std::make_pair(METRIC_LABEL_PLUGIN_ID, pluginID)); - labels.emplace_back(std::make_pair(METRIC_LABEL_NODE_ID, nodeID)); - labels.emplace_back(std::make_pair(METRIC_LABEL_CHILD_NODE_ID, childNodeID)); -} - void WriteMetrics::PrepareMetricsRecordRef(MetricsRecordRef& ref, MetricLabels&& labels, DynamicMetricLabels&& dynamicLabels) { - MetricsRecord* cur = new MetricsRecord(std::make_shared(labels), - std::make_shared(dynamicLabels)); - ref.SetMetricsRecord(cur); - std::lock_guard lock(mMutex); - cur->SetNext(mHead); - mHead = cur; + CreateMetricsRecordRef(ref, std::move(labels), std::move(dynamicLabels)); + 
CommitMetricsRecordRef(ref); } void WriteMetrics::CreateMetricsRecordRef(MetricsRecordRef& ref, @@ -348,7 +328,9 @@ ReadMetrics::~ReadMetrics() { Clear(); } -void ReadMetrics::ReadAsLogGroup(std::map& logGroupMap) const { +void ReadMetrics::ReadAsLogGroup(const std::string& regionFieldName, + const std::string& defaultRegion, + std::map& logGroupMap) const { ReadLock lock(mReadWriteLock); MetricsRecord* tmp = mHead; while (tmp) { @@ -356,7 +338,7 @@ void ReadMetrics::ReadAsLogGroup(std::map& log for (auto item = tmp->GetLabels()->begin(); item != tmp->GetLabels()->end(); ++item) { std::pair pair = *item; - if (METRIC_FIELD_REGION == pair.first) { + if (regionFieldName == pair.first) { std::map::iterator iter; std::string region = pair.second; iter = logGroupMap.find(region); @@ -372,14 +354,14 @@ void ReadMetrics::ReadAsLogGroup(std::map& log } if (!logPtr) { std::map::iterator iter; - iter = logGroupMap.find(METRIC_REGION_DEFAULT); + iter = logGroupMap.find(defaultRegion); if (iter != logGroupMap.end()) { sls_logs::LogGroup* logGroup = iter->second; logPtr = logGroup->add_logs(); } else { sls_logs::LogGroup* logGroup = new sls_logs::LogGroup(); logPtr = logGroup->add_logs(); - logGroupMap.insert(std::pair(METRIC_REGION_DEFAULT, logGroup)); + logGroupMap.insert(std::pair(defaultRegion, logGroup)); } } auto now = GetCurrentLogtailTime(); diff --git a/core/monitor/LogtailMetric.h b/core/monitor/LogtailMetric.h index 6c98081b2a..197691ff58 100644 --- a/core/monitor/LogtailMetric.h +++ b/core/monitor/LogtailMetric.h @@ -133,15 +133,7 @@ class WriteMetrics { static WriteMetrics* ptr = new WriteMetrics(); return ptr; } - void PreparePluginCommonLabels(const std::string& projectName, - const std::string& logstoreName, - const std::string& region, - const std::string& configName, - const std::string& pluginType, - const std::string& pluginID, - const std::string& nodeID, - const std::string& childNodeID, - MetricLabels& labels); + void 
PrepareMetricsRecordRef(MetricsRecordRef& ref, MetricLabels&& labels, DynamicMetricLabels&& dynamicLabels = {}); void CreateMetricsRecordRef(MetricsRecordRef& ref, MetricLabels&& labels, DynamicMetricLabels&& dynamicLabels = {}); @@ -168,7 +160,9 @@ class ReadMetrics { static ReadMetrics* ptr = new ReadMetrics(); return ptr; } - void ReadAsLogGroup(std::map& logGroupMap) const; + void ReadAsLogGroup(const std::string& regionFieldName, + const std::string& defaultRegion, + std::map& logGroupMap) const; void ReadAsFileBuffer(std::string& metricsContent) const; void UpdateMetrics(); diff --git a/core/monitor/MetricConstants.cpp b/core/monitor/MetricConstants.cpp deleted file mode 100644 index 24212c9d3c..0000000000 --- a/core/monitor/MetricConstants.cpp +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2023 iLogtail Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "MetricConstants.h" - -namespace logtail { - - -const std::string METRIC_FIELD_REGION = "region"; -const std::string METRIC_REGION_DEFAULT = "default"; -const std::string METRIC_SLS_LOGSTORE_NAME = "shennong_log_profile"; -const std::string METRIC_TOPIC_TYPE = "logtail_metric"; -const std::string METRIC_TOPIC_FIELD_NAME = "__topic__"; - -const std::string LABEL_PREFIX = "label."; -const std::string VALUE_PREFIX = "value."; - -// global metrics labels - -const std::string METRIC_LABEL_ALIUIDS = "aliuids"; -const std::string METRIC_LABEL_INSTANCE_ID = "instance_id"; -const std::string METRIC_LABEL_IP = "ip"; -const std::string METRIC_LABEL_OS = "os"; -const std::string METRIC_LABEL_OS_DETAIL = "os_detail"; -const std::string METRIC_LABEL_USER_DEFINED_ID = "user_defined_id"; -const std::string METRIC_LABEL_UUID = "uuid"; -const std::string METRIC_LABEL_VERSION = "version"; - -// global metrics values - -const std::string METRIC_AGENT_CPU = "agent_cpu_percent"; -const std::string METRIC_AGENT_CPU_GO = "agent_go_cpu_percent"; -const std::string METRIC_AGENT_MEMORY = "agent_memory_used_mb"; -const std::string METRIC_AGENT_MEMORY_GO = "agent_go_memory_used_mb"; -const std::string METRIC_AGENT_GO_ROUTINES_TOTAL = "agent_go_routines_total"; -const std::string METRIC_AGENT_OPEN_FD_TOTAL = "agent_open_fd_total"; -const std::string METRIC_AGENT_POLLING_DIR_CACHE_SIZE_TOTAL = "agent_polling_dir_cache_size_total"; -const std::string METRIC_AGENT_POLLING_FILE_CACHE_SIZE_TOTAL = "agent_polling_file_cache_size_total"; -const std::string METRIC_AGENT_POLLING_MODIFY_SIZE_TOTAL = "agent_polling_modify_size_total"; -const std::string METRIC_AGENT_REGISTER_HANDLER_TOTAL = "agent_register_handler_total"; -const std::string METRIC_AGENT_INSTANCE_CONFIG_TOTAL = "agent_instance_config_total"; -const std::string METRIC_AGENT_PIPELINE_CONFIG_TOTAL = "agent_pipeline_config_total"; -const std::string METRIC_AGENT_ENV_PIPELINE_CONFIG_TOTAL = "agent_env_pipeline_config_total"; -const 
std::string METRIC_AGENT_CRD_PIPELINE_CONFIG_TOTAL = "agent_crd_pipeline_config_total"; -const std::string METRIC_AGENT_CONSOLE_PIPELINE_CONFIG_TOTAL = "agent_console_pipeline_config_total"; -const std::string METRIC_AGENT_PLUGIN_TOTAL = "agent_plugin_total"; -const std::string METRIC_AGENT_PROCESS_QUEUE_FULL_TOTAL = "agent_process_queue_full_total"; -const std::string METRIC_AGENT_PROCESS_QUEUE_TOTAL = "agent_process_queue_total"; -const std::string METRIC_AGENT_SEND_QUEUE_FULL_TOTAL = "agent_send_queue_full_total"; -const std::string METRIC_AGENT_SEND_QUEUE_TOTAL = "agent_send_queue_total"; -const std::string METRIC_AGENT_USED_SENDING_CONCURRENCY = "agent_used_sending_concurrency"; - -// common plugin labels -const std::string METRIC_LABEL_PROJECT = "project"; -const std::string METRIC_LABEL_LOGSTORE = "logstore"; -const std::string METRIC_LABEL_REGION = "region"; -const std::string METRIC_LABEL_CONFIG_NAME = "config_name"; -const std::string METRIC_LABEL_PLUGIN_NAME = "plugin_name"; -const std::string METRIC_LABEL_PLUGIN_ID = "plugin_id"; -const std::string METRIC_LABEL_NODE_ID = "node_id"; -const std::string METRIC_LABEL_CHILD_NODE_ID = "child_node_id"; - -const std::string METRIC_LABEL_KEY_COMPONENT_NAME = "component_name"; -const std::string METRIC_LABEL_KEY_QUEUE_TYPE = "queue_type"; -const std::string METRIC_LABEL_KEY_EXACTLY_ONCE_FLAG = "is_exactly_once"; -const std::string METRIC_LABEL_KEY_FLUSHER_NODE_ID = "flusher_node_id"; - -// input file plugin labels -const std::string METRIC_LABEL_FILE_DEV = "file_dev"; -const std::string METRIC_LABEL_FILE_INODE = "file_inode"; -const std::string METRIC_LABEL_FILE_NAME = "file_name"; - -// input file metrics -const std::string METRIC_INPUT_RECORDS_TOTAL = "input_records_total"; -const std::string METRIC_INPUT_RECORDS_SIZE_BYTES = "input_records_size_bytes"; -const std::string METRIC_INPUT_BATCH_TOTAL = "input_batch_total"; -const std::string METRIC_INPUT_READ_TOTAL = "input_read_total"; -const std::string 
METRIC_INPUT_FILE_SIZE_BYTES = "input_file_size_bytes"; -const std::string METRIC_INPUT_FILE_READ_DELAY_TIME_MS = "input_file_read_delay_time_ms"; -const std::string METRIC_INPUT_FILE_OFFSET_BYTES = "input_file_offset_bytes"; -const std::string METRIC_INPUT_FILE_MONITOR_TOTAL = "input_file_monitor_total"; - -// processor common metrics -const std::string METRIC_PROC_IN_RECORDS_TOTAL = "proc_in_records_total"; -const std::string METRIC_PROC_IN_RECORDS_SIZE_BYTES = "proc_in_records_size_bytes"; -const std::string METRIC_PROC_OUT_RECORDS_TOTAL = "proc_out_records_total"; -const std::string METRIC_PROC_OUT_RECORDS_SIZE_BYTES = "proc_out_records_size_bytes"; -const std::string METRIC_PROC_DISCARD_RECORDS_TOTAL = "proc_discard_records_total"; -const std::string METRIC_PROC_TIME_MS = "proc_time_ms"; - -// processor cunstom metrics -const std::string METRIC_PROC_PARSE_IN_SIZE_BYTES = "proc_parse_in_size_bytes"; -const std::string METRIC_PROC_PARSE_OUT_SIZE_BYTES = "proc_parse_out_size_bytes"; - -const std::string METRIC_PROC_PARSE_ERROR_TOTAL = "proc_parse_error_total"; -const std::string METRIC_PROC_PARSE_SUCCESS_TOTAL = "proc_parse_success_total"; -const std::string METRIC_PROC_KEY_COUNT_NOT_MATCH_ERROR_TOTAL = "proc_key_count_not_match_error_total"; -const std::string METRIC_PROC_HISTORY_FAILURE_TOTAL = "proc_history_failure_total"; - -const std::string METRIC_PROC_SPLIT_MULTILINE_LOG_MATCHED_RECORDS_TOTAL - = "proc_split_multiline_log_matched_records_total"; -const std::string METRIC_PROC_SPLIT_MULTILINE_LOG_MATCHED_LINES_TOTAL = "proc_split_multiline_log_matched_lines_total"; -const std::string METRIC_PROC_SPLIT_MULTILINE_LOG_UNMATCHED_LINES_TOTAL - = "proc_split_multiline_log_unmatched_lines_total"; - -// processor filter metrics -const std::string METRIC_PROC_FILTER_IN_SIZE_BYTES = "proc_filter_in_size_bytes"; -const std::string METRIC_PROC_FILTER_OUT_SIZE_BYTES = "proc_filter_out_size_bytes"; -const std::string METRIC_PROC_FILTER_ERROR_TOTAL = 
"proc_filter_error_total"; -const std::string METRIC_PROC_FILTER_RECORDS_TOTAL = "proc_filter_records_total"; - -// processore plugin name -const std::string PLUGIN_PROCESSOR_PARSE_REGEX_NATIVE = "processor_parse_regex_native"; - -// processor desensitize metrics -const std::string METRIC_PROC_DESENSITIZE_RECORDS_TOTAL = "proc_desensitize_records_total"; - -// processor merge multiline log metrics -const std::string METRIC_PROC_MERGE_MULTILINE_LOG_MERGED_RECORDS_TOTAL - = "proc_merge_multiline_log_merged_records_total"; -const std::string METRIC_PROC_MERGE_MULTILINE_LOG_UNMATCHED_RECORDS_TOTAL - = "proc_merge_multiline_log_unmatched_records_total"; - -// processor parse container log native metrics -const std::string METRIC_PROC_PARSE_STDOUT_TOTAL = "proc_parse_stdout_total"; -const std::string METRIC_PROC_PARSE_STDERR_TOTAL = "proc_parse_stderr_total"; - -// flusher common metrics -const std::string METRIC_FLUSHER_ERROR_TOTAL = "flusher_error_total"; -const std::string METRIC_FLUSHER_DISCARD_RECORDS_TOTAL = "flusher_discard_records_total"; -const std::string METRIC_FLUSHER_SUCCESS_RECORDS_TOTAL = "flusher_success_records_total"; -const std::string METRIC_FLUSHER_SUCCESS_TIME_MS = "flusher_success_time_ms"; -const std::string METRIC_FLUSHER_ERROR_TIME_MS = "flusher_error_time_ms"; - -// flusher sls metrics -const std::string METRIC_FLUSHER_NETWORK_ERROR_TOTAL = "flusher_network_error_total"; -const std::string METRIC_FLUSHER_QUOTA_ERROR_TOTAL = "flusher_quota_error_total"; -const std::string METRIC_FLUSHER_RETRIES_TOTAL = "flusher_retries_total"; -const std::string METRIC_FLUSHER_RETRIES_ERROR_TOTAL = "flusher_retries_error_total"; - -const std::string METRIC_IN_EVENTS_CNT = "in_events_cnt"; -const std::string METRIC_IN_ITEMS_CNT = "in_items_cnt"; -const std::string METRIC_IN_EVENT_GROUP_SIZE_BYTES = "in_event_group_data_size_bytes"; -const std::string METRIC_IN_ITEM_SIZE_BYTES = "in_item_data_size_bytes"; -const std::string METRIC_OUT_EVENTS_CNT = 
"out_events_cnt"; -const std::string METRIC_OUT_ITEMS_CNT = "out_items_cnt"; -const std::string METRIC_OUT_EVENT_GROUP_SIZE_BYTES = "out_event_group_data_size_bytes"; -const std::string METRIC_OUT_ITEM_SIZE_BYTES = "out_item_data_size_bytes"; -const std::string METRIC_TOTAL_DELAY_MS = "total_delay_ms"; - -} // namespace logtail diff --git a/core/monitor/MetricConstants.h b/core/monitor/MetricConstants.h deleted file mode 100644 index 3b78a5eae0..0000000000 --- a/core/monitor/MetricConstants.h +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright 2022 iLogtail Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once -#include - -namespace logtail { - -extern const std::string METRIC_FIELD_REGION; -extern const std::string METRIC_REGION_DEFAULT; -extern const std::string METRIC_SLS_LOGSTORE_NAME; -extern const std::string METRIC_TOPIC_TYPE; -extern const std::string METRIC_TOPIC_FIELD_NAME; - -extern const std::string LABEL_PREFIX; -extern const std::string VALUE_PREFIX; - -// global metrics labels - -extern const std::string METRIC_LABEL_ALIUIDS; -extern const std::string METRIC_LABEL_INSTANCE_ID; -extern const std::string METRIC_LABEL_IP; -extern const std::string METRIC_LABEL_OS; -extern const std::string METRIC_LABEL_OS_DETAIL; -extern const std::string METRIC_LABEL_USER_DEFINED_ID; -extern const std::string METRIC_LABEL_UUID; -extern const std::string METRIC_LABEL_VERSION; - -// global metrics values - -extern const std::string METRIC_AGENT_CPU; -extern const std::string METRIC_AGENT_CPU_GO; -extern const std::string METRIC_AGENT_MEMORY; -extern const std::string METRIC_AGENT_MEMORY_GO; -extern const std::string METRIC_AGENT_GO_ROUTINES_TOTAL; -extern const std::string METRIC_AGENT_OPEN_FD_TOTAL; -extern const std::string METRIC_AGENT_POLLING_DIR_CACHE_SIZE_TOTAL; -extern const std::string METRIC_AGENT_POLLING_FILE_CACHE_SIZE_TOTAL; -extern const std::string METRIC_AGENT_POLLING_MODIFY_SIZE_TOTAL; -extern const std::string METRIC_AGENT_REGISTER_HANDLER_TOTAL; -extern const std::string METRIC_AGENT_INSTANCE_CONFIG_TOTAL; -extern const std::string METRIC_AGENT_PIPELINE_CONFIG_TOTAL; -extern const std::string METRIC_AGENT_ENV_PIPELINE_CONFIG_TOTAL; -extern const std::string METRIC_AGENT_CRD_PIPELINE_CONFIG_TOTAL; -extern const std::string METRIC_AGENT_CONSOLE_PIPELINE_CONFIG_TOTAL; -extern const std::string METRIC_AGENT_PLUGIN_TOTAL; -extern const std::string METRIC_AGENT_PROCESS_QUEUE_FULL_TOTAL; -extern const std::string METRIC_AGENT_PROCESS_QUEUE_TOTAL; -extern const std::string METRIC_AGENT_SEND_QUEUE_FULL_TOTAL; -extern const std::string 
METRIC_AGENT_SEND_QUEUE_TOTAL; -extern const std::string METRIC_AGENT_USED_SENDING_CONCURRENCY; - -// common plugin labels -extern const std::string METRIC_LABEL_PROJECT; -extern const std::string METRIC_LABEL_LOGSTORE; -extern const std::string METRIC_LABEL_REGION; -extern const std::string METRIC_LABEL_CONFIG_NAME; -extern const std::string METRIC_LABEL_PLUGIN_NAME; -extern const std::string METRIC_LABEL_PLUGIN_ID; -extern const std::string METRIC_LABEL_NODE_ID; -extern const std::string METRIC_LABEL_CHILD_NODE_ID; - -// input file plugin labels -extern const std::string METRIC_LABEL_FILE_DEV; -extern const std::string METRIC_LABEL_FILE_INODE; -extern const std::string METRIC_LABEL_FILE_NAME; - -extern const std::string METRIC_LABEL_KEY_COMPONENT_NAME; -extern const std::string METRIC_LABEL_KEY_QUEUE_TYPE; -extern const std::string METRIC_LABEL_KEY_EXACTLY_ONCE_FLAG; -extern const std::string METRIC_LABEL_KEY_FLUSHER_NODE_ID; - -// input file metrics -extern const std::string METRIC_INPUT_RECORDS_TOTAL; -extern const std::string METRIC_INPUT_RECORDS_SIZE_BYTES; -extern const std::string METRIC_INPUT_BATCH_TOTAL; -extern const std::string METRIC_INPUT_READ_TOTAL; -extern const std::string METRIC_INPUT_FILE_SIZE_BYTES; -extern const std::string METRIC_INPUT_FILE_READ_DELAY_TIME_MS; -extern const std::string METRIC_INPUT_FILE_OFFSET_BYTES; -extern const std::string METRIC_INPUT_FILE_MONITOR_TOTAL; - -// processor common metrics -extern const std::string METRIC_PROC_IN_RECORDS_TOTAL; -extern const std::string METRIC_PROC_IN_RECORDS_SIZE_BYTES; -extern const std::string METRIC_PROC_OUT_RECORDS_TOTAL; -extern const std::string METRIC_PROC_OUT_RECORDS_SIZE_BYTES; -extern const std::string METRIC_PROC_DISCARD_RECORDS_TOTAL; -extern const std::string METRIC_PROC_TIME_MS; - -// processor custom metrics -extern const std::string METRIC_PROC_PARSE_IN_SIZE_BYTES; -extern const std::string METRIC_PROC_PARSE_OUT_SIZE_BYTES; -extern const std::string 
METRIC_PROC_PARSE_ERROR_TOTAL; -extern const std::string METRIC_PROC_PARSE_SUCCESS_TOTAL; -extern const std::string METRIC_PROC_KEY_COUNT_NOT_MATCH_ERROR_TOTAL; -extern const std::string METRIC_PROC_HISTORY_FAILURE_TOTAL; -extern const std::string METRIC_PROC_SPLIT_MULTILINE_LOG_MATCHED_RECORDS_TOTAL; -extern const std::string METRIC_PROC_SPLIT_MULTILINE_LOG_MATCHED_LINES_TOTAL; -extern const std::string METRIC_PROC_SPLIT_MULTILINE_LOG_UNMATCHED_LINES_TOTAL; - -// processor filter metrics -extern const std::string METRIC_PROC_FILTER_IN_SIZE_BYTES; -extern const std::string METRIC_PROC_FILTER_OUT_SIZE_BYTES; -extern const std::string METRIC_PROC_FILTER_ERROR_TOTAL; -extern const std::string METRIC_PROC_FILTER_RECORDS_TOTAL; - -// processor desensitize metrics -extern const std::string METRIC_PROC_DESENSITIZE_RECORDS_TOTAL; - -// processor merge multiline log metrics -extern const std::string METRIC_PROC_MERGE_MULTILINE_LOG_MERGED_RECORDS_TOTAL; -extern const std::string METRIC_PROC_MERGE_MULTILINE_LOG_UNMATCHED_RECORDS_TOTAL; - -// processor parse container log native metrics -extern const std::string METRIC_PROC_PARSE_STDOUT_TOTAL; -extern const std::string METRIC_PROC_PARSE_STDERR_TOTAL; - -// flusher common metrics -extern const std::string METRIC_FLUSHER_ERROR_TOTAL; -extern const std::string METRIC_FLUSHER_DISCARD_RECORDS_TOTAL; -extern const std::string METRIC_FLUSHER_SUCCESS_RECORDS_TOTAL; -extern const std::string METRIC_FLUSHER_SUCCESS_TIME_MS; -extern const std::string METRIC_FLUSHER_ERROR_TIME_MS; - -// flusher sls metrics -extern const std::string METRIC_FLUSHER_NETWORK_ERROR_TOTAL; -extern const std::string METRIC_FLUSHER_QUOTA_ERROR_TOTAL; -extern const std::string METRIC_FLUSHER_RETRIES_TOTAL; -extern const std::string METRIC_FLUSHER_RETRIES_ERROR_TOTAL; - -extern const std::string METRIC_IN_EVENTS_CNT; -extern const std::string METRIC_IN_ITEMS_CNT; -extern const std::string METRIC_IN_EVENT_GROUP_SIZE_BYTES; -extern const std::string 
METRIC_IN_ITEM_SIZE_BYTES; -extern const std::string METRIC_OUT_EVENTS_CNT; -extern const std::string METRIC_OUT_ITEMS_CNT; -extern const std::string METRIC_OUT_EVENT_GROUP_SIZE_BYTES; -extern const std::string METRIC_OUT_ITEM_SIZE_BYTES; -extern const std::string METRIC_TOTAL_DELAY_MS; - -} // namespace logtail diff --git a/core/monitor/MetricExportor.cpp b/core/monitor/MetricExportor.cpp index e8198d8150..99c4d2894f 100644 --- a/core/monitor/MetricExportor.cpp +++ b/core/monitor/MetricExportor.cpp @@ -16,6 +16,7 @@ #include +#include "app_config/AppConfig.h" #include "LogFileProfiler.h" #include "LogtailMetric.h" #include "MetricConstants.h" @@ -24,8 +25,8 @@ #include "common/RuntimeUtil.h" #include "common/TimeUtil.h" #include "go_pipeline/LogtailPlugin.h" -#include "protobuf/sls/sls_logs.pb.h" #include "pipeline/PipelineManager.h" +#include "protobuf/sls/sls_logs.pb.h" using namespace sls_logs; using namespace std; @@ -34,13 +35,17 @@ DECLARE_FLAG_STRING(metrics_report_method); namespace logtail { +const string METRIC_REGION_FIELD_NAME = "region"; +const string METRIC_REGION_DEFAULT = "default"; +const string METRIC_SLS_LOGSTORE_NAME = "shennong_log_profile"; +const string METRIC_TOPIC_TYPE = "loong_collector_metric"; + const std::string METRIC_EXPORT_TYPE_GO = "direct"; const std::string METRIC_EXPORT_TYPE_CPP = "cpp_provided"; MetricExportor::MetricExportor() : mSendInterval(60), mLastSendTime(time(NULL) - (rand() % (mSendInterval / 10)) * 10) { - // mAgentCpuGo = LoongCollectorMonitor::GetInstance()->GetDoubleGauge(METRIC_AGENT_CPU_GO); mAgentMemGo = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_MEMORY_GO); - mAgentGoRoutines = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_GO_ROUTINES_TOTAL); + mAgentGoRoutines = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_GO_ROUTINES_TOTAL); } void MetricExportor::PushMetrics(bool forceSend) { @@ -63,7 +68,7 @@ void MetricExportor::PushCppMetrics() { if ("sls" == 
STRING_FLAG(metrics_report_method)) { std::map logGroupMap; - ReadMetrics::GetInstance()->ReadAsLogGroup(logGroupMap); + ReadMetrics::GetInstance()->ReadAsLogGroup(METRIC_REGION_FIELD_NAME, METRIC_REGION_DEFAULT, logGroupMap); SendToSLS(logGroupMap); } else if ("file" == STRING_FLAG(metrics_report_method)) { std::string metricsContent; @@ -90,8 +95,7 @@ void MetricExportor::SendToSLS(std::map& logGr logGroup->set_source(LogFileProfiler::mIpAddr); logGroup->set_topic(METRIC_TOPIC_TYPE); if (METRIC_REGION_DEFAULT == iter->first) { - GetProfileSender()->SendToProfileProject(GetProfileSender()->GetDefaultProfileRegion(), - *logGroup); + GetProfileSender()->SendToProfileProject(GetProfileSender()->GetDefaultProfileRegion(), *logGroup); } else { GetProfileSender()->SendToProfileProject(iter->first, *logGroup); } @@ -105,7 +109,7 @@ void MetricExportor::SendToLocalFile(std::string& metricsContent, const std::str if (!metricsContent.empty()) { // 创建输出目录(如果不存在) - std::string outputDirectory = GetProcessExecutionDir() + "/" + metricsDirName; + std::string outputDirectory = GetAgentLogDir() + metricsDirName; Mkdirs(outputDirectory); std::vector metricFiles; @@ -173,9 +177,6 @@ void MetricExportor::PushGoCppProvidedMetrics(std::vectorSet(std::stod(metric.second)); - // } if (metric.first == METRIC_AGENT_MEMORY_GO) { mAgentMemGo->Set(std::stoi(metric.second)); } diff --git a/core/monitor/Monitor.cpp b/core/monitor/Monitor.cpp index f3bfe2b391..b4af8d2e45 100644 --- a/core/monitor/Monitor.cpp +++ b/core/monitor/Monitor.cpp @@ -116,8 +116,6 @@ bool LogtailMonitor::Init() { // init metrics mAgentCpuGauge = LoongCollectorMonitor::GetInstance()->GetDoubleGauge(METRIC_AGENT_CPU); mAgentMemoryGauge = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_MEMORY); - mAgentUsedSendingConcurrency - = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_USED_SENDING_CONCURRENCY); // Initialize monitor thread. 
mThreadRes = async(launch::async, &LogtailMonitor::Monitor, this); @@ -286,7 +284,6 @@ bool LogtailMonitor::SendStatusProfile(bool suicide) { AddLogContent(logPtr, "projects", FlusherSLS::GetAllProjects()); AddLogContent(logPtr, "instance_id", Application::GetInstance()->GetInstanceId()); AddLogContent(logPtr, "instance_key", id); - AddLogContent(logPtr, "syslog_open", AppConfig::GetInstance()->GetOpenStreamLog()); // Host informations. AddLogContent(logPtr, "ip", LogFileProfiler::mIpAddr); AddLogContent(logPtr, "hostname", LogFileProfiler::mHostname); @@ -320,7 +317,6 @@ bool LogtailMonitor::SendStatusProfile(bool suicide) { } int32_t usedSendingConcurrency = FlusherRunner::GetInstance()->GetSendingBufferCount(); UpdateMetric("used_sending_concurrency", usedSendingConcurrency); - mAgentUsedSendingConcurrency->Set(usedSendingConcurrency); AddLogContent(logPtr, "metric_json", MetricToString()); AddLogContent(logPtr, "status", CheckLogtailStatus()); @@ -489,12 +485,12 @@ void LogtailMonitor::DumpToLocal(const sls_logs::LogGroup& logGroup) { } dumpStr += "####status end####\n"; - static auto gMonitorLogger = Logger::Instance().GetLogger("/apsara/sls/ilogtail/status"); + static auto gMonitorLogger = Logger::Instance().GetLogger(GetAgentLoggersPrefix() + "/status"); LOG_INFO(gMonitorLogger, ("\n", dumpStr)); } bool LogtailMonitor::DumpMonitorInfo(time_t monitorTime) { - string path = GetProcessExecutionDir() + "logtail_monitor_info"; + string path = GetAgentLogDir() + "loongcollector_monitor_info"; ofstream outfile(path.c_str(), ofstream::app); if (!outfile) return false; @@ -595,9 +591,9 @@ bool LogtailMonitor::CalCpuCores() { // Use mCpuArrayForScale and mOsCpuArrayForScale to calculate if ilogtail can scale up // to use more CPU or scale down. void LogtailMonitor::CheckScaledCpuUsageUpLimit() { - // flag(cpu_usage_up_limit) or cpu_usage_limit in ilogtail_config.json. + // flag(cpu_usage_up_limit) or cpu_usage_limit in loongcollector_config.json. 
float cpuUsageUpLimit = AppConfig::GetInstance()->GetCpuUsageUpLimit(); - // flag(machine_cpu_usage_threshold) or same name in ilogtail_config.json. + // flag(machine_cpu_usage_threshold) or same name in loongcollector_config.json. float machineCpuUsageThreshold = AppConfig::GetInstance()->GetMachineCpuUsageThreshold(); // mScaledCpuUsageUpLimit is greater or equal than cpuUsageUpLimit. // It will be increased when Monitor finds the global CPU usage is low, which means @@ -707,18 +703,18 @@ LoongCollectorMonitor* LoongCollectorMonitor::GetInstance() { void LoongCollectorMonitor::Init() { // create metric record MetricLabels labels; - labels.emplace_back(METRIC_LABEL_INSTANCE_ID, Application::GetInstance()->GetInstanceId()); - labels.emplace_back(METRIC_LABEL_IP, LogFileProfiler::mIpAddr); - labels.emplace_back(METRIC_LABEL_OS, OS_NAME); - labels.emplace_back(METRIC_LABEL_OS_DETAIL, LogFileProfiler::mOsDetail); - labels.emplace_back(METRIC_LABEL_UUID, Application::GetInstance()->GetUUID()); - labels.emplace_back(METRIC_LABEL_VERSION, ILOGTAIL_VERSION); + labels.emplace_back(METRIC_LABEL_KEY_INSTANCE_ID, Application::GetInstance()->GetInstanceId()); + labels.emplace_back(METRIC_LABEL_KEY_IP, LogFileProfiler::mIpAddr); + labels.emplace_back(METRIC_LABEL_KEY_OS, OS_NAME); + labels.emplace_back(METRIC_LABEL_KEY_OS_DETAIL, LogFileProfiler::mOsDetail); + labels.emplace_back(METRIC_LABEL_KEY_UUID, Application::GetInstance()->GetUUID()); + labels.emplace_back(METRIC_LABEL_KEY_VERSION, ILOGTAIL_VERSION); DynamicMetricLabels dynamicLabels; - dynamicLabels.emplace_back(METRIC_LABEL_PROJECT, []() -> std::string { return FlusherSLS::GetAllProjects(); }); + dynamicLabels.emplace_back(METRIC_LABEL_KEY_PROJECT, []() -> std::string { return FlusherSLS::GetAllProjects(); }); #ifdef __ENTERPRISE__ - dynamicLabels.emplace_back(METRIC_LABEL_ALIUIDS, + dynamicLabels.emplace_back(METRIC_LABEL_KEY_ALIUIDS, []() -> std::string { return 
EnterpriseConfigProvider::GetInstance()->GetAliuidSet(); }); - dynamicLabels.emplace_back(METRIC_LABEL_USER_DEFINED_ID, []() -> std::string { + dynamicLabels.emplace_back(METRIC_LABEL_KEY_USER_DEFINED_ID, []() -> std::string { return EnterpriseConfigProvider::GetInstance()->GetUserDefinedIdSet(); }); #endif @@ -726,38 +722,14 @@ void LoongCollectorMonitor::Init() { mMetricsRecordRef, std::move(labels), std::move(dynamicLabels)); // init value mDoubleGauges[METRIC_AGENT_CPU] = mMetricsRecordRef.CreateDoubleGauge(METRIC_AGENT_CPU); - // mDoubleGauges[METRIC_AGENT_CPU_GO] = mMetricsRecordRef.CreateDoubleGauge(METRIC_AGENT_CPU_GO); mIntGauges[METRIC_AGENT_MEMORY] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_MEMORY); mIntGauges[METRIC_AGENT_MEMORY_GO] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_MEMORY_GO); mIntGauges[METRIC_AGENT_GO_ROUTINES_TOTAL] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_GO_ROUTINES_TOTAL); mIntGauges[METRIC_AGENT_OPEN_FD_TOTAL] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_OPEN_FD_TOTAL); - mIntGauges[METRIC_AGENT_POLLING_DIR_CACHE_SIZE_TOTAL] - = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_POLLING_DIR_CACHE_SIZE_TOTAL); - mIntGauges[METRIC_AGENT_POLLING_FILE_CACHE_SIZE_TOTAL] - = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_POLLING_FILE_CACHE_SIZE_TOTAL); - mIntGauges[METRIC_AGENT_POLLING_MODIFY_SIZE_TOTAL] - = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_POLLING_MODIFY_SIZE_TOTAL); - mIntGauges[METRIC_AGENT_REGISTER_HANDLER_TOTAL] - = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_REGISTER_HANDLER_TOTAL); // mIntGauges[METRIC_AGENT_INSTANCE_CONFIG_TOTAL] = // mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_INSTANCE_CONFIG_TOTAL); mIntGauges[METRIC_AGENT_PIPELINE_CONFIG_TOTAL] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_PIPELINE_CONFIG_TOTAL); - // mIntGauges[METRIC_AGENT_ENV_PIPELINE_CONFIG_TOTAL] = - // mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_ENV_PIPELINE_CONFIG_TOTAL); - // 
mIntGauges[METRIC_AGENT_CRD_PIPELINE_CONFIG_TOTAL] = - // mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_CRD_PIPELINE_CONFIG_TOTAL); - // mIntGauges[METRIC_AGENT_CONSOLE_PIPELINE_CONFIG_TOTAL] - // = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_CONSOLE_PIPELINE_CONFIG_TOTAL); - // mIntGauges[METRIC_AGENT_PLUGIN_TOTAL] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_PLUGIN_TOTAL); - mIntGauges[METRIC_AGENT_PROCESS_QUEUE_FULL_TOTAL] - = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_PROCESS_QUEUE_FULL_TOTAL); - mIntGauges[METRIC_AGENT_PROCESS_QUEUE_TOTAL] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_PROCESS_QUEUE_TOTAL); - mIntGauges[METRIC_AGENT_SEND_QUEUE_FULL_TOTAL] - = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_SEND_QUEUE_FULL_TOTAL); - mIntGauges[METRIC_AGENT_SEND_QUEUE_TOTAL] = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_SEND_QUEUE_TOTAL); - mIntGauges[METRIC_AGENT_USED_SENDING_CONCURRENCY] - = mMetricsRecordRef.CreateIntGauge(METRIC_AGENT_USED_SENDING_CONCURRENCY); LOG_INFO(sLogger, ("LoongCollectorMonitor", "started")); } diff --git a/core/monitor/Monitor.h b/core/monitor/Monitor.h index 5fcc551dd5..0a4d557361 100644 --- a/core/monitor/Monitor.h +++ b/core/monitor/Monitor.h @@ -167,8 +167,6 @@ class LogtailMonitor : public MetricStore { MemStat mMemStat; IntGaugePtr mAgentMemoryGauge; - IntGaugePtr mAgentUsedSendingConcurrency; - // Current scale up level, updated by CheckScaledCpuUsageUpLimit. float mScaledCpuUsageUpLimit; #if defined(__linux__) diff --git a/core/monitor/metric_constants/AgentMetrics.cpp b/core/monitor/metric_constants/AgentMetrics.cpp new file mode 100644 index 0000000000..87d282bf60 --- /dev/null +++ b/core/monitor/metric_constants/AgentMetrics.cpp @@ -0,0 +1,41 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "MetricConstants.h" + +using namespace std; + +namespace logtail { + +// label keys +const string METRIC_LABEL_KEY_ALIUIDS = "aliuids"; +const string METRIC_LABEL_KEY_INSTANCE_ID = "instance_id"; +const string METRIC_LABEL_KEY_IP = "ip"; +const string METRIC_LABEL_KEY_OS = "os"; +const string METRIC_LABEL_KEY_OS_DETAIL = "os_detail"; +const string METRIC_LABEL_KEY_PROJECT = "project"; +const string METRIC_LABEL_KEY_USER_DEFINED_ID = "user_defined_id"; +const string METRIC_LABEL_KEY_UUID = "uuid"; +const string METRIC_LABEL_KEY_VERSION = "version"; + +// metric keys +const string METRIC_AGENT_CPU = "agent_cpu_percent"; +const string METRIC_AGENT_GO_ROUTINES_TOTAL = "agent_go_routines_total"; +const string METRIC_AGENT_INSTANCE_CONFIG_TOTAL = "agent_instance_config_total"; // Not Implemented +const string METRIC_AGENT_MEMORY = "agent_memory_used_mb"; +const string METRIC_AGENT_MEMORY_GO = "agent_go_memory_used_mb"; +const string METRIC_AGENT_OPEN_FD_TOTAL = "agent_open_fd_total"; +const string METRIC_AGENT_PIPELINE_CONFIG_TOTAL = "agent_pipeline_config_total"; + +} // namespace logtail diff --git a/core/monitor/metric_constants/ComponentMetrics.cpp b/core/monitor/metric_constants/ComponentMetrics.cpp new file mode 100644 index 0000000000..06b040569a --- /dev/null +++ b/core/monitor/metric_constants/ComponentMetrics.cpp @@ -0,0 +1,74 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "MetricConstants.h" + +using namespace std; + +namespace logtail { + +// label keys +const string METRIC_LABEL_KEY_COMPONENT_NAME = "component_name"; +const string METRIC_LABEL_KEY_FLUSHER_PLUGIN_ID = "flusher_plugin_id"; +const string METRIC_LABEL_KEY_EXACTLY_ONCE_FLAG = "exactly_once_enabled"; +const string METRIC_LABEL_KEY_QUEUE_TYPE = "queue_type"; + +// label values +const string METRIC_LABEL_VALUE_COMPONENT_NAME_BATCHER = "batcher"; +const string METRIC_LABEL_VALUE_COMPONENT_NAME_COMPRESSOR = "compressor"; +const string METRIC_LABEL_VALUE_COMPONENT_NAME_PROCESS_QUEUE = "process_queue"; +const string METRIC_LABEL_VALUE_COMPONENT_NAME_ROUTER = "router"; +const string METRIC_LABEL_VALUE_COMPONENT_NAME_SENDER_QUEUE = "sender_queue"; +const string METRIC_LABEL_VALUE_COMPONENT_NAME_SERIALIZER = "serializer"; + +// metric keys +const string METRIC_COMPONENT_IN_EVENTS_TOTAL = "component_in_events_total"; +const string METRIC_COMPONENT_IN_SIZE_BYTES = "component_in_size_bytes"; +const string METRIC_COMPONENT_IN_ITEMS_TOTAL = "component_in_items_total"; +const string METRIC_COMPONENT_OUT_EVENTS_TOTAL = "component_out_events_total"; +const string METRIC_COMPONENT_OUT_ITEMS_TOTAL = "component_out_items_total"; +const string METRIC_COMPONENT_OUT_SIZE_BYTES = "component_out_size_bytes"; +const string METRIC_COMPONENT_TOTAL_DELAY_MS = "component_total_delay_ms"; +const string METRIC_COMPONENT_TOTAL_PROCESS_TIME_MS = "component_total_process_time_ms"; +const string METRIC_COMPONENT_DISCARDED_ITEMS_TOTAL = "component_discarded_items_total"; +const 
string METRIC_COMPONENT_DISCARDED_ITEMS_SIZE_BYTES = "component_discarded_item_size_bytes"; + +/********************************************************** + * batcher + **********************************************************/ +const string METRIC_COMPONENT_BATCHER_EVENT_BATCHES_TOTAL = "component_event_batches_total"; +const string METRIC_COMPONENT_BATCHER_BUFFERED_GROUPS_TOTAL = "component_buffered_groups_total"; +const string METRIC_COMPONENT_BATCHER_BUFFERED_EVENTS_TOTAL = "component_buffered_events_total"; +const string METRIC_COMPONENT_BATCHER_BUFFERED_SIZE_BYTES = "component_buffered_size_bytes"; + +/********************************************************** + * queue + **********************************************************/ +const string METRIC_COMPONENT_QUEUE_SIZE = "component_queue_size"; +const string METRIC_COMPONENT_QUEUE_SIZE_BYTES = "component_queue_size_bytes"; +const string METRIC_COMPONENT_QUEUE_VALID_TO_PUSH_FLAG = "component_valid_to_push_status"; +const string METRIC_COMPONENT_QUEUE_EXTRA_BUFFER_SIZE = "component_extra_buffer_size"; +const string METRIC_COMPONENT_QUEUE_EXTRA_BUFFER_SIZE_BYTES = "component_extra_buffer_size_bytes"; +const string METRIC_COMPONENT_QUEUE_DISCARDED_EVENTS_TOTAL = "component_discarded_events_total"; + +const string METRIC_COMPONENT_FETCH_TIMES_TOTAL = "component_fetch_times_total"; +const string METRIC_COMPONENT_FETCHED_ITEMS_TOTAL = "componet_fetched_items_total"; + +const string METRIC_COMPONENT_FETCH_REJECTED_BY_REGION_LIMITER_TIMES_TOTAL = "component_fetch_rejected_by_region_limiter_times_total"; +const string METRIC_COMPONENT_FETCH_REJECTED_BY_PROJECT_LIMITER_TIMES_TOTAL = "component_fetch_rejected_by_project_limiter_times_total"; +const string METRIC_COMPONENT_FETCH_REJECTED_BY_LOGSTORE_LIMITER_TIMES_TOTAL = "component_fetch_rejected_by_logstore_limiter_times_total"; + +const string METRIC_COMPONENT_FETCH_REJECTED_BY_RATE_LIMITER_TIMES_TOTAL = "component_fetch_rejected_by_rate_limiter_times_total"; + +} 
// namespace logtail diff --git a/core/monitor/metric_constants/MetricConstants.h b/core/monitor/metric_constants/MetricConstants.h new file mode 100644 index 0000000000..9b8abceab7 --- /dev/null +++ b/core/monitor/metric_constants/MetricConstants.h @@ -0,0 +1,267 @@ +/* + * Copyright 2022 iLogtail Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once +#include + +namespace logtail { + +////////////////////////////////////////////////////////////////////////// +// agent +////////////////////////////////////////////////////////////////////////// + +// label keys +extern const std::string METRIC_LABEL_KEY_ALIUIDS; +extern const std::string METRIC_LABEL_KEY_INSTANCE_ID; +extern const std::string METRIC_LABEL_KEY_IP; +extern const std::string METRIC_LABEL_KEY_OS; +extern const std::string METRIC_LABEL_KEY_OS_DETAIL; +extern const std::string METRIC_LABEL_KEY_PROJECT; +extern const std::string METRIC_LABEL_KEY_USER_DEFINED_ID; +extern const std::string METRIC_LABEL_KEY_UUID; +extern const std::string METRIC_LABEL_KEY_VERSION; + +// metric keys +extern const std::string METRIC_AGENT_CPU; +extern const std::string METRIC_AGENT_GO_ROUTINES_TOTAL; +extern const std::string METRIC_AGENT_INSTANCE_CONFIG_TOTAL; +extern const std::string METRIC_AGENT_MEMORY; +extern const std::string METRIC_AGENT_MEMORY_GO; +extern const std::string METRIC_AGENT_OPEN_FD_TOTAL; +extern const std::string METRIC_AGENT_PIPELINE_CONFIG_TOTAL; + 
+////////////////////////////////////////////////////////////////////////// +// pipeline +////////////////////////////////////////////////////////////////////////// + +// label keys +extern const std::string METRIC_LABEL_KEY_LOGSTORE; +extern const std::string METRIC_LABEL_KEY_PIPELINE_NAME; +extern const std::string METRIC_LABEL_KEY_REGION; + +// metric keys +extern const std::string METRIC_PIPELINE_PROCESSORS_IN_EVENTS_TOTAL; +extern const std::string METRIC_PIPELINE_PROCESSORS_IN_EVENT_GROUPS_TOTAL; +extern const std::string METRIC_PIPELINE_PROCESSORS_IN_SIZE_BYTES; +extern const std::string METRIC_PIPELINE_PROCESSORS_TOTAL_PROCESS_TIME_MS; +extern const std::string METRIC_PIPELINE_START_TIME; + +////////////////////////////////////////////////////////////////////////// +// plugin +////////////////////////////////////////////////////////////////////////// + +// label keys +extern const std::string METRIC_LABEL_KEY_PLUGIN_ID; +extern const std::string METRIC_LABEL_KEY_PLUGIN_TYPE; + +// metric keys +extern const std::string METRIC_PLUGIN_IN_EVENTS_TOTAL; +extern const std::string METRIC_PLUGIN_IN_EVENT_GROUPS_TOTAL; +extern const std::string METRIC_PLUGIN_IN_SIZE_BYTES; +extern const std::string METRIC_PLUGIN_OUT_EVENTS_TOTAL; +extern const std::string METRIC_PLUGIN_OUT_EVENT_GROUPS_TOTAL; +extern const std::string METRIC_PLUGIN_OUT_SIZE_BYTES; +extern const std::string METRIC_PLUGIN_TOTAL_DELAY_MS; +extern const std::string METRIC_PLUGIN_TOTAL_PROCESS_TIME_MS; + +/********************************************************** + * input_file + * input_container_stdio + **********************************************************/ +extern const std::string METRIC_LABEL_KEY_FILE_DEV; +extern const std::string METRIC_LABEL_KEY_FILE_INODE; +extern const std::string METRIC_LABEL_KEY_FILE_NAME; + +extern const std::string METRIC_PLUGIN_MONITOR_FILE_TOTAL; +extern const std::string METRIC_PLUGIN_SOURCE_READ_OFFSET_BYTES; +extern const std::string 
METRIC_PLUGIN_SOURCE_SIZE_BYTES; + +/********************************************************** + * input_prometheus + **********************************************************/ +extern const std::string METRIC_LABEL_KEY_JOB; +extern const std::string METRIC_LABEL_KEY_POD_NAME; +extern const std::string METRIC_LABEL_KEY_SERVICE_HOST; +extern const std::string METRIC_LABEL_KEY_SERVICE_PORT; +extern const std::string METRIC_LABEL_KEY_STATUS; +extern const std::string METRIC_LABEL_KEY_INSTANCE; + +extern const std::string METRIC_PLUGIN_PROM_SUBSCRIBE_TARGETS; +extern const std::string METRIC_PLUGIN_PROM_SUBSCRIBE_TOTAL; +extern const std::string METRIC_PLUGIN_PROM_SUBSCRIBE_TIME_MS; +extern const std::string METRIC_PLUGIN_PROM_SCRAPE_TIME_MS; +extern const std::string METRIC_PLUGIN_PROM_SCRAPE_DELAY_TOTAL; + +/********************************************************** + * all processor (所有解析类的处理插件通用指标。Todo:目前统计还不全、不准确) + **********************************************************/ +extern const std::string METRIC_PLUGIN_DISCARDED_EVENTS_TOTAL; +extern const std::string METRIC_PLUGIN_OUT_FAILED_EVENTS_TOTAL; +extern const std::string METRIC_PLUGIN_OUT_KEY_NOT_FOUND_EVENTS_TOTAL; +extern const std::string METRIC_PLUGIN_OUT_SUCCESSFUL_EVENTS_TOTAL; + + +/********************************************************** + * all flusher (所有发送插件通用指标) + **********************************************************/ +extern const std::string METRIC_PLUGIN_FLUSHER_OUT_EVENT_GROUPS_TOTAL; +extern const std::string METRIC_PLUGIN_FLUSHER_SEND_DONE_TOTAL; +extern const std::string METRIC_PLUGIN_FLUSHER_SUCCESS_TOTAL; +extern const std::string METRIC_PLUGIN_FLUSHER_NETWORK_ERROR_TOTAL; +extern const std::string METRIC_PLUGIN_FLUSHER_SERVER_ERROR_TOTAL; +extern const std::string METRIC_PLUGIN_FLUSHER_UNAUTH_ERROR_TOTAL; +extern const std::string METRIC_PLUGIN_FLUSHER_PARAMS_ERROR_TOTAL; +extern const std::string METRIC_PLUGIN_FLUSHER_OTHER_ERROR_TOTAL; + 
+/********************************************************** + * processor_parse_apsara_native + * processor_parse_timestamp_native + **********************************************************/ +extern const std::string METRIC_PLUGIN_HISTORY_FAILURE_TOTAL; + +/********************************************************** + * processor_split_multiline_log_string_native + **********************************************************/ +extern const std::string METRIC_PLUGIN_MATCHED_EVENTS_TOTAL; +extern const std::string METRIC_PLUGIN_MATCHED_LINES_TOTAL; +extern const std::string METRIC_PLUGIN_UNMATCHED_LINES_TOTAL; + +/********************************************************** + * processor_merge_multiline_log_native + **********************************************************/ +extern const std::string METRIC_PLUGIN_MERGED_EVENTS_TOTAL; +extern const std::string METRIC_PLUGIN_UNMATCHED_EVENTS_TOTAL; + +/********************************************************** + * processor_parse_container_log_native + **********************************************************/ +extern const std::string METRIC_PLUGIN_PARSE_STDERR_TOTAL; +extern const std::string METRIC_PLUGIN_PARSE_STDOUT_TOTAL; + + +/********************************************************** + * flusher_sls + **********************************************************/ +extern const std::string METRIC_PLUGIN_FLUSHER_SLS_SHARD_WRITE_QUOTA_ERROR_TOTAL; +extern const std::string METRIC_PLUGIN_FLUSHER_SLS_PROJECT_QUOTA_ERROR_TOTAL; +extern const std::string METRIC_PLUGIN_FLUSHER_SLS_SEQUENCE_ID_ERROR_TOTAL; +extern const std::string METRIC_PLUGIN_FLUSHER_SLS_REQUEST_EXPRIRED_ERROR_TOTAL; + +////////////////////////////////////////////////////////////////////////// +// component +////////////////////////////////////////////////////////////////////////// + + +// label keys +extern const std::string METRIC_LABEL_KEY_COMPONENT_NAME; +extern const std::string METRIC_LABEL_KEY_FLUSHER_PLUGIN_ID; +extern const std::string 
METRIC_LABEL_KEY_EXACTLY_ONCE_FLAG; +extern const std::string METRIC_LABEL_KEY_QUEUE_TYPE; + +// label values +extern const std::string METRIC_LABEL_VALUE_COMPONENT_NAME_BATCHER; +extern const std::string METRIC_LABEL_VALUE_COMPONENT_NAME_COMPRESSOR; +extern const std::string METRIC_LABEL_VALUE_COMPONENT_NAME_PROCESS_QUEUE; +extern const std::string METRIC_LABEL_VALUE_COMPONENT_NAME_ROUTER; +extern const std::string METRIC_LABEL_VALUE_COMPONENT_NAME_SENDER_QUEUE; +extern const std::string METRIC_LABEL_VALUE_COMPONENT_NAME_SERIALIZER; + +// metric keys +extern const std::string METRIC_COMPONENT_IN_EVENTS_TOTAL; +extern const std::string METRIC_COMPONENT_IN_SIZE_BYTES; +extern const std::string METRIC_COMPONENT_IN_ITEMS_TOTAL; +extern const std::string METRIC_COMPONENT_OUT_EVENTS_TOTAL; +extern const std::string METRIC_COMPONENT_OUT_ITEMS_TOTAL; +extern const std::string METRIC_COMPONENT_OUT_SIZE_BYTES; +extern const std::string METRIC_COMPONENT_TOTAL_DELAY_MS; +extern const std::string METRIC_COMPONENT_TOTAL_PROCESS_TIME_MS; +extern const std::string METRIC_COMPONENT_DISCARDED_ITEMS_TOTAL; +extern const std::string METRIC_COMPONENT_DISCARDED_ITEMS_SIZE_BYTES; + +/********************************************************** + * batcher + **********************************************************/ +extern const std::string METRIC_COMPONENT_BATCHER_EVENT_BATCHES_TOTAL; +extern const std::string METRIC_COMPONENT_BATCHER_BUFFERED_GROUPS_TOTAL; +extern const std::string METRIC_COMPONENT_BATCHER_BUFFERED_EVENTS_TOTAL; +extern const std::string METRIC_COMPONENT_BATCHER_BUFFERED_SIZE_BYTES; + +/********************************************************** + * queue + **********************************************************/ +extern const std::string METRIC_COMPONENT_QUEUE_SIZE; +extern const std::string METRIC_COMPONENT_QUEUE_SIZE_BYTES; +extern const std::string METRIC_COMPONENT_QUEUE_VALID_TO_PUSH_FLAG; +extern const std::string METRIC_COMPONENT_QUEUE_EXTRA_BUFFER_SIZE; 
+extern const std::string METRIC_COMPONENT_QUEUE_EXTRA_BUFFER_SIZE_BYTES; +extern const std::string METRIC_COMPONENT_QUEUE_DISCARDED_EVENTS_TOTAL; + +extern const std::string METRIC_COMPONENT_FETCH_TIMES_TOTAL; +extern const std::string METRIC_COMPONENT_FETCHED_ITEMS_TOTAL; +extern const std::string METRIC_COMPONENT_FETCH_REJECTED_BY_REGION_LIMITER_TIMES_TOTAL; +extern const std::string METRIC_COMPONENT_FETCH_REJECTED_BY_PROJECT_LIMITER_TIMES_TOTAL; +extern const std::string METRIC_COMPONENT_FETCH_REJECTED_BY_LOGSTORE_LIMITER_TIMES_TOTAL; +extern const std::string METRIC_COMPONENT_FETCH_REJECTED_BY_RATE_LIMITER_TIMES_TOTAL; + + +////////////////////////////////////////////////////////////////////////// +// runner +////////////////////////////////////////////////////////////////////////// + +// label keys +extern const std::string METRIC_LABEL_KEY_RUNNER_NAME; + +// label values +extern const std::string METRIC_LABEL_VALUE_RUNNER_NAME_FILE_SERVER; +extern const std::string METRIC_LABEL_VALUE_RUNNER_NAME_FLUSHER; +extern const std::string METRIC_LABEL_VALUE_RUNNER_NAME_HTTP_SINK; +extern const std::string METRIC_LABEL_VALUE_RUNNER_NAME_PROCESSOR; +extern const std::string METRIC_LABEL_VALUE_RUNNER_NAME_PROMETHEUS; + +// metric keys +extern const std::string METRIC_RUNNER_IN_EVENTS_TOTAL; +extern const std::string METRIC_RUNNER_IN_EVENT_GROUPS_TOTAL; +extern const std::string METRIC_RUNNER_IN_SIZE_BYTES; +extern const std::string METRIC_RUNNER_IN_ITEMS_TOTAL; +extern const std::string METRIC_RUNNER_LAST_RUN_TIME; +extern const std::string METRIC_RUNNER_OUT_ITEMS_TOTAL; +extern const std::string METRIC_RUNNER_TOTAL_DELAY_MS; +extern const std::string METRIC_RUNNER_SINK_OUT_SUCCESSFUL_ITEMS_TOTAL; +extern const std::string METRIC_RUNNER_SINK_OUT_FAILED_ITEMS_TOTAL; +extern const std::string METRIC_RUNNER_SINK_SENDING_ITEMS_TOTAL; +extern const std::string METRIC_RUNNER_SINK_SEND_CONCURRENCY; +extern const std::string METRIC_RUNNER_CLIENT_REGISTER_STATE; +extern const 
std::string METRIC_RUNNER_CLIENT_REGISTER_RETRY_TOTAL; +extern const std::string METRIC_RUNNER_JOB_NUM; + + +/********************************************************** + * flusher runner + **********************************************************/ +extern const std::string METRIC_RUNNER_FLUSHER_IN_SIZE_BYTES; +extern const std::string METRIC_RUNNER_FLUSHER_WAITING_ITEMS_TOTAL; + +/********************************************************** + * file server + **********************************************************/ +extern const std::string METRIC_RUNNER_FILE_WATCHED_DIRS_TOTAL; +extern const std::string METRIC_RUNNER_FILE_ACTIVE_READERS_TOTAL; +extern const std::string METRIC_RUNNER_FILE_ENABLE_FILE_INCLUDED_BY_MULTI_CONFIGS_FLAG; +extern const std::string METRIC_RUNNER_FILE_POLLING_MODIFY_CACHE_SIZE; +extern const std::string METRIC_RUNNER_FILE_POLLING_DIR_CACHE_SIZE; +extern const std::string METRIC_RUNNER_FILE_POLLING_FILE_CACHE_SIZE; + +} // namespace logtail diff --git a/core/monitor/metric_constants/PipelineMetrics.cpp b/core/monitor/metric_constants/PipelineMetrics.cpp new file mode 100644 index 0000000000..44017501d6 --- /dev/null +++ b/core/monitor/metric_constants/PipelineMetrics.cpp @@ -0,0 +1,33 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "MetricConstants.h" + +using namespace std; + +namespace logtail { + +// label keys +const string METRIC_LABEL_KEY_LOGSTORE = "logstore"; +const string METRIC_LABEL_KEY_PIPELINE_NAME = "pipeline_name"; +const string METRIC_LABEL_KEY_REGION = "region"; + +// metric keys +const string METRIC_PIPELINE_PROCESSORS_IN_EVENTS_TOTAL = "pipeline_processors_in_events_total"; +const string METRIC_PIPELINE_PROCESSORS_IN_EVENT_GROUPS_TOTAL = "pipeline_processors_in_event_groups_total"; +const string METRIC_PIPELINE_PROCESSORS_IN_SIZE_BYTES = "pipeline_processors_in_size_bytes"; +const string METRIC_PIPELINE_PROCESSORS_TOTAL_PROCESS_TIME_MS = "pipeline_processors_total_process_time_ms"; +const string METRIC_PIPELINE_START_TIME = "pipeline_start_time"; + +} // namespace logtail diff --git a/core/monitor/metric_constants/PluginMetrics.cpp b/core/monitor/metric_constants/PluginMetrics.cpp new file mode 100644 index 0000000000..b8e27f1b09 --- /dev/null +++ b/core/monitor/metric_constants/PluginMetrics.cpp @@ -0,0 +1,117 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "MetricConstants.h" + +using namespace std; + +namespace logtail { + +// label keys +const string METRIC_LABEL_KEY_PLUGIN_ID = "plugin_id"; +const string METRIC_LABEL_KEY_PLUGIN_TYPE = "plugin_type"; + +// metric keys +const string METRIC_PLUGIN_IN_EVENTS_TOTAL = "plugin_in_events_total"; +const string METRIC_PLUGIN_IN_EVENT_GROUPS_TOTAL = "plugin_in_event_groups_total"; +const string METRIC_PLUGIN_IN_SIZE_BYTES = "plugin_in_size_bytes"; +const string METRIC_PLUGIN_OUT_EVENTS_TOTAL = "plugin_out_events_total"; +const string METRIC_PLUGIN_OUT_EVENT_GROUPS_TOTAL = "plugin_out_event_groups_total"; +const string METRIC_PLUGIN_OUT_SIZE_BYTES = "plugin_out_size_bytes"; +const string METRIC_PLUGIN_TOTAL_DELAY_MS = "plugin_total_delay_ms"; +const string METRIC_PLUGIN_TOTAL_PROCESS_TIME_MS = "plugin_total_process_time_ms"; + +/********************************************************** + * input_file + * input_container_stdio + **********************************************************/ +const string METRIC_LABEL_KEY_FILE_DEV = "file_dev"; +const string METRIC_LABEL_KEY_FILE_INODE = "file_inode"; +const string METRIC_LABEL_KEY_FILE_NAME = "file_name"; + +const string METRIC_PLUGIN_MONITOR_FILE_TOTAL = "plugin_monitor_file_total"; +const string METRIC_PLUGIN_SOURCE_READ_OFFSET_BYTES = "plugin_source_read_offset_bytes"; +const string METRIC_PLUGIN_SOURCE_SIZE_BYTES = "plugin_source_size_bytes"; + +/********************************************************** + * input_prometheus + **********************************************************/ +const std::string METRIC_LABEL_KEY_JOB = "job"; +const std::string METRIC_LABEL_KEY_POD_NAME = "pod_name"; +const std::string METRIC_LABEL_KEY_SERVICE_HOST = "service_host"; +const std::string METRIC_LABEL_KEY_SERVICE_PORT = "service_port"; +const std::string METRIC_LABEL_KEY_STATUS = "status"; +const std::string METRIC_LABEL_KEY_INSTANCE = "instance"; + +const std::string METRIC_PLUGIN_PROM_SUBSCRIBE_TARGETS = 
"plugin_prom_subscribe_targets"; +const std::string METRIC_PLUGIN_PROM_SUBSCRIBE_TOTAL = "plugin_prom_subscribe_total"; +const std::string METRIC_PLUGIN_PROM_SUBSCRIBE_TIME_MS = "plugin_prom_subscribe_time_ms"; +const std::string METRIC_PLUGIN_PROM_SCRAPE_TIME_MS = "plugin_prom_scrape_time_ms"; +const std::string METRIC_PLUGIN_PROM_SCRAPE_DELAY_TOTAL = "plugin_prom_scrape_delay_total"; + +/********************************************************** + * all processor (所有解析类的处理插件通用指标。Todo:目前统计还不全、不准确) + **********************************************************/ +const string METRIC_PLUGIN_DISCARDED_EVENTS_TOTAL = "plugin_discarded_events_total"; +const string METRIC_PLUGIN_OUT_FAILED_EVENTS_TOTAL = "plugin_out_failed_events_total"; +const string METRIC_PLUGIN_OUT_KEY_NOT_FOUND_EVENTS_TOTAL = "plugin_out_key_not_found_events_total"; +const string METRIC_PLUGIN_OUT_SUCCESSFUL_EVENTS_TOTAL = "plugin_out_successful_events_total"; + +/********************************************************** + * processor_parse_apsara_native + * processor_parse_timestamp_native + **********************************************************/ +const string METRIC_PLUGIN_HISTORY_FAILURE_TOTAL = "plugin_history_failure_total"; + +/********************************************************** + * processor_split_multiline_log_string_native + **********************************************************/ +const string METRIC_PLUGIN_MATCHED_EVENTS_TOTAL = "plugin_matched_events_total"; +const string METRIC_PLUGIN_MATCHED_LINES_TOTAL = "plugin_matched_lines_total"; +const string METRIC_PLUGIN_UNMATCHED_LINES_TOTAL = "plugin_unmatched_lines_total"; + +/********************************************************** + * processor_merge_multiline_log_native + **********************************************************/ +const string METRIC_PLUGIN_MERGED_EVENTS_TOTAL = "plugin_merged_events_total"; +const string METRIC_PLUGIN_UNMATCHED_EVENTS_TOTAL = "plugin_unmatched_events_total"; + 
+/********************************************************** + * processor_parse_container_log_native + **********************************************************/ +const string METRIC_PLUGIN_PARSE_STDERR_TOTAL = "plugin_parse_stderr_total"; +const string METRIC_PLUGIN_PARSE_STDOUT_TOTAL = "plugin_parse_stdout_total"; + + +/********************************************************** + * all flusher (所有发送插件通用指标) + **********************************************************/ +const string METRIC_PLUGIN_FLUSHER_OUT_EVENT_GROUPS_TOTAL = "plugin_flusher_send_total"; +const string METRIC_PLUGIN_FLUSHER_SEND_DONE_TOTAL = "plugin_flusher_send_done_total"; +const string METRIC_PLUGIN_FLUSHER_SUCCESS_TOTAL = "plugin_flusher_success_total"; +const string METRIC_PLUGIN_FLUSHER_NETWORK_ERROR_TOTAL = "plugin_flusher_network_error_total"; +const string METRIC_PLUGIN_FLUSHER_SERVER_ERROR_TOTAL = "plugin_flusher_server_error_total"; +const string METRIC_PLUGIN_FLUSHER_UNAUTH_ERROR_TOTAL = "plugin_flusher_unauth_error_total"; +const string METRIC_PLUGIN_FLUSHER_PARAMS_ERROR_TOTAL = "plugin_flusher_params_error_total"; +const string METRIC_PLUGIN_FLUSHER_OTHER_ERROR_TOTAL = "plugin_flusher_other_error_total"; + +/********************************************************** + * flusher_sls + **********************************************************/ +const string METRIC_PLUGIN_FLUSHER_SLS_SHARD_WRITE_QUOTA_ERROR_TOTAL = "plugin_flusher_sls_shard_write_quota_error_total"; +const string METRIC_PLUGIN_FLUSHER_SLS_PROJECT_QUOTA_ERROR_TOTAL = "plugin_flusher_sls_project_quota_error_total"; +const string METRIC_PLUGIN_FLUSHER_SLS_SEQUENCE_ID_ERROR_TOTAL = "plugin_flusher_sls_sequence_id_error_total"; +const string METRIC_PLUGIN_FLUSHER_SLS_REQUEST_EXPRIRED_ERROR_TOTAL = "plugin_flusher_sls_request_exprired_error_total"; + +} // namespace logtail \ No newline at end of file diff --git a/core/monitor/metric_constants/RunnerMetrics.cpp b/core/monitor/metric_constants/RunnerMetrics.cpp new file 
mode 100644 index 0000000000..fbe745e05f --- /dev/null +++ b/core/monitor/metric_constants/RunnerMetrics.cpp @@ -0,0 +1,65 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "MetricConstants.h" + +using namespace std; + +namespace logtail { + +// label keys +const string METRIC_LABEL_KEY_RUNNER_NAME = "runner_name"; + +// label values +const string METRIC_LABEL_VALUE_RUNNER_NAME_FILE_SERVER = "file_server"; +const string METRIC_LABEL_VALUE_RUNNER_NAME_FLUSHER = "flusher_runner"; +const string METRIC_LABEL_VALUE_RUNNER_NAME_HTTP_SINK = "http_sink"; +const string METRIC_LABEL_VALUE_RUNNER_NAME_PROCESSOR = "processor_runner"; +const string METRIC_LABEL_VALUE_RUNNER_NAME_PROMETHEUS = "prometheus_runner"; + +// metric keys +const string METRIC_RUNNER_IN_EVENTS_TOTAL = "runner_in_events_total"; +const string METRIC_RUNNER_IN_EVENT_GROUPS_TOTAL = "runner_in_event_groups_total"; +const string METRIC_RUNNER_IN_SIZE_BYTES = "runner_in_size_bytes"; +const string METRIC_RUNNER_IN_ITEMS_TOTAL = "runner_in_items_total"; +const string METRIC_RUNNER_LAST_RUN_TIME = "runner_last_run_time"; +const string METRIC_RUNNER_OUT_ITEMS_TOTAL = "runner_out_items_total"; +const string METRIC_RUNNER_TOTAL_DELAY_MS = "runner_total_delay_ms"; +const string METRIC_RUNNER_SINK_OUT_SUCCESSFUL_ITEMS_TOTAL = "runner_out_successful_items_total"; +const string METRIC_RUNNER_SINK_OUT_FAILED_ITEMS_TOTAL = "runner_out_failed_items_total"; +const 
string METRIC_RUNNER_SINK_SENDING_ITEMS_TOTAL = "runner_sending_items_total"; +const string METRIC_RUNNER_SINK_SEND_CONCURRENCY = "runner_send_concurrency"; +const string METRIC_RUNNER_CLIENT_REGISTER_STATE = "runner_client_register_state"; +const string METRIC_RUNNER_CLIENT_REGISTER_RETRY_TOTAL = "runner_client_register_retry_total"; +const string METRIC_RUNNER_JOB_NUM = "runner_job_num"; + + +/********************************************************** + * flusher runner + **********************************************************/ +const string METRIC_RUNNER_FLUSHER_IN_SIZE_BYTES = "runner_in_size_bytes"; +const string METRIC_RUNNER_FLUSHER_WAITING_ITEMS_TOTAL = "runner_waiting_items_total"; + +/********************************************************** + * file server + **********************************************************/ +const string METRIC_RUNNER_FILE_WATCHED_DIRS_TOTAL = "runner_watched_dirs_total"; +const string METRIC_RUNNER_FILE_ACTIVE_READERS_TOTAL = "runner_active_readers_total"; +const string METRIC_RUNNER_FILE_ENABLE_FILE_INCLUDED_BY_MULTI_CONFIGS_FLAG + = "runner_enable_file_included_by_multi_configs"; +const string METRIC_RUNNER_FILE_POLLING_MODIFY_CACHE_SIZE = "runner_polling_modify_cache_size"; +const string METRIC_RUNNER_FILE_POLLING_DIR_CACHE_SIZE = "runner_polling_dir_cache_size"; +const string METRIC_RUNNER_FILE_POLLING_FILE_CACHE_SIZE = "runner_polling_file_cache_size"; + +} // namespace logtail \ No newline at end of file diff --git a/core/observer/network/sources/ebpf/EBPFWrapper.cpp b/core/observer/network/sources/ebpf/EBPFWrapper.cpp index 5e7b758bc2..034590ccf0 100644 --- a/core/observer/network/sources/ebpf/EBPFWrapper.cpp +++ b/core/observer/network/sources/ebpf/EBPFWrapper.cpp @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+#include "app_config/AppConfig.h" #include "logger/Logger.h" #include "EBPFWrapper.h" #include "RuntimeUtil.h" @@ -224,6 +225,7 @@ static std::string GetValidBTFPath(const int64_t& kernelVersion, const std::stri if (configedBTFPath != nullptr) { return {configedBTFPath}; } + // ebpf lib load std::string execDir = GetProcessExecutionDir(); fsutil::Dir dir(execDir); if (!dir.Open()) { @@ -284,6 +286,7 @@ bool EBPFWrapper::loadEbpfLib(int64_t kernelVersion, std::string& soPath) { return true; } LOG_INFO(sLogger, ("load ebpf dynamic library", "begin")); + // load ebpf lib std::string dlPrefix = GetProcessExecutionDir(); soPath = dlPrefix + "libebpf.so"; if (kernelVersion < INT64_FLAG(sls_observer_ebpf_min_kernel_version)) { @@ -292,12 +295,12 @@ bool EBPFWrapper::loadEbpfLib(int64_t kernelVersion, std::string& soPath) { // overlay fs. detail: https://lore.kernel.org/lkml/20180228004014.445-1-hmclauchlan@fb.com/ if (fsutil::PathStat::stat(STRING_FLAG(default_container_host_path).c_str(), buf)) { std::string cmd - = std::string("\\cp ").append(soPath).append(" ").append(STRING_FLAG(sls_observer_ebpf_host_path)); + = std::string("\\cp ").append(soPath).append(" ").append(GetObserverEbpfHostPath()); LOG_INFO(sLogger, ("invoke cp cmd:", cmd)); - system(std::string("mkdir ").append(STRING_FLAG(sls_observer_ebpf_host_path)).c_str()); + system(std::string("mkdir ").append(GetObserverEbpfHostPath()).c_str()); system(cmd.c_str()); - dlPrefix = STRING_FLAG(sls_observer_ebpf_host_path); - soPath = STRING_FLAG(sls_observer_ebpf_host_path) + "libebpf.so"; + dlPrefix = GetObserverEbpfHostPath(); + soPath = GetObserverEbpfHostPath() + "libebpf.so"; } } LOG_INFO(sLogger, ("load ebpf, libebpf path", soPath)); diff --git a/core/observer/network/sources/pcap/PCAPWrapper.cpp b/core/observer/network/sources/pcap/PCAPWrapper.cpp index 3d97fa4d8a..63adcff359 100644 --- a/core/observer/network/sources/pcap/PCAPWrapper.cpp +++ b/core/observer/network/sources/pcap/PCAPWrapper.cpp @@ -82,6 +82,7 
@@ bool PCAPWrapper::Init(std::function processor) { LOG_INFO(sLogger, ("load pcap dynamic library", "begin")); mPCAPLib = new DynamicLibLoader; std::string loadErr; + // pcap lib load if (!mPCAPLib->LoadDynLib("pcap", loadErr, GetProcessExecutionDir())) { if (!mPCAPLib->LoadDynLib("pcap", loadErr)) { LOG_ERROR(sLogger, ("load pcap dynamic library", "failed")("error", loadErr)); diff --git a/core/options.cmake b/core/options.cmake index f82172f8bf..79881d7c98 100644 --- a/core/options.cmake +++ b/core/options.cmake @@ -14,11 +14,11 @@ # Name/Version information. if (NOT DEFINED LOGTAIL_VERSION) - set(LOGTAIL_VERSION "2.0.0") + set(LOGTAIL_VERSION "0.0.1") endif () message(STATUS "Version: ${LOGTAIL_VERSION}") -set(LOGTAIL_TARGET "ilogtail") +set(LOGTAIL_TARGET "loongcollector") # Extract Git commit information for tracing. # For a better solution see https://jonathanhamberg.com/post/cmake-embedding-git-hash/ but this is simple and easy. diff --git a/core/pipeline/Pipeline.cpp b/core/pipeline/Pipeline.cpp index 7d38880084..45373db490 100644 --- a/core/pipeline/Pipeline.cpp +++ b/core/pipeline/Pipeline.cpp @@ -89,6 +89,7 @@ bool Pipeline::Init(PipelineConfig&& config) { } mPluginID.store(0); + mInProcessCnt.store(0); for (size_t i = 0; i < config.mInputs.size(); ++i) { const Json::Value& detail = *config.mInputs[i]; string pluginType = detail["Type"].asString(); @@ -316,12 +317,13 @@ bool Pipeline::Init(PipelineConfig&& config) { } WriteMetrics::GetInstance()->PrepareMetricsRecordRef( - mMetricsRecordRef, {{METRIC_LABEL_PROJECT, mContext.GetProjectName()}, {METRIC_LABEL_CONFIG_NAME, mName}}); - mStartTime = mMetricsRecordRef.CreateIntGauge("start_time"); - mProcessorsInEventsCnt = mMetricsRecordRef.CreateCounter("processors_in_events_cnt"); - mProcessorsInGroupsCnt = mMetricsRecordRef.CreateCounter("processors_in_event_groups_cnt"); - mProcessorsInGroupDataSizeBytes = mMetricsRecordRef.CreateCounter("processors_in_event_group_data_size_bytes"); - 
mProcessorsTotalDelayMs = mMetricsRecordRef.CreateCounter("processors_total_delay_ms"); + mMetricsRecordRef, + {{METRIC_LABEL_KEY_PROJECT, mContext.GetProjectName()}, {METRIC_LABEL_KEY_PIPELINE_NAME, mName}}); + mStartTime = mMetricsRecordRef.CreateIntGauge(METRIC_PIPELINE_START_TIME); + mProcessorsInEventsTotal = mMetricsRecordRef.CreateCounter(METRIC_PIPELINE_PROCESSORS_IN_EVENTS_TOTAL); + mProcessorsInGroupsTotal = mMetricsRecordRef.CreateCounter(METRIC_PIPELINE_PROCESSORS_IN_EVENT_GROUPS_TOTAL); + mProcessorsInSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_PIPELINE_PROCESSORS_IN_SIZE_BYTES); + mProcessorsTotalProcessTimeMs = mMetricsRecordRef.CreateCounter(METRIC_PIPELINE_PROCESSORS_TOTAL_PROCESS_TIME_MS); return true; } @@ -334,13 +336,13 @@ void Pipeline::Start() { } if (!mGoPipelineWithoutInput.isNull()) { - // TODO: 加载该Go流水线 + LogtailPlugin::GetInstance()->Start(GetConfigNameOfGoPipelineWithoutInput()); } - // TODO: 启用Process中改流水线对应的输入队列 + ProcessQueueManager::GetInstance()->EnablePop(mName); if (!mGoPipelineWithInput.isNull()) { - // TODO: 加载该Go流水线 + LogtailPlugin::GetInstance()->Start(GetConfigNameOfGoPipelineWithInput()); } for (const auto& input : mInputs) { @@ -349,15 +351,15 @@ void Pipeline::Start() { mStartTime->Set(chrono::duration_cast(chrono::system_clock::now().time_since_epoch()).count()); #endif - LOG_INFO(sLogger, ("pipeline start", "succeeded")("config", mName)("ptr", mStartTime.get())); + LOG_INFO(sLogger, ("pipeline start", "succeeded")("config", mName)); } void Pipeline::Process(vector& logGroupList, size_t inputIndex) { for (const auto& logGroup : logGroupList) { - mProcessorsInEventsCnt->Add(logGroup.GetEvents().size()); - mProcessorsInGroupDataSizeBytes->Add(logGroup.DataSize()); + mProcessorsInEventsTotal->Add(logGroup.GetEvents().size()); + mProcessorsInSizeBytes->Add(logGroup.DataSize()); } - mProcessorsInGroupsCnt->Add(logGroupList.size()); + mProcessorsInGroupsTotal->Add(logGroupList.size()); auto before = 
chrono::system_clock::now(); for (auto& p : mInputs[inputIndex]->GetInnerProcessors()) { @@ -366,7 +368,7 @@ void Pipeline::Process(vector& logGroupList, size_t inputInd for (auto& p : mProcessorLine) { p->Process(logGroupList); } - mProcessorsTotalDelayMs->Add( + mProcessorsTotalProcessTimeMs->Add( chrono::duration_cast(chrono::system_clock::now() - before).count()); } @@ -409,17 +411,20 @@ void Pipeline::Stop(bool isRemoving) { } if (!mGoPipelineWithInput.isNull()) { - // TODO: 卸载该Go流水线 + // Go pipeline `Stop` will stop and delete + LogtailPlugin::GetInstance()->Stop(GetConfigNameOfGoPipelineWithInput(), isRemoving); } - // TODO: 禁用Process中改流水线对应的输入队列 + ProcessQueueManager::GetInstance()->DisablePop(mName, isRemoving); + WaitAllItemsInProcessFinished(); if (!isRemoving) { FlushBatch(); } if (!mGoPipelineWithoutInput.isNull()) { - // TODO: 卸载该Go流水线 + // Go pipeline `Stop` will stop and delete + LogtailPlugin::GetInstance()->Stop(GetConfigNameOfGoPipelineWithoutInput(), isRemoving); } for (const auto& flusher : mFlushers) { @@ -474,19 +479,16 @@ void Pipeline::CopyNativeGlobalParamToGoPipeline(Json::Value& pipeline) { } bool Pipeline::LoadGoPipelines() const { - // TODO:将下面的代码替换成批量原子Load。 - // note: - // 目前按照从后往前顺序加载,即便without成功with失败导致without残留在插件系统中,也不会有太大的问题,但最好改成原子的。 if (!mGoPipelineWithoutInput.isNull()) { string content = mGoPipelineWithoutInput.toStyledString(); - if (!LogtailPlugin::GetInstance()->LoadPipeline(mName + "/2", + if (!LogtailPlugin::GetInstance()->LoadPipeline(GetConfigNameOfGoPipelineWithoutInput(), content, mContext.GetProjectName(), mContext.GetLogstoreName(), mContext.GetRegion(), mContext.GetLogstoreKey())) { LOG_ERROR(mContext.GetLogger(), - ("failed to init pipeline", "Go pipeline is invalid, see logtail_plugin.LOG for detail")( + ("failed to init pipeline", "Go pipeline is invalid, see go_plugin.LOG for detail")( "Go pipeline num", "2")("Go pipeline content", content)("config", mName)); 
LogtailAlarm::GetInstance()->SendAlarm(CATEGORY_CONFIG_ALARM, "Go pipeline is invalid, content: " + content + ", config: " + mName, @@ -498,20 +500,23 @@ bool Pipeline::LoadGoPipelines() const { } if (!mGoPipelineWithInput.isNull()) { string content = mGoPipelineWithInput.toStyledString(); - if (!LogtailPlugin::GetInstance()->LoadPipeline(mName + "/1", + if (!LogtailPlugin::GetInstance()->LoadPipeline(GetConfigNameOfGoPipelineWithInput(), content, mContext.GetProjectName(), mContext.GetLogstoreName(), mContext.GetRegion(), mContext.GetLogstoreKey())) { LOG_ERROR(mContext.GetLogger(), - ("failed to init pipeline", "Go pipeline is invalid, see logtail_plugin.LOG for detail")( + ("failed to init pipeline", "Go pipeline is invalid, see go_plugin.LOG for detail")( "Go pipeline num", "1")("Go pipeline content", content)("config", mName)); LogtailAlarm::GetInstance()->SendAlarm(CATEGORY_CONFIG_ALARM, "Go pipeline is invalid, content: " + content + ", config: " + mName, mContext.GetProjectName(), mContext.GetLogstoreName(), mContext.GetRegion()); + if (!mGoPipelineWithoutInput.isNull()) { + LogtailPlugin::GetInstance()->UnloadPipeline(GetConfigNameOfGoPipelineWithoutInput()); + } return false; } } @@ -524,14 +529,27 @@ std::string Pipeline::GetNowPluginID() { PluginInstance::PluginMeta Pipeline::GenNextPluginMeta(bool lastOne) { mPluginID.fetch_add(1); - int32_t childNodeID = mPluginID.load(); - if (lastOne) { - childNodeID = -1; - } else { - childNodeID += 1; - } return PluginInstance::PluginMeta( - std::to_string(mPluginID.load()), std::to_string(mPluginID.load()), std::to_string(childNodeID)); + std::to_string(mPluginID.load())); +} + +void Pipeline::WaitAllItemsInProcessFinished() { + uint64_t startTime = GetCurrentTimeInMilliSeconds(); + bool alarmOnce = false; + while (mInProcessCnt.load() != 0) { + this_thread::sleep_for(chrono::milliseconds(100)); // 100ms + uint64_t duration = GetCurrentTimeInMilliSeconds() - startTime; + if (!alarmOnce && duration > 10000) { // 
10s + LOG_ERROR(sLogger, ("pipeline stop", "too slow")("config", mName)("cost", duration)); + LogtailAlarm::GetInstance()->SendAlarm(CONFIG_UPDATE_ALARM, + string("pipeline stop too slow, config: ") + mName + + "; cost:" + std::to_string(duration), + mContext.GetProjectName(), + mContext.GetLogstoreName(), + mContext.GetRegion()); + alarmOnce = true; + } + } } } // namespace logtail diff --git a/core/pipeline/Pipeline.h b/core/pipeline/Pipeline.h index e3775f7b48..6e5c2cfd5c 100644 --- a/core/pipeline/Pipeline.h +++ b/core/pipeline/Pipeline.h @@ -46,6 +46,17 @@ class Pipeline { bool Send(std::vector&& groupList); bool FlushBatch(); void RemoveProcessQueue() const; + // Should add before or when item pop from ProcessorQueue, must be called in the lock of ProcessorQueue + void AddInProcessCnt() { mInProcessCnt.fetch_add(1); } + // Should sub when or after item push to SenderQueue + void SubInProcessCnt() { + if (mInProcessCnt.load() == 0) { + // should never happen + LOG_ERROR(sLogger, ("in processing count error", "sub when 0")("config", mName)); + return; + } + mInProcessCnt.fetch_sub(1); + } const std::string& Name() const { return mName; } PipelineContext& GetContext() const { return mContext; } @@ -55,7 +66,6 @@ class Pipeline { const std::unordered_map>& GetPluginStatistics() const { return mPluginCntMap; } - bool LoadGoPipelines() const; // 应当放在private,过渡期间放在public // only for input_observer_network for compatability const std::vector>& GetInputs() const { return mInputs; } @@ -64,7 +74,13 @@ class Pipeline { static std::string GenPluginTypeWithID(std::string pluginType, std::string pluginID); PluginInstance::PluginMeta GenNextPluginMeta(bool lastOne); + bool HasGoPipelineWithInput() const { return !mGoPipelineWithInput.isNull(); } + bool HasGoPipelineWithoutInput() const { return !mGoPipelineWithoutInput.isNull(); } + std::string GetConfigNameOfGoPipelineWithInput() const { return mName + "/1"; } + std::string GetConfigNameOfGoPipelineWithoutInput() const { 
return mName + "/2"; } + private: + bool LoadGoPipelines() const; void MergeGoPipeline(const Json::Value& src, Json::Value& dst); void AddPluginToGoPipeline(const std::string& type, const Json::Value& plugin, @@ -72,6 +88,7 @@ class Pipeline { Json::Value& dst); void CopyNativeGlobalParamToGoPipeline(Json::Value& root); bool ShouldAddPluginToGoPipelineWithInput() const { return mInputs.empty() && mProcessorLine.empty(); } + void WaitAllItemsInProcessFinished(); std::string mName; std::vector> mInputs; @@ -84,16 +101,17 @@ class Pipeline { std::unordered_map> mPluginCntMap; std::unique_ptr mConfig; std::atomic_uint16_t mPluginID; + std::atomic_int16_t mInProcessCnt; mutable MetricsRecordRef mMetricsRecordRef; IntGaugePtr mStartTime; - CounterPtr mProcessorsInEventsCnt; - CounterPtr mProcessorsInGroupsCnt; - CounterPtr mProcessorsInGroupDataSizeBytes; - CounterPtr mProcessorsTotalDelayMs; + CounterPtr mProcessorsInEventsTotal; + CounterPtr mProcessorsInGroupsTotal; + CounterPtr mProcessorsInSizeBytes; + CounterPtr mProcessorsTotalProcessTimeMs; #ifdef APSARA_UNIT_TEST_MAIN - friend class PipelineMock; + friend class PipelineMock; friend class PipelineUnittest; friend class InputContainerStdioUnittest; friend class InputFileUnittest; diff --git a/core/pipeline/PipelineManager.cpp b/core/pipeline/PipelineManager.cpp index 9a0a69f60c..529c9a4d97 100644 --- a/core/pipeline/PipelineManager.cpp +++ b/core/pipeline/PipelineManager.cpp @@ -22,13 +22,11 @@ #include "prometheus/PrometheusInputRunner.h" #if defined(__linux__) && !defined(__ANDROID__) #include "ebpf/eBPFServer.h" -#include "observer/ObserverManager.h" #endif -#include "runner/LogProcess.h" +#include "runner/ProcessorRunner.h" #if defined(__ENTERPRISE__) && defined(__linux__) && !defined(__ANDROID__) #include "app_config/AppConfig.h" #include "shennong/ShennongManager.h" -#include "streamlog/StreamLogManager.h" #endif #include "config/feedbacker/ConfigFeedbackReceiver.h" #include 
"pipeline/queue/ProcessQueueManager.h" @@ -38,55 +36,38 @@ using namespace std; namespace logtail { +PipelineManager::PipelineManager() + : mInputRunners({ + PrometheusInputRunner::GetInstance(), +#if defined(__linux__) && !defined(__ANDROID__) + ebpf::eBPFServer::GetInstance(), +#endif + }) { +} + void logtail::PipelineManager::UpdatePipelines(PipelineConfigDiff& diff) { #ifndef APSARA_UNIT_TEST_MAIN // 过渡使用 - static bool isFileServerStarted = false, isInputObserverStarted = false; -#if defined(__ENTERPRISE__) && defined(__linux__) && !defined(__ANDROID__) - static bool isInputStreamStarted = false; -#endif - bool isInputObserverChanged = false, isInputFileChanged = false, isInputStreamChanged = false, - isInputContainerStdioChanged = false; + static bool isFileServerStarted = false; + bool isFileServerInputChanged = false; for (const auto& name : diff.mRemoved) { - CheckIfInputUpdated(mPipelineNameEntityMap[name]->GetConfig()["inputs"][0], - isInputObserverChanged, - isInputFileChanged, - isInputStreamChanged, - isInputContainerStdioChanged); + isFileServerInputChanged = CheckIfFileServerUpdated(mPipelineNameEntityMap[name]->GetConfig()["inputs"][0]); } for (const auto& config : diff.mModified) { - CheckIfInputUpdated(*config.mInputs[0], - isInputObserverChanged, - isInputFileChanged, - isInputStreamChanged, - isInputContainerStdioChanged); + isFileServerInputChanged = CheckIfFileServerUpdated(*config.mInputs[0]); } for (const auto& config : diff.mAdded) { - CheckIfInputUpdated(*config.mInputs[0], - isInputObserverChanged, - isInputFileChanged, - isInputStreamChanged, - isInputContainerStdioChanged); + isFileServerInputChanged = CheckIfFileServerUpdated(*config.mInputs[0]); } #if defined(__ENTERPRISE__) && defined(__linux__) && !defined(__ANDROID__) if (AppConfig::GetInstance()->ShennongSocketEnabled()) { ShennongManager::GetInstance()->Pause(); } - if (isInputStreamStarted && isInputStreamChanged) { - StreamLogManager::GetInstance()->ShutdownConfigUsage(); - } 
-#endif -#if defined(__linux__) && !defined(__ANDROID__) - if (isInputObserverStarted && isInputObserverChanged) { - ObserverManager::GetInstance()->HoldOn(false); - } #endif - if (isFileServerStarted && (isInputFileChanged || isInputContainerStdioChanged)) { + if (isFileServerStarted && isFileServerInputChanged) { FileServer::GetInstance()->Pause(); } - LogProcess::GetInstance()->HoldOn(); - LogtailPlugin::GetInstance()->HoldOn(false); #endif for (const auto& name : diff.mRemoved) { @@ -98,7 +79,7 @@ void logtail::PipelineManager::UpdatePipelines(PipelineConfigDiff& diff) { ConfigFeedbackReceiver::GetInstance().FeedbackPipelineConfigStatus(name, ConfigFeedbackStatus::DELETED); } for (auto& config : diff.mModified) { - auto p = BuildPipeline(std::move(config)); + auto p = BuildPipeline(std::move(config)); // auto reuse old pipeline's process queue and sender queue if (!p) { LOG_WARNING(sLogger, ("failed to build pipeline for existing config", @@ -109,22 +90,21 @@ void logtail::PipelineManager::UpdatePipelines(PipelineConfigDiff& diff) { config.mProject, config.mLogstore, config.mRegion); - diff.mUnchanged.push_back(config.mName); ConfigFeedbackReceiver::GetInstance().FeedbackPipelineConfigStatus(config.mName, ConfigFeedbackStatus::FAILED); continue; } - ConfigFeedbackReceiver::GetInstance().FeedbackPipelineConfigStatus(config.mName, ConfigFeedbackStatus::APPLIED); LOG_INFO(sLogger, ("pipeline building for existing config succeeded", "stop the old pipeline and start the new one")("config", config.mName)); - auto iter = mPipelineNameEntityMap.find(config.mName); iter->second->Stop(false); DecreasePluginUsageCnt(iter->second->GetPluginStatistics()); + mPipelineNameEntityMap[config.mName] = p; IncreasePluginUsageCnt(p->GetPluginStatistics()); p->Start(); + ConfigFeedbackReceiver::GetInstance().FeedbackPipelineConfigStatus(config.mName, ConfigFeedbackStatus::APPLIED); } for (auto& config : diff.mAdded) { auto p = BuildPipeline(std::move(config)); @@ -143,24 +123,17 @@ 
void logtail::PipelineManager::UpdatePipelines(PipelineConfigDiff& diff) { } LOG_INFO(sLogger, ("pipeline building for new config succeeded", "begin to start pipeline")("config", config.mName)); - ConfigFeedbackReceiver::GetInstance().FeedbackPipelineConfigStatus(config.mName, ConfigFeedbackStatus::APPLIED); mPipelineNameEntityMap[config.mName] = p; IncreasePluginUsageCnt(p->GetPluginStatistics()); p->Start(); + ConfigFeedbackReceiver::GetInstance().FeedbackPipelineConfigStatus(config.mName, ConfigFeedbackStatus::APPLIED); } #ifndef APSARA_UNIT_TEST_MAIN - // 过渡使用,有变更的流水线的Go流水线加载在BuildPipeline中完成 - for (auto& name : diff.mUnchanged) { - mPipelineNameEntityMap[name]->LoadGoPipelines(); - } // 在Flusher改造完成前,先不执行如下步骤,不会造成太大影响 // Sender::CleanUnusedAk(); - // 过渡使用 - LogtailPlugin::GetInstance()->Resume(); - LogProcess::GetInstance()->Resume(); - if (isInputFileChanged || isInputContainerStdioChanged) { + if (isFileServerInputChanged) { if (isFileServerStarted) { FileServer::GetInstance()->Resume(); } else { @@ -169,34 +142,18 @@ void logtail::PipelineManager::UpdatePipelines(PipelineConfigDiff& diff) { } } -#if defined(__linux__) && !defined(__ANDROID__) - if (isInputObserverChanged) { - if (isInputObserverStarted) { - ObserverManager::GetInstance()->Resume(); - } else { - // input_observer_network always relies on PluginBase - LogtailPlugin::GetInstance()->LoadPluginBase(); - ObserverManager::GetInstance()->Reload(); - isInputObserverStarted = true; - } - } -#endif #if defined(__ENTERPRISE__) && defined(__linux__) && !defined(__ANDROID__) - if (isInputStreamChanged) { - if (isInputStreamStarted) { - StreamLogManager::GetInstance()->StartupConfigUsage(); - } else { - if (AppConfig::GetInstance()->GetOpenStreamLog()) { - StreamLogManager::GetInstance()->Init(); - isInputStreamStarted = true; - } - } - } if (AppConfig::GetInstance()->ShennongSocketEnabled()) { ShennongManager::GetInstance()->Resume(); } #endif #endif + + for (auto& item : mInputRunners) { + if 
(!item->HasRegisteredPlugins()) { + item->Stop(); + } + } } shared_ptr PipelineManager::FindConfigByName(const string& configName) const { @@ -228,32 +185,18 @@ string PipelineManager::GetPluginStatistics() const { void PipelineManager::StopAllPipelines() { LOG_INFO(sLogger, ("stop all pipelines", "starts")); -#if defined(__ENTERPRISE__) && defined(__linux__) && !defined(__ANDROID__) - if (AppConfig::GetInstance()->GetOpenStreamLog()) { - StreamLogManager::GetInstance()->Shutdown(); + for (auto& item : mInputRunners) { + item->Stop(); } -#endif - PrometheusInputRunner::GetInstance()->Stop(); -#if defined(__linux__) && !defined(__ANDROID__) - ObserverManager::GetInstance()->HoldOn(true); - ebpf::eBPFServer::GetInstance()->Stop(); -#endif FileServer::GetInstance()->Stop(); - bool logProcessFlushFlag = false; - for (int i = 0; !logProcessFlushFlag && i < 500; ++i) { - logProcessFlushFlag = LogProcess::GetInstance()->FlushOut(10); - } - if (!logProcessFlushFlag) { - LOG_WARNING(sLogger, ("flush process daemon queue", "failed")); - } else { - LOG_INFO(sLogger, ("flush process daemon queue", "succeeded")); - } - LogProcess::GetInstance()->HoldOn(); + LogtailPlugin::GetInstance()->StopAllPipelines(true); + + ProcessorRunner::GetInstance()->Stop(); FlushAllBatch(); - LogtailPlugin::GetInstance()->HoldOn(true); + LogtailPlugin::GetInstance()->StopAllPipelines(false); // TODO: make it common FlusherSLS::RecycleResourceIfNotUsed(); @@ -293,21 +236,9 @@ void PipelineManager::DecreasePluginUsageCnt(const unordered_map BuildPipeline(PipelineConfig&& config); // virtual for ut @@ -57,21 +58,24 @@ class PipelineManager { void DecreasePluginUsageCnt( const std::unordered_map>& statistics); void FlushAllBatch(); - // 过渡使用 - void CheckIfInputUpdated(const Json::Value& config, - bool& isInputObserverChanged, - bool& isInputFileChanged, - bool& isInputStreamChanged, - bool& isInputContainerStdioChanged); + // TODO: 长期过渡使用 + bool CheckIfFileServerUpdated(const Json::Value& config); 
std::unordered_map> mPipelineNameEntityMap; mutable SpinLock mPluginCntMapLock; std::unordered_map> mPluginCntMap; + std::vector mInputRunners; + #ifdef APSARA_UNIT_TEST_MAIN friend class PipelineManagerMock; friend class PipelineManagerUnittest; + friend class ProcessQueueManagerUnittest; + friend class ExactlyOnceQueueManagerUnittest; + friend class BoundedProcessQueueUnittest; + friend class CircularProcessQueueUnittest; friend class CommonConfigProviderUnittest; + friend class FlusherUnittest; #endif }; diff --git a/core/pipeline/batch/Batcher.h b/core/pipeline/batch/Batcher.h index ca01986ca5..9d67e0cd90 100644 --- a/core/pipeline/batch/Batcher.h +++ b/core/pipeline/batch/Batcher.h @@ -28,7 +28,7 @@ #include "common/ParamExtractor.h" #include "models/PipelineEventGroup.h" #include "monitor/LogtailMetric.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/PipelineContext.h" #include "pipeline/batch/BatchItem.h" #include "pipeline/batch/BatchStatus.h" @@ -100,24 +100,24 @@ class Batcher { mFlusher = flusher; std::vector> labels{ - {METRIC_LABEL_PROJECT, ctx.GetProjectName()}, - {METRIC_LABEL_CONFIG_NAME, ctx.GetConfigName()}, - {METRIC_LABEL_KEY_COMPONENT_NAME, "batcher"}, - {METRIC_LABEL_KEY_FLUSHER_NODE_ID, flusher->GetNodeID()}}; + {METRIC_LABEL_KEY_PROJECT, ctx.GetProjectName()}, + {METRIC_LABEL_KEY_PIPELINE_NAME, ctx.GetConfigName()}, + {METRIC_LABEL_KEY_COMPONENT_NAME, METRIC_LABEL_VALUE_COMPONENT_NAME_BATCHER}, + {METRIC_LABEL_KEY_FLUSHER_PLUGIN_ID, flusher->GetPluginID()}}; if (enableGroupBatch) { labels.emplace_back("enable_group_batch", "true"); } else { labels.emplace_back("enable_group_batch", "false"); } WriteMetrics::GetInstance()->PrepareMetricsRecordRef(mMetricsRecordRef, std::move(labels)); - mInEventsCnt = mMetricsRecordRef.CreateCounter(METRIC_IN_EVENTS_CNT); - mInGroupDataSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_IN_EVENT_GROUP_SIZE_BYTES); - mOutEventsCnt = 
mMetricsRecordRef.CreateCounter(METRIC_OUT_EVENTS_CNT); - mTotalDelayMs = mMetricsRecordRef.CreateCounter(METRIC_TOTAL_DELAY_MS); - mEventBatchItemsCnt = mMetricsRecordRef.CreateIntGauge("event_batches_cnt"); - mBufferedGroupsCnt = mMetricsRecordRef.CreateIntGauge("buffered_groups_cnt"); - mBufferedEventsCnt = mMetricsRecordRef.CreateIntGauge("buffered_events_cnt"); - mBufferedDataSizeByte = mMetricsRecordRef.CreateIntGauge("buffered_data_size_bytes"); + mInEventsTotal = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_IN_EVENTS_TOTAL); + mInGroupDataSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_IN_SIZE_BYTES); + mOutEventsTotal = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_OUT_EVENTS_TOTAL); + mTotalDelayMs = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_TOTAL_DELAY_MS); + mEventBatchItemsTotal = mMetricsRecordRef.CreateIntGauge(METRIC_COMPONENT_BATCHER_EVENT_BATCHES_TOTAL); + mBufferedGroupsTotal = mMetricsRecordRef.CreateIntGauge(METRIC_COMPONENT_BATCHER_BUFFERED_GROUPS_TOTAL); + mBufferedEventsTotal = mMetricsRecordRef.CreateIntGauge(METRIC_COMPONENT_BATCHER_BUFFERED_EVENTS_TOTAL); + mBufferedDataSizeByte = mMetricsRecordRef.CreateIntGauge(METRIC_COMPONENT_BATCHER_BUFFERED_SIZE_BYTES); return true; } @@ -127,9 +127,9 @@ class Batcher { std::lock_guard lock(mMux); size_t key = g.GetTagsHash(); EventBatchItem& item = mEventQueueMap[key]; - mInEventsCnt->Add(g.GetEvents().size()); + mInEventsTotal->Add(g.GetEvents().size()); mInGroupDataSizeBytes->Add(g.DataSize()); - mEventBatchItemsCnt->Set(mEventQueueMap.size()); + mEventBatchItemsTotal->Set(mEventQueueMap.size()); size_t eventsSize = g.GetEvents().size(); for (size_t i = 0; i < eventsSize; ++i) { @@ -164,12 +164,12 @@ class Batcher { g.GetMetadata(EventGroupMetaKey::SOURCE_ID)); TimeoutFlushManager::GetInstance()->UpdateRecord( mFlusher->GetContext().GetConfigName(), 0, key, mEventFlushStrategy.GetTimeoutSecs(), mFlusher); - mBufferedGroupsCnt->Add(1); + mBufferedGroupsTotal->Add(1); 
mBufferedDataSizeByte->Add(item.DataSize()); } else if (i == 0) { item.AddSourceBuffer(g.GetSourceBuffer()); } - mBufferedEventsCnt->Add(1); + mBufferedEventsTotal->Add(1); mBufferedDataSizeByte->Add(e->DataSize()); item.Add(std::move(e)); if (mEventFlushStrategy.NeedFlushBySize(item.GetStatus()) @@ -201,7 +201,7 @@ class Batcher { UpdateMetricsOnFlushingEventQueue(iter->second); iter->second.Flush(res); mEventQueueMap.erase(iter); - mEventBatchItemsCnt->Set(mEventQueueMap.size()); + mEventBatchItemsTotal->Set(mEventQueueMap.size()); return; } @@ -215,7 +215,7 @@ class Batcher { } iter->second.Flush(mGroupQueue.value()); mEventQueueMap.erase(iter); - mEventBatchItemsCnt->Set(mEventQueueMap.size()); + mEventBatchItemsTotal->Set(mEventQueueMap.size()); if (mGroupFlushStrategy->NeedFlushBySize(mGroupQueue->GetStatus())) { UpdateMetricsOnFlushingGroupQueue(); mGroupQueue->Flush(res); @@ -244,7 +244,7 @@ class Batcher { UpdateMetricsOnFlushingGroupQueue(); mGroupQueue->Flush(res); } - mEventBatchItemsCnt->Set(0); + mEventBatchItemsTotal->Set(0); mEventQueueMap.clear(); } @@ -255,28 +255,28 @@ class Batcher { private: void UpdateMetricsOnFlushingEventQueue(const EventBatchItem& item) { - mOutEventsCnt->Add(item.EventSize()); + mOutEventsTotal->Add(item.EventSize()); mTotalDelayMs->Add( item.EventSize() * std::chrono::time_point_cast(std::chrono::system_clock::now()) .time_since_epoch() .count() - item.TotalEnqueTimeMs()); - mBufferedGroupsCnt->Sub(1); - mBufferedEventsCnt->Sub(item.EventSize()); + mBufferedGroupsTotal->Sub(1); + mBufferedEventsTotal->Sub(item.EventSize()); mBufferedDataSizeByte->Sub(item.DataSize()); } void UpdateMetricsOnFlushingGroupQueue() { - mOutEventsCnt->Add(mGroupQueue->EventSize()); + mOutEventsTotal->Add(mGroupQueue->EventSize()); mTotalDelayMs->Add( mGroupQueue->EventSize() * std::chrono::time_point_cast(std::chrono::system_clock::now()) .time_since_epoch() .count() - mGroupQueue->TotalEnqueTimeMs()); - 
mBufferedGroupsCnt->Sub(mGroupQueue->GroupSize()); - mBufferedEventsCnt->Sub(mGroupQueue->EventSize()); + mBufferedGroupsTotal->Sub(mGroupQueue->GroupSize()); + mBufferedEventsTotal->Sub(mGroupQueue->EventSize()); mBufferedDataSizeByte->Sub(mGroupQueue->DataSize()); } @@ -290,13 +290,13 @@ class Batcher { Flusher* mFlusher = nullptr; mutable MetricsRecordRef mMetricsRecordRef; - CounterPtr mInEventsCnt; + CounterPtr mInEventsTotal; CounterPtr mInGroupDataSizeBytes; - CounterPtr mOutEventsCnt; + CounterPtr mOutEventsTotal; CounterPtr mTotalDelayMs; - IntGaugePtr mEventBatchItemsCnt; - IntGaugePtr mBufferedGroupsCnt; - IntGaugePtr mBufferedEventsCnt; + IntGaugePtr mEventBatchItemsTotal; + IntGaugePtr mBufferedGroupsTotal; + IntGaugePtr mBufferedEventsTotal; IntGaugePtr mBufferedDataSizeByte; #ifdef APSARA_UNIT_TEST_MAIN diff --git a/core/pipeline/limiter/ConcurrencyLimiter.cpp b/core/pipeline/limiter/ConcurrencyLimiter.cpp index a7a8be80a6..1fdb45ec7b 100644 --- a/core/pipeline/limiter/ConcurrencyLimiter.cpp +++ b/core/pipeline/limiter/ConcurrencyLimiter.cpp @@ -18,21 +18,73 @@ using namespace std; namespace logtail { +#ifdef APSARA_UNIT_TEST_MAIN +uint32_t ConcurrencyLimiter::GetCurrentLimit() const { + lock_guard lock(mLimiterMux); + return mCurrenctConcurrency; +} + +uint32_t ConcurrencyLimiter::GetCurrentInterval() const { + lock_guard lock(mLimiterMux); + return mRetryIntervalSecs; +} +void ConcurrencyLimiter::SetCurrentLimit(uint32_t limit) { + lock_guard lock(mLimiterMux); + mCurrenctConcurrency = limit; +} + +void ConcurrencyLimiter::SetInSendingCount(uint32_t count) { + mInSendingCnt.store(count); +} +uint32_t ConcurrencyLimiter::GetInSendingCount() const { return mInSendingCnt.load(); } + +#endif + + bool ConcurrencyLimiter::IsValidToPop() { - return mLimit != 0; + lock_guard lock(mLimiterMux); + if (mCurrenctConcurrency == 0) { + auto curTime = std::chrono::system_clock::now(); + if (chrono::duration_cast(curTime - mLastCheckTime).count() > 
mRetryIntervalSecs) { + mLastCheckTime = curTime; + return true; + } else { + return false; + } + } + if (mCurrenctConcurrency > mInSendingCnt.load()) { + return true; + } + return false; } void ConcurrencyLimiter::PostPop() { - if (mLimit <= 0) { - return; - } - --mLimit; + ++mInSendingCnt; +} + +void ConcurrencyLimiter::OnSendDone() { + --mInSendingCnt; } void ConcurrencyLimiter::OnSuccess() { + lock_guard lock(mLimiterMux); + if (mCurrenctConcurrency <= 0) { + mRetryIntervalSecs = mMinRetryIntervalSecs; + } + if (mCurrenctConcurrency != mMaxConcurrency) { + ++mCurrenctConcurrency; + } } -void ConcurrencyLimiter::OnFail(time_t curTime) { +void ConcurrencyLimiter::OnFail() { + lock_guard lock(mLimiterMux); + if (mCurrenctConcurrency != 0) { + mCurrenctConcurrency = static_cast(mCurrenctConcurrency * mConcurrencyDownRatio); + } else { + if (mRetryIntervalSecs != mMaxRetryIntervalSecs) { + mRetryIntervalSecs = min(mMaxRetryIntervalSecs, static_cast(mRetryIntervalSecs * mRetryIntervalUpRatio)); + } + } } } // namespace logtail diff --git a/core/pipeline/limiter/ConcurrencyLimiter.h b/core/pipeline/limiter/ConcurrencyLimiter.h index 31a114817b..3228124eb4 100644 --- a/core/pipeline/limiter/ConcurrencyLimiter.h +++ b/core/pipeline/limiter/ConcurrencyLimiter.h @@ -17,25 +17,68 @@ #pragma once #include -#include +#include +#include +#include +#include + +#include "monitor/metric_constants/MetricConstants.h" namespace logtail { class ConcurrencyLimiter { public: + ConcurrencyLimiter(uint32_t maxConcurrency, uint32_t maxRetryIntervalSecs = 3600, + uint32_t minRetryIntervalSecs = 30, double retryIntervalUpRatio = 1.5, double concurrencyDownRatio = 0.5) : + mMaxConcurrency(maxConcurrency), mCurrenctConcurrency(maxConcurrency), + mMaxRetryIntervalSecs(maxRetryIntervalSecs), mMinRetryIntervalSecs(minRetryIntervalSecs), + mRetryIntervalSecs(minRetryIntervalSecs), mRetryIntervalUpRatio(retryIntervalUpRatio), mConcurrencyDownRatio(concurrencyDownRatio) {} + bool IsValidToPop(); 
void PostPop(); + void OnSendDone(); + void OnSuccess(); - void OnFail(time_t curTime); + void OnFail(); + + static std::string GetLimiterMetricName(const std::string& limiter) { + if (limiter == "region") { + return METRIC_COMPONENT_FETCH_REJECTED_BY_REGION_LIMITER_TIMES_TOTAL; + } else if (limiter == "project") { + return METRIC_COMPONENT_FETCH_REJECTED_BY_PROJECT_LIMITER_TIMES_TOTAL; + } else if (limiter == "logstore") { + return METRIC_COMPONENT_FETCH_REJECTED_BY_LOGSTORE_LIMITER_TIMES_TOTAL; + } + return limiter; + } #ifdef APSARA_UNIT_TEST_MAIN - void Reset() { mLimit = -1; } - void SetLimit(int limit) { mLimit = limit; } - int GetLimit() const { return mLimit; } + + uint32_t GetCurrentLimit() const; + uint32_t GetCurrentInterval() const; + void SetCurrentLimit(uint32_t limit); + void SetInSendingCount(uint32_t count); + uint32_t GetInSendingCount() const; + #endif private: - std::atomic_int mLimit = -1; + std::atomic_uint32_t mInSendingCnt = 0U; + + uint32_t mMaxConcurrency = 0; + + mutable std::mutex mLimiterMux; + uint32_t mCurrenctConcurrency = 0; + + uint32_t mMaxRetryIntervalSecs = 0; + uint32_t mMinRetryIntervalSecs = 0; + + uint32_t mRetryIntervalSecs = 0; + + double mRetryIntervalUpRatio = 0.0; + double mConcurrencyDownRatio = 0.0; + + std::chrono::system_clock::time_point mLastCheckTime; }; } // namespace logtail diff --git a/core/pipeline/plugin/PluginRegistry.cpp b/core/pipeline/plugin/PluginRegistry.cpp index 5beeb2ab0e..2135a81a06 100644 --- a/core/pipeline/plugin/PluginRegistry.cpp +++ b/core/pipeline/plugin/PluginRegistry.cpp @@ -36,9 +36,6 @@ #include "plugin/input/InputNetworkSecurity.h" #include "plugin/input/InputProcessSecurity.h" #include "plugin/input/InputObserverNetwork.h" -#ifdef __ENTERPRISE__ -#include "plugin/input/InputStream.h" -#endif #endif #include "logger/Logger.h" #include "pipeline/plugin/creator/CProcessor.h" @@ -136,9 +133,6 @@ void PluginRegistry::LoadStaticPlugins() { RegisterInputCreator(new StaticInputCreator()); 
RegisterInputCreator(new StaticInputCreator()); RegisterInputCreator(new StaticInputCreator()); -#ifdef __ENTERPRISE__ - RegisterInputCreator(new StaticInputCreator()); -#endif #endif RegisterProcessorCreator(new StaticProcessorCreator()); @@ -171,6 +165,7 @@ void PluginRegistry::LoadDynamicPlugins(const set& plugins) { return; } string error; + // 动态插件加载 auto pluginDir = AppConfig::GetInstance()->GetProcessExecutionDir() + "/plugins"; for (auto& pluginType : plugins) { DynamicLibLoader loader; diff --git a/core/pipeline/plugin/instance/FlusherInstance.cpp b/core/pipeline/plugin/instance/FlusherInstance.cpp index 997f75fe4b..04ddd12b06 100644 --- a/core/pipeline/plugin/instance/FlusherInstance.cpp +++ b/core/pipeline/plugin/instance/FlusherInstance.cpp @@ -14,25 +14,25 @@ #include "pipeline/plugin/instance/FlusherInstance.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" namespace logtail { bool FlusherInstance::Init(const Json::Value& config, PipelineContext& context, Json::Value& optionalGoPipeline) { mPlugin->SetContext(context); - mPlugin->SetNodeID(NodeID()); - mPlugin->SetMetricsRecordRef(Name(), PluginID(), NodeID(), ChildNodeID()); + mPlugin->SetPluginID(PluginID()); + mPlugin->SetMetricsRecordRef(Name(), PluginID()); if (!mPlugin->Init(config, optionalGoPipeline)) { return false; } - mInEventsCnt = mPlugin->GetMetricsRecordRef().CreateCounter(METRIC_IN_EVENTS_CNT); - mInGroupDataSizeBytes = mPlugin->GetMetricsRecordRef().CreateCounter(METRIC_IN_EVENT_GROUP_SIZE_BYTES); + mInEventsTotal = mPlugin->GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_IN_EVENTS_TOTAL); + mInSizeBytes = mPlugin->GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_IN_SIZE_BYTES); return true; } bool FlusherInstance::Send(PipelineEventGroup&& g) { - mInEventsCnt->Add(g.GetEvents().size()); - mInGroupDataSizeBytes->Add(g.DataSize()); + mInEventsTotal->Add(g.GetEvents().size()); + mInSizeBytes->Add(g.DataSize()); return 
mPlugin->Send(std::move(g)); } diff --git a/core/pipeline/plugin/instance/FlusherInstance.h b/core/pipeline/plugin/instance/FlusherInstance.h index 4abec0d766..6ce367051d 100644 --- a/core/pipeline/plugin/instance/FlusherInstance.h +++ b/core/pipeline/plugin/instance/FlusherInstance.h @@ -46,8 +46,8 @@ class FlusherInstance : public PluginInstance { private: std::unique_ptr mPlugin; - CounterPtr mInEventsCnt; - CounterPtr mInGroupDataSizeBytes; + CounterPtr mInEventsTotal; + CounterPtr mInSizeBytes; }; } // namespace logtail diff --git a/core/pipeline/plugin/instance/InputInstance.cpp b/core/pipeline/plugin/instance/InputInstance.cpp index 8b3dfffc31..0ed85148ba 100644 --- a/core/pipeline/plugin/instance/InputInstance.cpp +++ b/core/pipeline/plugin/instance/InputInstance.cpp @@ -20,7 +20,7 @@ bool InputInstance::Init(const Json::Value& config, size_t inputIdx, Json::Value& optionalGoPipeline) { mPlugin->SetContext(context); - mPlugin->SetMetricsRecordRef(Name(), PluginID(), NodeID(), ChildNodeID()); + mPlugin->SetMetricsRecordRef(Name(), PluginID()); mPlugin->SetInputIndex(inputIdx); if (!mPlugin->Init(config, optionalGoPipeline)) { return false; diff --git a/core/pipeline/plugin/instance/PluginInstance.h b/core/pipeline/plugin/instance/PluginInstance.h index 27ae627cd4..34bf33eb0d 100644 --- a/core/pipeline/plugin/instance/PluginInstance.h +++ b/core/pipeline/plugin/instance/PluginInstance.h @@ -23,19 +23,15 @@ namespace logtail { class PluginInstance { public: struct PluginMeta { - PluginMeta(std::string pluginID, std::string nodeID, std::string childNodeID) - : mPluginID(pluginID), mNodeID(nodeID), mChildNodeID(childNodeID) {} + PluginMeta(std::string pluginID) + : mPluginID(pluginID) {} std::string mPluginID; - std::string mNodeID; - std::string mChildNodeID; }; PluginInstance(const PluginMeta& pluginMeta) : mMeta(pluginMeta) {} virtual ~PluginInstance() = default; const PluginMeta& Meta() const { return mMeta; } const std::string PluginID() const { return 
mMeta.mPluginID; } - const std::string NodeID() const { return mMeta.mNodeID; } - const std::string ChildNodeID() const { return mMeta.mChildNodeID; } virtual const std::string& Name() const = 0; diff --git a/core/pipeline/plugin/instance/ProcessorInstance.cpp b/core/pipeline/plugin/instance/ProcessorInstance.cpp index 70dbc5a172..b779cc9f52 100644 --- a/core/pipeline/plugin/instance/ProcessorInstance.cpp +++ b/core/pipeline/plugin/instance/ProcessorInstance.cpp @@ -20,7 +20,7 @@ #include "common/TimeUtil.h" #include "logger/Logger.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" using namespace std; @@ -28,33 +28,37 @@ namespace logtail { bool ProcessorInstance::Init(const Json::Value& config, PipelineContext& context) { mPlugin->SetContext(context); - mPlugin->SetMetricsRecordRef(Name(), PluginID(), NodeID(), ChildNodeID()); + mPlugin->SetMetricsRecordRef(Name(), PluginID()); if (!mPlugin->Init(config)) { return false; } // should init plugin first, then could GetMetricsRecordRef from plugin - mProcInRecordsTotal = mPlugin->GetMetricsRecordRef().CreateCounter(METRIC_PROC_IN_RECORDS_TOTAL); - mProcOutRecordsTotal = mPlugin->GetMetricsRecordRef().CreateCounter(METRIC_PROC_OUT_RECORDS_TOTAL); - mProcTimeMS = mPlugin->GetMetricsRecordRef().CreateCounter(METRIC_PROC_TIME_MS); + mInEventsTotal = mPlugin->GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_IN_EVENTS_TOTAL); + mOutEventsTotal = mPlugin->GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_EVENTS_TOTAL); + mInSizeBytes = mPlugin->GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_IN_SIZE_BYTES); + mOutSizeBytes = mPlugin->GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_SIZE_BYTES); + mTotalProcessTimeMs = mPlugin->GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_TOTAL_PROCESS_TIME_MS); return true; } -void ProcessorInstance::Process(vector& logGroupList) { - if (logGroupList.empty()) { +void ProcessorInstance::Process(vector& eventGroupList) { + if 
(eventGroupList.empty()) { return; } - for (const auto& logGroup : logGroupList) { - mProcInRecordsTotal->Add(logGroup.GetEvents().size()); + for (const auto& eventGroup : eventGroupList) { + mInEventsTotal->Add(eventGroup.GetEvents().size()); + mInSizeBytes->Add(eventGroup.DataSize()); } auto before = chrono::system_clock::now(); - mPlugin->Process(logGroupList); - mProcTimeMS->Add(chrono::duration_cast(chrono::system_clock::now() - before).count()); + mPlugin->Process(eventGroupList); + mTotalProcessTimeMs->Add(chrono::duration_cast(chrono::system_clock::now() - before).count()); - for (const auto& logGroup : logGroupList) { - mProcOutRecordsTotal->Add(logGroup.GetEvents().size()); + for (const auto& eventGroup : eventGroupList) { + mOutEventsTotal->Add(eventGroup.GetEvents().size()); + mOutSizeBytes->Add(eventGroup.DataSize()); } } diff --git a/core/pipeline/plugin/instance/ProcessorInstance.h b/core/pipeline/plugin/instance/ProcessorInstance.h index 5f96756688..eb7f7afb90 100644 --- a/core/pipeline/plugin/instance/ProcessorInstance.h +++ b/core/pipeline/plugin/instance/ProcessorInstance.h @@ -40,11 +40,11 @@ class ProcessorInstance : public PluginInstance { private: std::unique_ptr mPlugin; - CounterPtr mProcInRecordsTotal; - CounterPtr mProcOutRecordsTotal; - // CounterPtr mProcInRecordsSizeBytes; - // CounterPtr mProcOutRecordsSizeBytes; - CounterPtr mProcTimeMS; + CounterPtr mInEventsTotal; + CounterPtr mOutEventsTotal; + CounterPtr mInSizeBytes; + CounterPtr mOutSizeBytes; + CounterPtr mTotalProcessTimeMs; #ifdef APSARA_UNIT_TEST_MAIN friend class ProcessorInstanceUnittest; diff --git a/core/pipeline/plugin/interface/Flusher.cpp b/core/pipeline/plugin/interface/Flusher.cpp index 432b184fd2..e5c9edae1f 100644 --- a/core/pipeline/plugin/interface/Flusher.cpp +++ b/core/pipeline/plugin/interface/Flusher.cpp @@ -29,28 +29,29 @@ bool Flusher::Start() { } bool Flusher::Stop(bool isPipelineRemoving) { + // TODO: temporarily used here + 
SetPipelineForItemsWhenStop(); SenderQueueManager::GetInstance()->DeleteQueue(mQueueKey); return true; } +void Flusher::SetPipelineForItemsWhenStop() { + if (HasContext()) { + const auto& pipeline = PipelineManager::GetInstance()->FindConfigByName(mContext->GetConfigName()); + if (!pipeline) { + LOG_ERROR(sLogger, ("failed to get pipeline context", "context not found")("action", "not set pipeline")); + return; + } + SenderQueueManager::GetInstance()->SetPipelineForItems(mQueueKey, pipeline); + } +} + void Flusher::GenerateQueueKey(const std::string& target) { mQueueKey = QueueKeyManager::GetInstance()->GetKey((HasContext() ? mContext->GetConfigName() : "") + "-" + Name() + "-" + target); } bool Flusher::PushToQueue(unique_ptr&& item, uint32_t retryTimes) { -#ifndef APSARA_UNIT_TEST_MAIN - // TODO: temporarily set here, should be removed after independent config update refactor - if (item->mFlusher->HasContext()) { - item->mPipeline - = PipelineManager::GetInstance()->FindConfigByName(item->mFlusher->GetContext().GetConfigName()); - if (!item->mPipeline) { - // should not happen - return false; - } - } -#endif - const string& str = QueueKeyManager::GetInstance()->GetName(item->mQueueKey); for (size_t i = 0; i < retryTimes; ++i) { int rst = SenderQueueManager::GetInstance()->PushQueue(item->mQueueKey, std::move(item)); @@ -85,7 +86,7 @@ bool Flusher::PushToQueue(unique_ptr&& item, uint32_t retryTime void Flusher::DealSenderQueueItemAfterSend(SenderQueueItem* item, bool keep) { if (keep) { - item->mStatus = SendingStatus::IDLE; + item->mStatus.Set(SendingStatus::IDLE); ++item->mTryCnt; } else { // TODO: because current profile has a dummy flusher, we have to use item->mQueueKey here diff --git a/core/pipeline/plugin/interface/Flusher.h b/core/pipeline/plugin/interface/Flusher.h index d643559611..6bf3301477 100644 --- a/core/pipeline/plugin/interface/Flusher.h +++ b/core/pipeline/plugin/interface/Flusher.h @@ -43,20 +43,22 @@ class Flusher : public Plugin { virtual 
SinkType GetSinkType() { return SinkType::NONE; } QueueKey GetQueueKey() const { return mQueueKey; } - void SetNodeID(const std::string& nodeID) { mNodeID = nodeID; } - const std::string& GetNodeID() const { return mNodeID; } + void SetPluginID(const std::string& pluginID) { mPluginID = pluginID; } + const std::string& GetPluginID() const { return mPluginID; } protected: void GenerateQueueKey(const std::string& target); bool PushToQueue(std::unique_ptr&& item, uint32_t retryTimes = 500); void DealSenderQueueItemAfterSend(SenderQueueItem* item, bool keep); + void SetPipelineForItemsWhenStop(); QueueKey mQueueKey; - std::string mNodeID; + std::string mPluginID; #ifdef APSARA_UNIT_TEST_MAIN friend class FlusherInstanceUnittest; friend class FlusherRunnerUnittest; + friend class FlusherUnittest; #endif }; diff --git a/core/pipeline/plugin/interface/Plugin.h b/core/pipeline/plugin/interface/Plugin.h index e63d098ae3..1c44743968 100644 --- a/core/pipeline/plugin/interface/Plugin.h +++ b/core/pipeline/plugin/interface/Plugin.h @@ -21,6 +21,7 @@ #include #include "monitor/LogtailMetric.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/PipelineContext.h" namespace logtail { @@ -35,22 +36,13 @@ class Plugin { bool HasContext() const { return mContext != nullptr; } void SetContext(PipelineContext& context) { mContext = &context; } MetricsRecordRef& GetMetricsRecordRef() const { return mMetricsRecordRef; } - void SetMetricsRecordRef(const std::string& name, - const std::string& id, - const std::string& nodeID, - const std::string& childNodeID) { - std::vector> labels; - WriteMetrics::GetInstance()->PreparePluginCommonLabels(mContext->GetProjectName(), - mContext->GetLogstoreName(), - mContext->GetRegion(), - mContext->GetConfigName(), - name, - id, - nodeID, - childNodeID, - labels); - - WriteMetrics::GetInstance()->PrepareMetricsRecordRef(mMetricsRecordRef, std::move(labels)); + void SetMetricsRecordRef(const std::string& name, const std::string& 
id) { + WriteMetrics::GetInstance()->PrepareMetricsRecordRef( + mMetricsRecordRef, + {{METRIC_LABEL_KEY_PROJECT, mContext->GetProjectName()}, + {METRIC_LABEL_KEY_PIPELINE_NAME, mContext->GetConfigName()}, + {METRIC_LABEL_KEY_PLUGIN_TYPE, name}, + {METRIC_LABEL_KEY_PLUGIN_ID, id}}); } protected: diff --git a/core/pipeline/queue/BoundedProcessQueue.cpp b/core/pipeline/queue/BoundedProcessQueue.cpp index ced855e43e..9f4273dc23 100644 --- a/core/pipeline/queue/BoundedProcessQueue.cpp +++ b/core/pipeline/queue/BoundedProcessQueue.cpp @@ -14,6 +14,8 @@ #include "pipeline/queue/BoundedProcessQueue.h" +#include "pipeline/PipelineManager.h" + using namespace std; namespace logtail { @@ -35,12 +37,12 @@ bool BoundedProcessQueue::Push(unique_ptr&& item) { } item->mEnqueTime = chrono::system_clock::now(); auto size = item->mEventGroup.DataSize(); - mQueue.push(std::move(item)); + mQueue.push_back(std::move(item)); ChangeStateIfNeededAfterPush(); - mInItemsCnt->Add(1); + mInItemsTotal->Add(1); mInItemDataSizeBytes->Add(size); - mQueueSize->Set(Size()); + mQueueSizeTotal->Set(Size()); mQueueDataSizeByte->Add(size); mValidToPushFlag->Set(IsValidToPush()); return true; @@ -51,20 +53,29 @@ bool BoundedProcessQueue::Pop(unique_ptr& item) { return false; } item = std::move(mQueue.front()); - mQueue.pop(); + mQueue.pop_front(); + item->AddPipelineInProcessCnt(GetConfigName()); if (ChangeStateIfNeededAfterPop()) { GiveFeedback(); } - mOutItemsCnt->Add(1); + mOutItemsTotal->Add(1); mTotalDelayMs->Add( chrono::duration_cast(chrono::system_clock::now() - item->mEnqueTime).count()); - mQueueSize->Set(Size()); + mQueueSizeTotal->Set(Size()); mQueueDataSizeByte->Sub(item->mEventGroup.DataSize()); mValidToPushFlag->Set(IsValidToPush()); return true; } +void BoundedProcessQueue::SetPipelineForItems(const std::shared_ptr& p) const { + for (auto& item : mQueue) { + if (!item->mPipeline) { + item->mPipeline = p; + } + } +} + void BoundedProcessQueue::SetUpStreamFeedbacks(vector&& feedbacks) { 
mUpStreamFeedbacks.clear(); for (auto& item : feedbacks) { diff --git a/core/pipeline/queue/BoundedProcessQueue.h b/core/pipeline/queue/BoundedProcessQueue.h index 78c7772fd0..728abe0bbe 100644 --- a/core/pipeline/queue/BoundedProcessQueue.h +++ b/core/pipeline/queue/BoundedProcessQueue.h @@ -31,10 +31,12 @@ namespace logtail { class BoundedProcessQueue : public BoundedQueueInterface>, public ProcessQueueInterface { public: - BoundedProcessQueue(size_t cap, size_t low, size_t high, int64_t key, uint32_t priority, const PipelineContext& ctx); + BoundedProcessQueue( + size_t cap, size_t low, size_t high, int64_t key, uint32_t priority, const PipelineContext& ctx); bool Push(std::unique_ptr&& item) override; bool Pop(std::unique_ptr& item) override; + void SetPipelineForItems(const std::shared_ptr& p) const override; void SetUpStreamFeedbacks(std::vector&& feedbacks); @@ -43,7 +45,7 @@ class BoundedProcessQueue : public BoundedQueueInterface> mQueue; + std::deque> mQueue; std::vector mUpStreamFeedbacks; #ifdef APSARA_UNIT_TEST_MAIN diff --git a/core/pipeline/queue/BoundedQueueInterface.h b/core/pipeline/queue/BoundedQueueInterface.h index 350e22be37..c8102a5fbd 100644 --- a/core/pipeline/queue/BoundedQueueInterface.h +++ b/core/pipeline/queue/BoundedQueueInterface.h @@ -26,7 +26,7 @@ class BoundedQueueInterface : virtual public QueueInterface { BoundedQueueInterface(QueueKey key, size_t cap, size_t low, size_t high, const PipelineContext& ctx) : QueueInterface(key, cap, ctx), mLowWatermark(low), mHighWatermark(high) { this->mMetricsRecordRef.AddLabels({{METRIC_LABEL_KEY_QUEUE_TYPE, "bounded"}}); - mValidToPushFlag = this->mMetricsRecordRef.CreateIntGauge("valid_to_push"); + mValidToPushFlag = this->mMetricsRecordRef.CreateIntGauge(METRIC_COMPONENT_QUEUE_VALID_TO_PUSH_FLAG); } virtual ~BoundedQueueInterface() = default; diff --git a/core/pipeline/queue/BoundedSenderQueueInterface.cpp b/core/pipeline/queue/BoundedSenderQueueInterface.cpp index 03e26ebac3..2fe599c8b2 
100644 --- a/core/pipeline/queue/BoundedSenderQueueInterface.cpp +++ b/core/pipeline/queue/BoundedSenderQueueInterface.cpp @@ -24,10 +24,11 @@ FeedbackInterface* BoundedSenderQueueInterface::sFeedback = nullptr; BoundedSenderQueueInterface::BoundedSenderQueueInterface( size_t cap, size_t low, size_t high, QueueKey key, const string& flusherId, const PipelineContext& ctx) : QueueInterface(key, cap, ctx), BoundedQueueInterface>(key, cap, low, high, ctx) { - mMetricsRecordRef.AddLabels({{METRIC_LABEL_KEY_COMPONENT_NAME, "sender_queue"}}); - mMetricsRecordRef.AddLabels({{METRIC_LABEL_KEY_FLUSHER_NODE_ID, flusherId}}); - mExtraBufferSize = mMetricsRecordRef.CreateIntGauge("extra_buffer_size"); - mExtraBufferDataSizeBytes = mMetricsRecordRef.CreateIntGauge("extra_buffer_data_size_bytes"); + mMetricsRecordRef.AddLabels({{METRIC_LABEL_KEY_COMPONENT_NAME, METRIC_LABEL_VALUE_COMPONENT_NAME_SENDER_QUEUE}}); + mMetricsRecordRef.AddLabels({{METRIC_LABEL_KEY_FLUSHER_PLUGIN_ID, flusherId}}); + mExtraBufferSize = mMetricsRecordRef.CreateIntGauge(METRIC_COMPONENT_QUEUE_EXTRA_BUFFER_SIZE); + mRejectedByRateLimiterCnt = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_FETCH_REJECTED_BY_RATE_LIMITER_TIMES_TOTAL); + mExtraBufferDataSizeBytes = mMetricsRecordRef.CreateIntGauge(METRIC_COMPONENT_QUEUE_EXTRA_BUFFER_SIZE_BYTES); } void BoundedSenderQueueInterface::SetFeedback(FeedbackInterface* feedback) { @@ -44,14 +45,30 @@ void BoundedSenderQueueInterface::SetRateLimiter(uint32_t maxRate) { } } -void BoundedSenderQueueInterface::SetConcurrencyLimiters(std::vector>&& limiters) { +void BoundedSenderQueueInterface::SetConcurrencyLimiters(std::unordered_map>&& concurrencyLimitersMap) { mConcurrencyLimiters.clear(); - for (auto& item : limiters) { - if (item == nullptr) { + for (const auto& item : concurrencyLimitersMap) { + if (item.second == nullptr) { // should not happen continue; } - mConcurrencyLimiters.emplace_back(item); + mConcurrencyLimiters.emplace_back(item.second, 
mMetricsRecordRef.CreateCounter(ConcurrencyLimiter::GetLimiterMetricName(item.first))); + } +} + +void BoundedSenderQueueInterface::OnSendingSuccess() { + for (auto& limiter : mConcurrencyLimiters) { + if (limiter.first != nullptr) { + limiter.first->OnSuccess(); + } + } +} + +void BoundedSenderQueueInterface::DecreaseSendingCnt() { + for (auto& limiter : mConcurrencyLimiters) { + if (limiter.first != nullptr) { + limiter.first->OnSendDone(); + } } } @@ -61,7 +78,7 @@ void BoundedSenderQueueInterface::GiveFeedback() const { } void BoundedSenderQueueInterface::Reset(size_t cap, size_t low, size_t high) { - queue>().swap(mExtraBuffer); + deque>().swap(mExtraBuffer); mRateLimiter.reset(); mConcurrencyLimiters.clear(); BoundedQueueInterface::Reset(low, high); diff --git a/core/pipeline/queue/BoundedSenderQueueInterface.h b/core/pipeline/queue/BoundedSenderQueueInterface.h index 5828790ae4..dfb3495415 100644 --- a/core/pipeline/queue/BoundedSenderQueueInterface.h +++ b/core/pipeline/queue/BoundedSenderQueueInterface.h @@ -20,6 +20,7 @@ #include #include #include +#include #include "common/FeedbackInterface.h" #include "pipeline/limiter/ConcurrencyLimiter.h" @@ -43,14 +44,18 @@ class BoundedSenderQueueInterface : public BoundedQueueInterface& item) override { return false; } virtual bool Remove(SenderQueueItem* item) = 0; - virtual void GetAllAvailableItems(std::vector& items, bool withLimits = true) = 0; + + virtual void GetAvailableItems(std::vector& items, int32_t limit) = 0; + void DecreaseSendingCnt(); + void OnSendingSuccess(); void SetRateLimiter(uint32_t maxRate); - void SetConcurrencyLimiters(std::vector>&& limiters); + void SetConcurrencyLimiters(std::unordered_map>&& concurrencyLimitersMap); + virtual void SetPipelineForItems(const std::shared_ptr& p) const = 0; #ifdef APSARA_UNIT_TEST_MAIN std::optional& GetRateLimiter() { return mRateLimiter; } - std::vector>& GetConcurrencyLimiters() { return mConcurrencyLimiters; } + std::vector, CounterPtr>>& 
GetConcurrencyLimiters() { return mConcurrencyLimiters; } #endif protected: @@ -60,12 +65,17 @@ class BoundedSenderQueueInterface : public BoundedQueueInterface mRateLimiter; - std::vector> mConcurrencyLimiters; + std::vector, CounterPtr>> mConcurrencyLimiters; - std::queue> mExtraBuffer; + std::deque> mExtraBuffer; IntGaugePtr mExtraBufferSize; IntGaugePtr mExtraBufferDataSizeBytes; + CounterPtr mRejectedByRateLimiterCnt; + +#ifdef APSARA_UNIT_TEST_MAIN + friend class FlusherUnittest; +#endif }; } // namespace logtail diff --git a/core/pipeline/queue/CircularProcessQueue.cpp b/core/pipeline/queue/CircularProcessQueue.cpp index c6df833e7f..91049425c6 100644 --- a/core/pipeline/queue/CircularProcessQueue.cpp +++ b/core/pipeline/queue/CircularProcessQueue.cpp @@ -15,6 +15,7 @@ #include "pipeline/queue/CircularProcessQueue.h" #include "logger/Logger.h" +#include "pipeline/PipelineManager.h" #include "pipeline/queue/QueueKeyManager.h" using namespace std; @@ -24,7 +25,7 @@ namespace logtail { CircularProcessQueue::CircularProcessQueue(size_t cap, int64_t key, uint32_t priority, const PipelineContext& ctx) : QueueInterface>(key, cap, ctx), ProcessQueueInterface(key, cap, priority, ctx) { mMetricsRecordRef.AddLabels({{METRIC_LABEL_KEY_QUEUE_TYPE, "circular"}}); - mDroppedEventsCnt = mMetricsRecordRef.CreateCounter("dropped_events_cnt"); + mDiscardedEventsTotal = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_QUEUE_DISCARDED_EVENTS_TOTAL); WriteMetrics::GetInstance()->CommitMetricsRecordRef(mMetricsRecordRef); } @@ -35,9 +36,9 @@ bool CircularProcessQueue::Push(unique_ptr&& item) { auto size = mQueue.front()->mEventGroup.DataSize(); mEventCnt -= cnt; mQueue.pop_front(); - mQueueSize->Set(Size()); + mQueueSizeTotal->Set(Size()); mQueueDataSizeByte->Sub(size); - mDroppedEventsCnt->Add(cnt); + mDiscardedEventsTotal->Add(cnt); } if (mEventCnt + newCnt > mCapacity) { return false; @@ -47,9 +48,9 @@ bool CircularProcessQueue::Push(unique_ptr&& item) { 
mQueue.push_back(std::move(item)); mEventCnt += newCnt; - mInItemsCnt->Add(1); + mInItemsTotal->Add(1); mInItemDataSizeBytes->Add(size); - mQueueSize->Set(Size()); + mQueueSizeTotal->Set(Size()); mQueueDataSizeByte->Add(size); return true; } @@ -59,18 +60,27 @@ bool CircularProcessQueue::Pop(unique_ptr& item) { return false; } item = std::move(mQueue.front()); + item->AddPipelineInProcessCnt(GetConfigName()); mQueue.pop_front(); mEventCnt -= item->mEventGroup.GetEvents().size(); - mOutItemsCnt->Add(1); + mOutItemsTotal->Add(1); mTotalDelayMs->Add( std::chrono::duration_cast(std::chrono::system_clock::now() - item->mEnqueTime) .count()); - mQueueSize->Set(Size()); + mQueueSizeTotal->Set(Size()); mQueueDataSizeByte->Sub(item->mEventGroup.DataSize()); return true; } +void CircularProcessQueue::SetPipelineForItems(const std::shared_ptr& p) const { + for (auto& item : mQueue) { + if (!item->mPipeline) { + item->mPipeline = p; + } + } +} + void CircularProcessQueue::Reset(size_t cap) { // it seems more reasonable to retain extra items and process them immediately, however this contray to current // framework design so we simply discard extra items, considering that it is a rare case to change capacity diff --git a/core/pipeline/queue/CircularProcessQueue.h b/core/pipeline/queue/CircularProcessQueue.h index 813929c799..db05d038ea 100644 --- a/core/pipeline/queue/CircularProcessQueue.h +++ b/core/pipeline/queue/CircularProcessQueue.h @@ -33,6 +33,7 @@ class CircularProcessQueue : virtual public QueueInterface&& item) override; bool Pop(std::unique_ptr& item) override; + void SetPipelineForItems(const std::shared_ptr& p) const override; void Reset(size_t cap); @@ -42,7 +43,7 @@ class CircularProcessQueue : virtual public QueueInterface> mQueue; size_t mEventCnt = 0; - CounterPtr mDroppedEventsCnt; + CounterPtr mDiscardedEventsTotal; #ifdef APSARA_UNIT_TEST_MAIN friend class CircularProcessQueueUnittest; diff --git a/core/pipeline/queue/ExactlyOnceQueueManager.cpp 
b/core/pipeline/queue/ExactlyOnceQueueManager.cpp index 3c66b636e3..7446f6a9bc 100644 --- a/core/pipeline/queue/ExactlyOnceQueueManager.cpp +++ b/core/pipeline/queue/ExactlyOnceQueueManager.cpp @@ -16,11 +16,11 @@ #include "common/Flags.h" #include "common/TimeUtil.h" -#include "plugin/input/InputFeedbackInterfaceRegistry.h" -#include "plugin/input/InputFile.h" #include "logger/Logger.h" #include "pipeline/queue/ProcessQueueManager.h" #include "pipeline/queue/QueueKeyManager.h" +#include "plugin/input/InputFeedbackInterfaceRegistry.h" +#include "plugin/input/InputFile.h" DEFINE_FLAG_INT32(logtail_queue_gc_threshold_sec, "2min", 2 * 60); DEFINE_FLAG_INT64(logtail_queue_max_used_time_per_round_in_msec, "500ms", 500); @@ -148,20 +148,26 @@ bool ExactlyOnceQueueManager::IsAllProcessQueueEmpty() const { return true; } -void ExactlyOnceQueueManager::InvalidatePopProcessQueue(const string& configName) { +void ExactlyOnceQueueManager::DisablePopProcessQueue(const string& configName, bool isPipelineRemoving) { lock_guard lock(mProcessQueueMux); for (auto& iter : mProcessQueues) { if (iter.second->GetConfigName() == configName) { - iter.second->InvalidatePop(); + iter.second->DisablePop(); + if (!isPipelineRemoving) { + const auto& p = PipelineManager::GetInstance()->FindConfigByName(configName); + if (p) { + iter.second->SetPipelineForItems(p); + } + } } } } -void ExactlyOnceQueueManager::ValidatePopProcessQueue(const string& configName) { +void ExactlyOnceQueueManager::EnablePopProcessQueue(const string& configName) { lock_guard lock(mProcessQueueMux); for (auto& iter : mProcessQueues) { if (iter.second->GetConfigName() == configName) { - iter.second->ValidatePop(); + iter.second->EnablePop(); } } } @@ -178,10 +184,10 @@ int ExactlyOnceQueueManager::PushSenderQueue(QueueKey key, unique_ptr& item, bool withLimits) { +void ExactlyOnceQueueManager::GetAvailableSenderQueueItems(std::vector& item, int32_t itemsCntLimit) { lock_guard lock(mSenderQueueMux); for (auto iter = 
mSenderQueues.begin(); iter != mSenderQueues.end(); ++iter) { - iter->second.GetAllAvailableItems(item, withLimits); + iter->second.GetAvailableItems(item, itemsCntLimit); } } @@ -273,6 +279,14 @@ uint32_t ExactlyOnceQueueManager::GetProcessQueueCnt() const { return mProcessQueues.size(); } +void ExactlyOnceQueueManager::SetPipelineForSenderItems(QueueKey key, const std::shared_ptr& p) { + lock_guard lock(mSenderQueueMux); + auto iter = mSenderQueues.find(key); + if (iter != mSenderQueues.end()) { + iter->second.SetPipelineForItems(p); + } +} + #ifdef APSARA_UNIT_TEST_MAIN void ExactlyOnceQueueManager::Clear() { { diff --git a/core/pipeline/queue/ExactlyOnceQueueManager.h b/core/pipeline/queue/ExactlyOnceQueueManager.h index 7f5e8a70ea..d53be58bd0 100644 --- a/core/pipeline/queue/ExactlyOnceQueueManager.h +++ b/core/pipeline/queue/ExactlyOnceQueueManager.h @@ -59,14 +59,15 @@ class ExactlyOnceQueueManager { // 0: success, 1: queue is full, 2: queue not found int PushProcessQueue(QueueKey key, std::unique_ptr&& item); bool IsAllProcessQueueEmpty() const; - void InvalidatePopProcessQueue(const std::string& configName); - void ValidatePopProcessQueue(const std::string& configName); + void DisablePopProcessQueue(const std::string& configName, bool isPipelineRemoving); + void EnablePopProcessQueue(const std::string& configName); // 0: success, 1: queue is full, 2: queue not found int PushSenderQueue(QueueKey key, std::unique_ptr&& item); - void GetAllAvailableSenderQueueItems(std::vector& item, bool withLimits = true); + void GetAvailableSenderQueueItems(std::vector& item, int32_t itemsCntLimit); bool RemoveSenderQueueItem(QueueKey key, SenderQueueItem* item); bool IsAllSenderQueueEmpty() const; + void SetPipelineForSenderItems(QueueKey key, const std::shared_ptr& p); void ClearTimeoutQueues(); diff --git a/core/pipeline/queue/ExactlyOnceSenderQueue.cpp b/core/pipeline/queue/ExactlyOnceSenderQueue.cpp index 71c0547df8..3057c8ff3b 100644 --- 
a/core/pipeline/queue/ExactlyOnceSenderQueue.cpp +++ b/core/pipeline/queue/ExactlyOnceSenderQueue.cpp @@ -46,8 +46,9 @@ bool ExactlyOnceSenderQueue::Push(unique_ptr&& item) { if (f->mMaxSendRate > 0) { mRateLimiter = RateLimiter(f->mMaxSendRate); } - mConcurrencyLimiters.emplace_back(FlusherSLS::GetRegionConcurrencyLimiter(f->mRegion)); - mConcurrencyLimiters.emplace_back(FlusherSLS::GetProjectConcurrencyLimiter(f->mProject)); + mConcurrencyLimiters.emplace_back(FlusherSLS::GetRegionConcurrencyLimiter(f->mRegion), mMetricsRecordRef.CreateCounter(ConcurrencyLimiter::GetLimiterMetricName("region"))); + mConcurrencyLimiters.emplace_back(FlusherSLS::GetProjectConcurrencyLimiter(f->mProject), mMetricsRecordRef.CreateCounter(ConcurrencyLimiter::GetLimiterMetricName("project"))); + mConcurrencyLimiters.emplace_back(FlusherSLS::GetLogstoreConcurrencyLimiter(f->mProject, f->mLogstore), mMetricsRecordRef.CreateCounter(ConcurrencyLimiter::GetLimiterMetricName("logstore"))); mIsInitialised = true; } @@ -78,7 +79,7 @@ bool ExactlyOnceSenderQueue::Push(unique_ptr&& item) { } if (!eo->IsComplete()) { item->mEnqueTime = chrono::system_clock::now(); - mExtraBuffer.push(std::move(item)); + mExtraBuffer.push_back(std::move(item)); return true; } } @@ -102,7 +103,7 @@ bool ExactlyOnceSenderQueue::Remove(SenderQueueItem* item) { if (!mExtraBuffer.empty()) { Push(std::move(mExtraBuffer.front())); - mExtraBuffer.pop(); + mExtraBuffer.pop_front(); return true; } if (ChangeStateIfNeededAfterPop()) { @@ -111,36 +112,53 @@ bool ExactlyOnceSenderQueue::Remove(SenderQueueItem* item) { return true; } -void ExactlyOnceSenderQueue::GetAllAvailableItems(vector& items, bool withLimits) { + +void ExactlyOnceSenderQueue::GetAvailableItems(vector& items, int32_t limit) { if (Empty()) { return; } + if (limit < 0) { + for (size_t index = 0; index < mCapacity; ++index) { + SenderQueueItem* item = mQueue[index].get(); + if (item == nullptr) { + continue; + } + if (item->mStatus.Get() == 
SendingStatus::IDLE) { + item->mStatus.Set(SendingStatus::SENDING); + items.emplace_back(item); + + } + } + } for (size_t index = 0; index < mCapacity; ++index) { SenderQueueItem* item = mQueue[index].get(); if (item == nullptr) { continue; } - if (withLimits) { - if (mRateLimiter && !mRateLimiter->IsValidToPop()) { + if (limit == 0) { + return; + } + if (mRateLimiter && !mRateLimiter->IsValidToPop()) { + return; + } + for (auto& limiter : mConcurrencyLimiters) { + if (!limiter.first->IsValidToPop()) { return; } - for (auto& limiter : mConcurrencyLimiters) { - if (!limiter->IsValidToPop()) { - return; - } - } } - if (item->mStatus == SendingStatus::IDLE) { - item->mStatus = SendingStatus::SENDING; + if (item->mStatus.Get() == SendingStatus::IDLE) { + --limit; + item->mStatus.Set(SendingStatus::SENDING); items.emplace_back(item); - if (withLimits) { - for (auto& limiter : mConcurrencyLimiters) { - limiter->PostPop(); - } - if (mRateLimiter) { - mRateLimiter->PostPop(item->mRawSize); + for (auto& limiter : mConcurrencyLimiters) { + if (limiter.first != nullptr) { + limiter.first->PostPop(); } } + if (mRateLimiter) { + mRateLimiter->PostPop(item->mRawSize); + } + } } } @@ -153,4 +171,23 @@ void ExactlyOnceSenderQueue::Reset(const vector& checkpoints mRangeCheckpoints = checkpoints; } +void ExactlyOnceSenderQueue::SetPipelineForItems(const std::shared_ptr& p) const { + if (Empty()) { + return; + } + for (size_t index = 0; index < mCapacity; ++index) { + if (!mQueue[index]) { + continue; + } + if (!mQueue[index]->mPipeline) { + mQueue[index]->mPipeline = p; + } + } + for (auto& item : mExtraBuffer) { + if (!item->mPipeline) { + item->mPipeline = p; + } + } +} + } // namespace logtail diff --git a/core/pipeline/queue/ExactlyOnceSenderQueue.h b/core/pipeline/queue/ExactlyOnceSenderQueue.h index 7187bb0845..95aad4ced6 100644 --- a/core/pipeline/queue/ExactlyOnceSenderQueue.h +++ b/core/pipeline/queue/ExactlyOnceSenderQueue.h @@ -30,11 +30,14 @@ namespace logtail { // not 
thread-safe, should be protected explicitly by queue manager class ExactlyOnceSenderQueue : public BoundedSenderQueueInterface { public: - ExactlyOnceSenderQueue(const std::vector& checkpoints, QueueKey key, const PipelineContext& ctx); + ExactlyOnceSenderQueue(const std::vector& checkpoints, + QueueKey key, + const PipelineContext& ctx); bool Push(std::unique_ptr&& item) override; bool Remove(SenderQueueItem* item) override; - void GetAllAvailableItems(std::vector& items, bool withLimits = true) override; + void GetAvailableItems(std::vector& items, int32_t limit) override; + void SetPipelineForItems(const std::shared_ptr& p) const override; void Reset(const std::vector& checkpoints); diff --git a/core/pipeline/queue/ProcessQueueInterface.cpp b/core/pipeline/queue/ProcessQueueInterface.cpp index edf8aabf49..3f28864625 100644 --- a/core/pipeline/queue/ProcessQueueInterface.cpp +++ b/core/pipeline/queue/ProcessQueueInterface.cpp @@ -22,7 +22,7 @@ namespace logtail { ProcessQueueInterface::ProcessQueueInterface(int64_t key, size_t cap, uint32_t priority, const PipelineContext& ctx) : QueueInterface(key, cap, ctx), mPriority(priority), mConfigName(ctx.GetConfigName()) { - mMetricsRecordRef.AddLabels({{METRIC_LABEL_KEY_COMPONENT_NAME, "process_queue"}}); + mMetricsRecordRef.AddLabels({{METRIC_LABEL_KEY_COMPONENT_NAME, METRIC_LABEL_VALUE_COMPONENT_NAME_PROCESS_QUEUE}}); } void ProcessQueueInterface::SetDownStreamQueues(vector&& ques) { diff --git a/core/pipeline/queue/ProcessQueueInterface.h b/core/pipeline/queue/ProcessQueueInterface.h index 6a3b8833c8..b8664cbdda 100644 --- a/core/pipeline/queue/ProcessQueueInterface.h +++ b/core/pipeline/queue/ProcessQueueInterface.h @@ -42,8 +42,10 @@ class ProcessQueueInterface : virtual public QueueInterface&& ques); - void InvalidatePop() { mValidToPop = false; } - void ValidatePop() { mValidToPop = true; } + void DisablePop() { mValidToPop = false; } + void EnablePop() { mValidToPop = true; } + + virtual void 
SetPipelineForItems(const std::shared_ptr& p) const = 0; void Reset() { mDownStreamQueues.clear(); } @@ -57,7 +59,7 @@ class ProcessQueueInterface : virtual public QueueInterface mDownStreamQueues; - bool mValidToPop = true; + bool mValidToPop = false; #ifdef APSARA_UNIT_TEST_MAIN friend class BoundedProcessQueueUnittest; diff --git a/core/pipeline/queue/ProcessQueueItem.h b/core/pipeline/queue/ProcessQueueItem.h index 2d7a719a81..f49ce8e56b 100644 --- a/core/pipeline/queue/ProcessQueueItem.h +++ b/core/pipeline/queue/ProcessQueueItem.h @@ -20,6 +20,7 @@ #include #include "models/PipelineEventGroup.h" +#include "pipeline/PipelineManager.h" namespace logtail { @@ -32,6 +33,17 @@ struct ProcessQueueItem { std::chrono::system_clock::time_point mEnqueTime; ProcessQueueItem(PipelineEventGroup&& group, size_t index) : mEventGroup(std::move(group)), mInputIndex(index) {} + + void AddPipelineInProcessCnt(const std::string& configName) { + if (mPipeline) { + mPipeline->AddInProcessCnt(); + } else { + const auto& p = PipelineManager::GetInstance()->FindConfigByName(configName); + if (p) { + p->AddInProcessCnt(); + } + } + } }; } // namespace logtail diff --git a/core/pipeline/queue/ProcessQueueManager.cpp b/core/pipeline/queue/ProcessQueueManager.cpp index 0b66f6769b..a7e32324cc 100644 --- a/core/pipeline/queue/ProcessQueueManager.cpp +++ b/core/pipeline/queue/ProcessQueueManager.cpp @@ -229,29 +229,35 @@ bool ProcessQueueManager::SetFeedbackInterface(QueueKey key, vectorHasKey(configName)) { auto key = QueueKeyManager::GetInstance()->GetKey(configName); lock_guard lock(mQueueMux); auto iter = mQueues.find(key); if (iter != mQueues.end()) { - (*iter->second.first)->InvalidatePop(); + (*iter->second.first)->DisablePop(); + if (!isPipelineRemoving) { + const auto& p = PipelineManager::GetInstance()->FindConfigByName(configName); + if (p) { + (*iter->second.first)->SetPipelineForItems(p); + } + } } } else { - 
ExactlyOnceQueueManager::GetInstance()->InvalidatePopProcessQueue(configName); + ExactlyOnceQueueManager::GetInstance()->DisablePopProcessQueue(configName, isPipelineRemoving); } } -void ProcessQueueManager::ValidatePop(const string& configName) { +void ProcessQueueManager::EnablePop(const string& configName) { if (QueueKeyManager::GetInstance()->HasKey(configName)) { auto key = QueueKeyManager::GetInstance()->GetKey(configName); lock_guard lock(mQueueMux); auto iter = mQueues.find(key); if (iter != mQueues.end()) { - (*iter->second.first)->ValidatePop(); + (*iter->second.first)->EnablePop(); } } else { - ExactlyOnceQueueManager::GetInstance()->ValidatePopProcessQueue(configName); + ExactlyOnceQueueManager::GetInstance()->EnablePopProcessQueue(configName); } } diff --git a/core/pipeline/queue/ProcessQueueManager.h b/core/pipeline/queue/ProcessQueueManager.h index dd0198c4b4..ab877a2c7d 100644 --- a/core/pipeline/queue/ProcessQueueManager.h +++ b/core/pipeline/queue/ProcessQueueManager.h @@ -62,8 +62,8 @@ class ProcessQueueManager : public FeedbackInterface { bool IsAllQueueEmpty() const; bool SetDownStreamQueues(QueueKey key, std::vector&& ques); bool SetFeedbackInterface(QueueKey key, std::vector&& feedback); - void InvalidatePop(const std::string& configName); - void ValidatePop(const std::string& configName); + void DisablePop(const std::string& configName, bool isPipelineRemoving); + void EnablePop(const std::string& configName); bool Wait(uint64_t ms); void Trigger(); diff --git a/core/pipeline/queue/QueueInterface.h b/core/pipeline/queue/QueueInterface.h index 1c450013ad..c3bf386a4c 100644 --- a/core/pipeline/queue/QueueInterface.h +++ b/core/pipeline/queue/QueueInterface.h @@ -17,7 +17,7 @@ #pragma once #include "monitor/LogtailMetric.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/PipelineContext.h" #include "pipeline/queue/QueueKey.h" @@ -29,16 +29,16 @@ class QueueInterface { 
QueueInterface(QueueKey key, size_t cap, const PipelineContext& ctx) : mKey(key), mCapacity(cap) { WriteMetrics::GetInstance()->CreateMetricsRecordRef(mMetricsRecordRef, { - {METRIC_LABEL_PROJECT, ctx.GetProjectName()}, - {METRIC_LABEL_CONFIG_NAME, ctx.GetConfigName()}, + {METRIC_LABEL_KEY_PROJECT, ctx.GetProjectName()}, + {METRIC_LABEL_KEY_PIPELINE_NAME, ctx.GetConfigName()}, }); - mInItemsCnt = mMetricsRecordRef.CreateCounter(METRIC_IN_ITEMS_CNT); - mInItemDataSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_IN_ITEM_SIZE_BYTES); - mOutItemsCnt = mMetricsRecordRef.CreateCounter(METRIC_OUT_ITEMS_CNT); - mTotalDelayMs = mMetricsRecordRef.CreateCounter(METRIC_TOTAL_DELAY_MS); - mQueueSize = mMetricsRecordRef.CreateIntGauge("queue_size"); - mQueueDataSizeByte = mMetricsRecordRef.CreateIntGauge("queue_data_size_bytes"); + mInItemsTotal = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_IN_ITEMS_TOTAL); + mInItemDataSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_IN_SIZE_BYTES); + mOutItemsTotal = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_OUT_ITEMS_TOTAL); + mTotalDelayMs = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_TOTAL_DELAY_MS); + mQueueSizeTotal = mMetricsRecordRef.CreateIntGauge(METRIC_COMPONENT_QUEUE_SIZE); + mQueueDataSizeByte = mMetricsRecordRef.CreateIntGauge(METRIC_COMPONENT_QUEUE_SIZE_BYTES); } virtual ~QueueInterface() = default; @@ -59,11 +59,11 @@ class QueueInterface { size_t mCapacity = 0; mutable MetricsRecordRef mMetricsRecordRef; - CounterPtr mInItemsCnt; + CounterPtr mInItemsTotal; CounterPtr mInItemDataSizeBytes; - CounterPtr mOutItemsCnt; + CounterPtr mOutItemsTotal; CounterPtr mTotalDelayMs; - IntGaugePtr mQueueSize; + IntGaugePtr mQueueSizeTotal; IntGaugePtr mQueueDataSizeByte; private: diff --git a/core/pipeline/queue/SenderQueue.cpp b/core/pipeline/queue/SenderQueue.cpp index fda5330f56..84ac0ec5f8 100644 --- a/core/pipeline/queue/SenderQueue.cpp +++ b/core/pipeline/queue/SenderQueue.cpp @@ -24,18 +24,20 @@ 
SenderQueue::SenderQueue( size_t cap, size_t low, size_t high, QueueKey key, const string& flusherId, const PipelineContext& ctx) : QueueInterface(key, cap, ctx), BoundedSenderQueueInterface(cap, low, high, key, flusherId, ctx) { mQueue.resize(cap); - WriteMetrics::GetInstance()->CommitMetricsRecordRef(mMetricsRecordRef); + mFetchedTimesCnt = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_FETCH_TIMES_TOTAL); + mFetchedItemsCnt = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_FETCHED_ITEMS_TOTAL); + WriteMetrics::GetInstance()->CommitMetricsRecordRef(mMetricsRecordRef); } bool SenderQueue::Push(unique_ptr&& item) { item->mEnqueTime = chrono::system_clock::now(); auto size = item->mData.size(); - mInItemsCnt->Add(1); + mInItemsTotal->Add(1); mInItemDataSizeBytes->Add(size); if (Full()) { - mExtraBuffer.push(std::move(item)); + mExtraBuffer.push_back(std::move(item)); mExtraBufferSize->Set(mExtraBuffer.size()); mExtraBufferDataSizeBytes->Add(size); @@ -55,7 +57,7 @@ bool SenderQueue::Push(unique_ptr&& item) { ++mSize; ChangeStateIfNeededAfterPush(); - mQueueSize->Set(Size()); + mQueueSizeTotal->Set(Size()); mQueueDataSizeByte->Add(size); mValidToPushFlag->Set(IsValidToPush()); return true; @@ -65,6 +67,7 @@ bool SenderQueue::Remove(SenderQueueItem* item) { if (item == nullptr) { return false; } + size_t size = 0; chrono::system_clock::time_point enQueuTime; auto index = mRead; @@ -84,14 +87,14 @@ bool SenderQueue::Remove(SenderQueueItem* item) { } --mSize; - mOutItemsCnt->Add(1); + mOutItemsTotal->Add(1); mTotalDelayMs->Add(chrono::duration_cast(chrono::system_clock::now() - enQueuTime).count()); mQueueDataSizeByte->Sub(size); if (!mExtraBuffer.empty()) { auto newSize = mExtraBuffer.front()->mData.size(); Push(std::move(mExtraBuffer.front())); - mExtraBuffer.pop(); + mExtraBuffer.pop_front(); mExtraBufferSize->Set(mExtraBuffer.size()); mExtraBufferDataSizeBytes->Sub(newSize); @@ -101,43 +104,82 @@ bool SenderQueue::Remove(SenderQueueItem* item) { GiveFeedback(); } 
- mQueueSize->Set(Size()); + mQueueSizeTotal->Set(Size()); mValidToPushFlag->Set(IsValidToPush()); return true; } -void SenderQueue::GetAllAvailableItems(vector& items, bool withLimits) { + +void SenderQueue::GetAvailableItems(vector& items, int32_t limit) { + mFetchedTimesCnt->Add(1); if (Empty()) { return; } + if (limit < 0) { + for (auto index = mRead; index < mWrite; ++index) { + SenderQueueItem* item = mQueue[index % mCapacity].get(); + if (item == nullptr) { + continue; + } + if (item->mStatus.Get() == SendingStatus::IDLE) { + item->mStatus.Set(SendingStatus::SENDING); + items.emplace_back(item); + } + } + return; + } + for (auto index = mRead; index < mWrite; ++index) { SenderQueueItem* item = mQueue[index % mCapacity].get(); if (item == nullptr) { continue; } - if (withLimits) { - if (mRateLimiter && !mRateLimiter->IsValidToPop()) { + if (limit == 0) { + return; + } + if (mRateLimiter && !mRateLimiter->IsValidToPop()) { + mRejectedByRateLimiterCnt->Add(1); + return; + } + for (auto& limiter : mConcurrencyLimiters) { + if (!limiter.first->IsValidToPop()) { + limiter.second->Add(1); return; } - for (auto& limiter : mConcurrencyLimiters) { - if (!limiter->IsValidToPop()) { - return; - } - } } - if (item->mStatus == SendingStatus::IDLE) { - item->mStatus = SendingStatus::SENDING; + if (item->mStatus.Get() == SendingStatus::IDLE) { + mFetchedItemsCnt->Add(1); + --limit; + item->mStatus.Set(SendingStatus::SENDING); items.emplace_back(item); - if (withLimits) { - for (auto& limiter : mConcurrencyLimiters) { - if (limiter != nullptr) { - limiter->PostPop(); - } - } - if (mRateLimiter) { - mRateLimiter->PostPop(item->mRawSize); + for (auto& limiter : mConcurrencyLimiters) { + if (limiter.first != nullptr) { + limiter.first->PostPop(); } } + if (mRateLimiter) { + mRateLimiter->PostPop(item->mRawSize); + } + } + } +} + +void SenderQueue::SetPipelineForItems(const std::shared_ptr& p) const { + if (Empty()) { + return; + } + for (auto index = mRead; index < mWrite; 
++index) { + auto realIndex = index % mCapacity; + if (!mQueue[realIndex]) { + continue; + } + if (!mQueue[realIndex]->mPipeline) { + mQueue[realIndex]->mPipeline = p; + } + } + for (auto& item : mExtraBuffer) { + if (!item->mPipeline) { + item->mPipeline = p; } } } diff --git a/core/pipeline/queue/SenderQueue.h b/core/pipeline/queue/SenderQueue.h index 1fca5ab85b..7893aa5ef1 100644 --- a/core/pipeline/queue/SenderQueue.h +++ b/core/pipeline/queue/SenderQueue.h @@ -35,7 +35,8 @@ class SenderQueue : public BoundedSenderQueueInterface { bool Push(std::unique_ptr&& item) override; bool Remove(SenderQueueItem* item) override; - void GetAllAvailableItems(std::vector& items, bool withLimits = true) override; + void GetAvailableItems(std::vector& items, int32_t limit) override; + void SetPipelineForItems(const std::shared_ptr& p) const override; private: size_t Size() const override { return mSize; } @@ -45,9 +46,14 @@ class SenderQueue : public BoundedSenderQueueInterface { size_t mRead = 0; size_t mSize = 0; + CounterPtr mFetchedTimesCnt; + CounterPtr mFetchedItemsCnt; + + #ifdef APSARA_UNIT_TEST_MAIN friend class SenderQueueUnittest; friend class SenderQueueManagerUnittest; + friend class FlusherUnittest; #endif }; diff --git a/core/pipeline/queue/SenderQueueItem.h b/core/pipeline/queue/SenderQueueItem.h index 914f98e971..3b271914c9 100644 --- a/core/pipeline/queue/SenderQueueItem.h +++ b/core/pipeline/queue/SenderQueueItem.h @@ -31,6 +31,25 @@ class Pipeline; enum class SendingStatus { IDLE, SENDING }; enum class RawDataType { EVENT_GROUP_LIST, EVENT_GROUP }; // the order must not be changed for backward compatibility +class AtomicSendingStatusEnum { +public: + AtomicSendingStatusEnum(SendingStatus initialValue) : value(initialValue) {} + + // Set new value + void Set(SendingStatus newValue) { + value.store(newValue); + } + + // Get current value + SendingStatus Get() const { + return value.load(); + } + +private: + std::atomic value; +}; + + struct SenderQueueItem { 
std::string mData; size_t mRawSize = 0; @@ -40,7 +59,7 @@ struct SenderQueueItem { Flusher* mFlusher = nullptr; QueueKey mQueueKey; - SendingStatus mStatus = SendingStatus::IDLE; + AtomicSendingStatusEnum mStatus; std::chrono::system_clock::time_point mEnqueTime; time_t mLastSendTime = 0; uint32_t mTryCnt = 1; @@ -56,7 +75,21 @@ struct SenderQueueItem { mType(type), mBufferOrNot(bufferOrNot), mFlusher(flusher), - mQueueKey(key) {} + mQueueKey(key), + mStatus(SendingStatus::IDLE){} + + SenderQueueItem(const SenderQueueItem& item) + : mData(item.mData), + mRawSize(item.mRawSize), + mType(item.mType), + mBufferOrNot(item.mBufferOrNot), + mFlusher(item.mFlusher), + mQueueKey(item.mQueueKey), + mStatus(item.mStatus.Get()) { + mEnqueTime = item.mEnqueTime; + mLastSendTime = item.mLastSendTime; + mTryCnt = item.mTryCnt; + } virtual ~SenderQueueItem() = default; virtual SenderQueueItem* Clone() { return new SenderQueueItem(*this); } diff --git a/core/pipeline/queue/SenderQueueManager.cpp b/core/pipeline/queue/SenderQueueManager.cpp index ec132e943d..fb9d646760 100644 --- a/core/pipeline/queue/SenderQueueManager.cpp +++ b/core/pipeline/queue/SenderQueueManager.cpp @@ -31,7 +31,7 @@ SenderQueueManager::SenderQueueManager() : mQueueParam(INT32_FLAG(sender_queue_c bool SenderQueueManager::CreateQueue(QueueKey key, const string& flusherId, const PipelineContext& ctx, - vector>&& concurrencyLimiters, + std::unordered_map>&& concurrencyLimitersMap, uint32_t maxRate) { lock_guard lock(mQueueMux); auto iter = mQueues.find(key); @@ -45,7 +45,7 @@ bool SenderQueueManager::CreateQueue(QueueKey key, ctx); iter = mQueues.find(key); } - iter->second.SetConcurrencyLimiters(std::move(concurrencyLimiters)); + iter->second.SetConcurrencyLimiters(std::move(concurrencyLimitersMap)); iter->second.SetRateLimiter(maxRate); return true; } @@ -106,14 +106,33 @@ int SenderQueueManager::PushQueue(QueueKey key, unique_ptr&& it return 0; } -void SenderQueueManager::GetAllAvailableItems(vector& items, 
bool withLimits) { +void SenderQueueManager::GetAvailableItems(vector& items, int32_t itemsCntLimit) { { lock_guard lock(mQueueMux); - for (auto iter = mQueues.begin(); iter != mQueues.end(); ++iter) { - iter->second.GetAllAvailableItems(items, withLimits); + if (mQueues.empty()) { + return; + } + if (itemsCntLimit == -1) { + for (auto iter = mQueues.begin(); iter != mQueues.end(); ++iter) { + iter->second.GetAvailableItems(items, -1); + } + } else { + int cntLimitPerQueue = std::max((int)(mQueueParam.GetCapacity() * 0.3), (int)(itemsCntLimit/mQueues.size())); + // must check index before moving iterator + mSenderQueueBeginIndex = mSenderQueueBeginIndex % mQueues.size(); + // here we set sender queue begin index, let the sender order be different each time + auto beginIter = mQueues.begin(); + std::advance(beginIter, mSenderQueueBeginIndex++); + + for (auto iter = beginIter; iter != mQueues.end(); ++iter) { + iter->second.GetAvailableItems(items, cntLimitPerQueue); + } + for (auto iter = mQueues.begin(); iter != beginIter; ++iter) { + iter->second.GetAvailableItems(items, cntLimitPerQueue); + } } } - ExactlyOnceQueueManager::GetInstance()->GetAllAvailableSenderQueueItems(items, withLimits); + ExactlyOnceQueueManager::GetInstance()->GetAvailableSenderQueueItems(items, itemsCntLimit); } bool SenderQueueManager::RemoveItem(QueueKey key, SenderQueueItem* item) { @@ -127,6 +146,14 @@ bool SenderQueueManager::RemoveItem(QueueKey key, SenderQueueItem* item) { return ExactlyOnceQueueManager::GetInstance()->RemoveSenderQueueItem(key, item); } +void SenderQueueManager::DecreaseConcurrencyLimiterInSendingCnt(QueueKey key) { + lock_guard lock(mQueueMux); + auto iter = mQueues.find(key); + if (iter != mQueues.end()) { + iter->second.DecreaseSendingCnt(); + } +} + bool SenderQueueManager::IsAllQueueEmpty() const { { lock_guard lock(mQueueMux); @@ -196,6 +223,16 @@ void SenderQueueManager::Trigger() { mCond.notify_one(); } +void SenderQueueManager::SetPipelineForItems(QueueKey 
key, const std::shared_ptr& p) { + lock_guard lock(mQueueMux); + auto iter = mQueues.find(key); + if (iter != mQueues.end()) { + iter->second.SetPipelineForItems(p); + } else { + ExactlyOnceQueueManager::GetInstance()->SetPipelineForSenderItems(key, p); + } +} + #ifdef APSARA_UNIT_TEST_MAIN void SenderQueueManager::Clear() { lock_guard lock(mQueueMux); diff --git a/core/pipeline/queue/SenderQueueManager.h b/core/pipeline/queue/SenderQueueManager.h index 0755a0b946..27b4a0c358 100644 --- a/core/pipeline/queue/SenderQueueManager.h +++ b/core/pipeline/queue/SenderQueueManager.h @@ -49,18 +49,21 @@ class SenderQueueManager : public FeedbackInterface { bool CreateQueue(QueueKey key, const std::string& flusherId, const PipelineContext& ctx, - std::vector>&& concurrencyLimiters - = std::vector>(), + std::unordered_map>&& concurrencyLimitersMap + = std::unordered_map>(), uint32_t maxRate = 0); SenderQueue* GetQueue(QueueKey key); bool DeleteQueue(QueueKey key); bool ReuseQueue(QueueKey key); // 0: success, 1: queue is full, 2: queue not found int PushQueue(QueueKey key, std::unique_ptr&& item); - void GetAllAvailableItems(std::vector& items, bool withLimits = true); + void GetAvailableItems(std::vector& items, int32_t itemsCntLimit); bool RemoveItem(QueueKey key, SenderQueueItem* item); + void DecreaseConcurrencyLimiterInSendingCnt(QueueKey key); bool IsAllQueueEmpty() const; void ClearUnusedQueues(); + void NotifyPipelineStop(QueueKey key, const std::string& configName); + void SetPipelineForItems(QueueKey key, const std::shared_ptr& p); bool Wait(uint64_t ms); void Trigger(); @@ -90,6 +93,7 @@ class SenderQueueManager : public FeedbackInterface { mutable std::mutex mStateMux; mutable std::condition_variable mCond; bool mValidToPop = false; + size_t mSenderQueueBeginIndex = 0; #ifdef APSARA_UNIT_TEST_MAIN friend class SenderQueueManagerUnittest; diff --git a/core/pipeline/route/Router.cpp b/core/pipeline/route/Router.cpp index 73de16bcda..78ff86810e 100644 --- 
a/core/pipeline/route/Router.cpp +++ b/core/pipeline/route/Router.cpp @@ -15,7 +15,7 @@ #include "pipeline/route/Router.h" #include "common/ParamExtractor.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/Pipeline.h" #include "pipeline/plugin/interface/Flusher.h" @@ -36,16 +36,16 @@ bool Router::Init(std::vector> configs, const P } WriteMetrics::GetInstance()->PrepareMetricsRecordRef(mMetricsRecordRef, - {{METRIC_LABEL_PROJECT, ctx.GetProjectName()}, - {METRIC_LABEL_CONFIG_NAME, ctx.GetConfigName()}, - {METRIC_LABEL_KEY_COMPONENT_NAME, "router"}}); - mInEventsCnt = mMetricsRecordRef.CreateCounter(METRIC_IN_EVENTS_CNT); - mInGroupDataSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_IN_EVENT_GROUP_SIZE_BYTES); + {{METRIC_LABEL_KEY_PROJECT, ctx.GetProjectName()}, + {METRIC_LABEL_KEY_PIPELINE_NAME, ctx.GetConfigName()}, + {METRIC_LABEL_KEY_COMPONENT_NAME, METRIC_LABEL_VALUE_COMPONENT_NAME_ROUTER}}); + mInEventsTotal = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_IN_EVENTS_TOTAL); + mInGroupDataSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_IN_SIZE_BYTES); return true; } vector Router::Route(const PipelineEventGroup& g) const { - mInEventsCnt->Add(g.GetEvents().size()); + mInEventsTotal->Add(g.GetEvents().size()); mInGroupDataSizeBytes->Add(g.DataSize()); vector res(mAlwaysMatchedFlusherIdx); diff --git a/core/pipeline/route/Router.h b/core/pipeline/route/Router.h index b036256551..4385ff4db5 100644 --- a/core/pipeline/route/Router.h +++ b/core/pipeline/route/Router.h @@ -39,7 +39,7 @@ class Router { std::vector mAlwaysMatchedFlusherIdx; mutable MetricsRecordRef mMetricsRecordRef; - CounterPtr mInEventsCnt; + CounterPtr mInEventsTotal; CounterPtr mInGroupDataSizeBytes; #ifdef APSARA_UNIT_TEST_MAIN diff --git a/core/pipeline/serializer/SLSSerializer.cpp b/core/pipeline/serializer/SLSSerializer.cpp index f24030c100..c17d7e4d01 100644 --- a/core/pipeline/serializer/SLSSerializer.cpp +++ 
b/core/pipeline/serializer/SLSSerializer.cpp @@ -43,19 +43,19 @@ bool Serializer>::DoSerialize(vectorAdd(1); + mInItemsTotal->Add(1); mInItemSizeBytes->Add(inputSize); auto before = std::chrono::system_clock::now(); auto res = Serialize(std::move(p), output, errorMsg); - mTotalDelayMs->Add( + mTotalProcessMs->Add( std::chrono::duration_cast(std::chrono::system_clock::now() - before).count()); if (res) { - mOutItemsCnt->Add(1); + mOutItemsTotal->Add(1); mOutItemSizeBytes->Add(output.size()); } else { - mDiscardedItemsCnt->Add(1); + mDiscardedItemsTotal->Add(1); mDiscardedItemSizeBytes->Add(inputSize); } return res; diff --git a/core/pipeline/serializer/Serializer.h b/core/pipeline/serializer/Serializer.h index b632124b7a..cd81fad597 100644 --- a/core/pipeline/serializer/Serializer.h +++ b/core/pipeline/serializer/Serializer.h @@ -20,7 +20,7 @@ #include #include "models/PipelineEventPtr.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/batch/BatchedEvents.h" #include "pipeline/plugin/interface/Flusher.h" @@ -49,35 +49,35 @@ class Serializer { Serializer(Flusher* f) : mFlusher(f) { WriteMetrics::GetInstance()->PrepareMetricsRecordRef( mMetricsRecordRef, - {{METRIC_LABEL_PROJECT, f->GetContext().GetProjectName()}, - {METRIC_LABEL_CONFIG_NAME, f->GetContext().GetConfigName()}, - {METRIC_LABEL_KEY_COMPONENT_NAME, "serializer"}, - {METRIC_LABEL_KEY_FLUSHER_NODE_ID, f->GetNodeID()}}); - mInItemsCnt = mMetricsRecordRef.CreateCounter(METRIC_IN_ITEMS_CNT); - mInItemSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_IN_ITEM_SIZE_BYTES); - mOutItemsCnt = mMetricsRecordRef.CreateCounter(METRIC_OUT_ITEMS_CNT); - mOutItemSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_OUT_ITEM_SIZE_BYTES); - mDiscardedItemsCnt = mMetricsRecordRef.CreateCounter("discarded_items_cnt"); - mDiscardedItemSizeBytes = mMetricsRecordRef.CreateCounter("discarded_item_data_size_bytes"); - mTotalDelayMs = 
mMetricsRecordRef.CreateCounter(METRIC_TOTAL_DELAY_MS); + {{METRIC_LABEL_KEY_PROJECT, f->GetContext().GetProjectName()}, + {METRIC_LABEL_KEY_PIPELINE_NAME, f->GetContext().GetConfigName()}, + {METRIC_LABEL_KEY_COMPONENT_NAME, METRIC_LABEL_VALUE_COMPONENT_NAME_SERIALIZER}, + {METRIC_LABEL_KEY_FLUSHER_PLUGIN_ID, f->GetPluginID()}}); + mInItemsTotal = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_IN_ITEMS_TOTAL); + mInItemSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_IN_SIZE_BYTES); + mOutItemsTotal = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_OUT_ITEMS_TOTAL); + mOutItemSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_OUT_SIZE_BYTES); + mTotalProcessMs = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_TOTAL_PROCESS_TIME_MS); + mDiscardedItemsTotal = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_DISCARDED_ITEMS_TOTAL); + mDiscardedItemSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_COMPONENT_DISCARDED_ITEMS_SIZE_BYTES); } virtual ~Serializer() = default; bool DoSerialize(T&& p, std::string& output, std::string& errorMsg) { auto inputSize = GetInputSize(p); - mInItemsCnt->Add(1); + mInItemsTotal->Add(1); mInItemSizeBytes->Add(inputSize); auto before = std::chrono::system_clock::now(); auto res = Serialize(std::move(p), output, errorMsg); - mTotalDelayMs->Add( + mTotalProcessMs->Add( std::chrono::duration_cast(std::chrono::system_clock::now() - before).count()); if (res) { - mOutItemsCnt->Add(1); + mOutItemsTotal->Add(1); mOutItemSizeBytes->Add(output.size()); } else { - mDiscardedItemsCnt->Add(1); + mDiscardedItemsTotal->Add(1); mDiscardedItemSizeBytes->Add(inputSize); } return res; @@ -88,13 +88,13 @@ class Serializer { const Flusher* mFlusher = nullptr; mutable MetricsRecordRef mMetricsRecordRef; - CounterPtr mInItemsCnt; + CounterPtr mInItemsTotal; CounterPtr mInItemSizeBytes; - CounterPtr mOutItemsCnt; + CounterPtr mOutItemsTotal; CounterPtr mOutItemSizeBytes; - CounterPtr mDiscardedItemsCnt; + CounterPtr 
mDiscardedItemsTotal; CounterPtr mDiscardedItemSizeBytes; - CounterPtr mTotalDelayMs; + CounterPtr mTotalProcessMs; private: virtual bool Serialize(T&& p, std::string& res, std::string& errorMsg) = 0; diff --git a/core/plugin/flusher/blackhole/FlusherBlackHole.cpp b/core/plugin/flusher/blackhole/FlusherBlackHole.cpp index 2f3a6510b5..779bca0527 100644 --- a/core/plugin/flusher/blackhole/FlusherBlackHole.cpp +++ b/core/plugin/flusher/blackhole/FlusherBlackHole.cpp @@ -25,7 +25,7 @@ const string FlusherBlackHole::sName = "flusher_blackhole"; bool FlusherBlackHole::Init(const Json::Value& config, Json::Value& optionalGoPipeline) { static uint32_t cnt = 0; GenerateQueueKey(to_string(++cnt)); - SenderQueueManager::GetInstance()->CreateQueue(mQueueKey, mNodeID, *mContext); + SenderQueueManager::GetInstance()->CreateQueue(mQueueKey, mPluginID, *mContext); return true; } diff --git a/core/plugin/flusher/sls/DiskBufferWriter.cpp b/core/plugin/flusher/sls/DiskBufferWriter.cpp index 94471e1e54..3d7c86a338 100644 --- a/core/plugin/flusher/sls/DiskBufferWriter.cpp +++ b/core/plugin/flusher/sls/DiskBufferWriter.cpp @@ -46,7 +46,6 @@ using namespace std; namespace logtail { -const string DiskBufferWriter::BUFFER_FILE_NAME_PREFIX = "logtail_buffer_file_"; const int32_t DiskBufferWriter::BUFFER_META_BASE_SIZE = 65536; void DiskBufferWriter::Init() { @@ -207,7 +206,7 @@ void DiskBufferWriter::BufferSenderThread() { void DiskBufferWriter::SetBufferFilePath(const std::string& bufferfilepath) { lock_guard lock(mBufferFileLock); if (bufferfilepath == "") { - mBufferFilePath = GetProcessExecutionDir(); + mBufferFilePath = GetAgentDataDir(); } else mBufferFilePath = bufferfilepath; @@ -234,7 +233,7 @@ std::string DiskBufferWriter::GetBufferFileName() { bool DiskBufferWriter::LoadFileToSend(time_t timeLine, std::vector& filesToSend) { string bufferFilePath = GetBufferFilePath(); if (!CheckExistance(bufferFilePath)) { - if (GetProcessExecutionDir().find(bufferFilePath) != 0) { + if 
(GetAgentDataDir().find(bufferFilePath) != 0) { LOG_WARNING(sLogger, ("buffer file path not exist", bufferFilePath)("logtail will not recreate external path", "local secondary does not work")); @@ -262,9 +261,9 @@ bool DiskBufferWriter::LoadFileToSend(time_t timeLine, std::vector& fsutil::Entry ent; while ((ent = dir.ReadNext())) { string filename = ent.Name(); - if (filename.find(BUFFER_FILE_NAME_PREFIX) == 0) { + if (filename.find(GetSendBufferFileNamePrefix()) == 0) { try { - int32_t filetime = StringTo(filename.substr(BUFFER_FILE_NAME_PREFIX.size())); + int32_t filetime = StringTo(filename.substr(GetSendBufferFileNamePrefix().size())); if (filetime < timeLine) filesToSend.push_back(filename); } catch (...) { @@ -388,6 +387,9 @@ bool DiskBufferWriter::ReadNextEncryption(int32_t& pos, if (!bufferMeta.has_compresstype()) { bufferMeta.set_compresstype(sls_logs::SlsCompressType::SLS_CMP_LZ4); } + if (!bufferMeta.has_telemetrytype()) { + bufferMeta.set_telemetrytype(sls_logs::SLS_TELEMETRY_TYPE_LOGS); + } buffer = new char[meta.mEncryptionSize + 1]; nbytes = fread(buffer, sizeof(char), meta.mEncryptionSize, fin); @@ -469,6 +471,7 @@ void DiskBufferWriter::SendEncryptionBuffer(const std::string& filename, int32_t bufferMeta.set_datatype(int(RawDataType::EVENT_GROUP)); bufferMeta.set_rawsize(meta.mLogDataSize); bufferMeta.set_compresstype(sls_logs::SLS_CMP_LZ4); + bufferMeta.set_telemetrytype(sls_logs::SLS_TELEMETRY_TYPE_LOGS); } } if (!sendResult) { @@ -539,7 +542,7 @@ bool DiskBufferWriter::CreateNewFile() { } } mBufferDivideTime = currentTime; - SetBufferFileName(GetBufferFilePath() + BUFFER_FILE_NAME_PREFIX + ToString(currentTime)); + SetBufferFileName(GetBufferFilePath() + GetSendBufferFileNamePrefix() + ToString(currentTime)); return true; } @@ -650,6 +653,7 @@ bool DiskBufferWriter::SendToBufferFile(SenderQueueItem* dataPtr) { bufferMeta.set_rawsize(data->mRawSize); bufferMeta.set_shardhashkey(data->mShardHashKey); 
bufferMeta.set_compresstype(ConvertCompressType(flusher->GetCompressType())); + bufferMeta.set_telemetrytype(flusher->mTelemetryType); string encodedInfo; bufferMeta.SerializeToString(&encodedInfo); @@ -727,7 +731,14 @@ SendResult DiskBufferWriter::SendToNetSync(sdk::Client* sendClient, ++retryTimes; try { if (bufferMeta.datatype() == int(RawDataType::EVENT_GROUP)) { - if (bufferMeta.has_shardhashkey() && !bufferMeta.shardhashkey().empty()) + if (bufferMeta.has_telemetrytype() + && bufferMeta.telemetrytype() == sls_logs::SLS_TELEMETRY_TYPE_METRICS) { + sendClient->PostMetricStoreLogs(bufferMeta.project(), + bufferMeta.logstore(), + bufferMeta.compresstype(), + logData, + bufferMeta.rawsize()); + } else if (bufferMeta.has_shardhashkey() && !bufferMeta.shardhashkey().empty()) sendClient->PostLogStoreLogs(bufferMeta.project(), bufferMeta.logstore(), bufferMeta.compresstype(), diff --git a/core/plugin/flusher/sls/DiskBufferWriter.h b/core/plugin/flusher/sls/DiskBufferWriter.h index d3c1a391fc..14d3d9cfe5 100644 --- a/core/plugin/flusher/sls/DiskBufferWriter.h +++ b/core/plugin/flusher/sls/DiskBufferWriter.h @@ -47,7 +47,6 @@ class DiskBufferWriter { bool PushToDiskBuffer(SenderQueueItem* item, uint32_t retryTimes); private: - static const std::string BUFFER_FILE_NAME_PREFIX; static const int32_t BUFFER_META_BASE_SIZE; struct EncryptionStateMeta { diff --git a/core/plugin/flusher/sls/FlusherSLS.cpp b/core/plugin/flusher/sls/FlusherSLS.cpp index 1650d9fb79..25d1bf440e 100644 --- a/core/plugin/flusher/sls/FlusherSLS.cpp +++ b/core/plugin/flusher/sls/FlusherSLS.cpp @@ -14,6 +14,8 @@ #include "plugin/flusher/sls/FlusherSLS.h" +#include "sls_logs.pb.h" + #ifdef __ENTERPRISE__ #include "config/provider/EnterpriseConfigProvider.h" #endif @@ -56,6 +58,7 @@ DEFINE_FLAG_INT32(discard_send_fail_interval, "discard data when send fail after DEFINE_FLAG_INT32(profile_data_send_retrytimes, "how many times should retry if profile data send fail", 5); 
DEFINE_FLAG_INT32(unknow_error_try_max, "discard data when try times > this value", 5); DEFINE_FLAG_BOOL(global_network_success, "global network success flag, default false", false); +DEFINE_FLAG_BOOL(enable_metricstore_channel, "only works for metrics data for enhance metrics query performance", true); DECLARE_FLAG_BOOL(send_prefer_real_ip); @@ -110,17 +113,41 @@ void FlusherSLS::RecycleResourceIfNotUsed() { mutex FlusherSLS::sMux; unordered_map> FlusherSLS::sProjectConcurrencyLimiterMap; unordered_map> FlusherSLS::sRegionConcurrencyLimiterMap; +unordered_map> FlusherSLS::sLogstoreConcurrencyLimiterMap; + + +shared_ptr GetConcurrencyLimiter() { + return make_shared(AppConfig::GetInstance()->GetSendRequestConcurrency()); +} + +shared_ptr FlusherSLS::GetLogstoreConcurrencyLimiter(const std::string& project, const std::string& logstore) { + lock_guard lock(sMux); + std::string key = project + "-" + logstore; + + auto iter = sLogstoreConcurrencyLimiterMap.find(key); + if (iter == sLogstoreConcurrencyLimiterMap.end()) { + auto limiter = GetConcurrencyLimiter(); + sLogstoreConcurrencyLimiterMap.try_emplace(key, limiter); + return limiter; + } + if (iter->second.expired()) { + auto limiter = GetConcurrencyLimiter(); + iter->second = limiter; + return limiter; + } + return iter->second.lock(); +} shared_ptr FlusherSLS::GetProjectConcurrencyLimiter(const string& project) { lock_guard lock(sMux); auto iter = sProjectConcurrencyLimiterMap.find(project); if (iter == sProjectConcurrencyLimiterMap.end()) { - auto limiter = make_shared(); + auto limiter = GetConcurrencyLimiter(); sProjectConcurrencyLimiterMap.try_emplace(project, limiter); return limiter; } if (iter->second.expired()) { - auto limiter = make_shared(); + auto limiter = GetConcurrencyLimiter(); iter->second = limiter; return limiter; } @@ -131,12 +158,12 @@ shared_ptr FlusherSLS::GetRegionConcurrencyLimiter(const str lock_guard lock(sMux); auto iter = sRegionConcurrencyLimiterMap.find(region); if (iter == 
sRegionConcurrencyLimiterMap.end()) { - auto limiter = make_shared(); + auto limiter = GetConcurrencyLimiter(); sRegionConcurrencyLimiterMap.try_emplace(region, limiter); return limiter; } if (iter->second.expired()) { - auto limiter = make_shared(); + auto limiter = GetConcurrencyLimiter(); iter->second = limiter; return limiter; } @@ -159,6 +186,13 @@ void FlusherSLS::ClearInvalidConcurrencyLimiters() { ++iter; } } + for (auto iter = sLogstoreConcurrencyLimiterMap.begin(); iter != sLogstoreConcurrencyLimiterMap.end();) { + if (iter->second.expired()) { + iter = sLogstoreConcurrencyLimiterMap.erase(iter); + } else { + ++iter; + } + } } mutex FlusherSLS::sDefaultRegionLock; @@ -360,6 +394,7 @@ bool FlusherSLS::Init(const Json::Value& config, Json::Value& optionalGoPipeline // TelemetryType string telemetryType; + if (!GetOptionalStringParam(config, "TelemetryType", telemetryType, errorMsg)) { PARAM_WARNING_DEFAULT(mContext->GetLogger(), mContext->GetAlarm(), @@ -371,7 +406,8 @@ bool FlusherSLS::Init(const Json::Value& config, Json::Value& optionalGoPipeline mContext->GetLogstoreName(), mContext->GetRegion()); } else if (telemetryType == "metrics") { - mTelemetryType = TelemetryType::METRIC; + mTelemetryType = BOOL_FLAG(enable_metricstore_channel) ? 
sls_logs::SLS_TELEMETRY_TYPE_METRICS + : sls_logs::SLS_TELEMETRY_TYPE_LOGS; } else if (!telemetryType.empty() && telemetryType != "logs") { PARAM_WARNING_DEFAULT(mContext->GetLogger(), mContext->GetAlarm(), @@ -445,7 +481,7 @@ bool FlusherSLS::Init(const Json::Value& config, Json::Value& optionalGoPipeline // CompressType if (BOOL_FLAG(sls_client_send_compress)) { - mCompressor = CompressorFactory::GetInstance()->Create(config, *mContext, sName, mNodeID, CompressType::LZ4); + mCompressor = CompressorFactory::GetInstance()->Create(config, *mContext, sName, mPluginID, CompressType::LZ4); } mGroupSerializer = make_unique(this); @@ -470,10 +506,13 @@ bool FlusherSLS::Init(const Json::Value& config, Json::Value& optionalGoPipeline GenerateQueueKey(mProject + "#" + mLogstore); SenderQueueManager::GetInstance()->CreateQueue( mQueueKey, - mNodeID, + mPluginID, *mContext, - vector>{GetRegionConcurrencyLimiter(mRegion), - GetProjectConcurrencyLimiter(mProject)}, + { + {"region", GetRegionConcurrencyLimiter(mRegion)}, + {"project", GetProjectConcurrencyLimiter(mProject)}, + {"logstore", GetLogstoreConcurrencyLimiter(mProject, mLogstore)} + }, mMaxSendRate); } @@ -492,12 +531,24 @@ bool FlusherSLS::Init(const Json::Value& config, Json::Value& optionalGoPipeline GenerateGoPlugin(config, optionalGoPipeline); + mSendCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_OUT_EVENT_GROUPS_TOTAL); + mSendDoneCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_SEND_DONE_TOTAL); + mSuccessCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_SUCCESS_TOTAL); + mNetworkErrorCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_NETWORK_ERROR_TOTAL); + mServerErrorCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_SERVER_ERROR_TOTAL); + mShardWriteQuotaErrorCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_SLS_SHARD_WRITE_QUOTA_ERROR_TOTAL); + mProjectQuotaErrorCnt = 
GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_SLS_PROJECT_QUOTA_ERROR_TOTAL); + mUnauthErrorCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_UNAUTH_ERROR_TOTAL); + mParamsErrorCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_PARAMS_ERROR_TOTAL); + mSequenceIDErrorCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_SLS_SEQUENCE_ID_ERROR_TOTAL); + mRequestExpiredErrorCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_SLS_REQUEST_EXPRIRED_ERROR_TOTAL); + mOtherErrorCnt = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_FLUSHER_OTHER_ERROR_TOTAL); + return true; } bool FlusherSLS::Start() { Flusher::Start(); - InitResource(); IncreaseProjectReferenceCnt(mProject); @@ -551,6 +602,9 @@ unique_ptr FlusherSLS::BuildRequest(SenderQueueItem* item) cons lastResetEndpointTime = curTime; } } + if (mSendCnt) { + mSendCnt->Add(1); + } if (BOOL_FLAG(send_prefer_real_ip)) { if (curTime - sendClient->GetSlsRealIpUpdateTime() >= INT32_FLAG(send_check_real_ip_interval)) { SLSClientManager::GetInstance()->UpdateSendClientRealIp(sendClient, mRegion); @@ -559,20 +613,29 @@ unique_ptr FlusherSLS::BuildRequest(SenderQueueItem* item) cons } if (data->mType == RawDataType::EVENT_GROUP) { - if (data->mShardHashKey.empty()) { - return sendClient->CreatePostLogStoreLogsRequest( + if (mTelemetryType == sls_logs::SLS_TELEMETRY_TYPE_METRICS) { + return sendClient->CreatePostMetricStoreLogsRequest( mProject, data->mLogstore, ConvertCompressType(GetCompressType()), data->mData, data->mRawSize, item); } else { - auto& exactlyOnceCpt = data->mExactlyOnceCheckpoint; - int64_t hashKeySeqID = exactlyOnceCpt ? 
exactlyOnceCpt->data.sequence_id() : sdk::kInvalidHashKeySeqID; - return sendClient->CreatePostLogStoreLogsRequest(mProject, - data->mLogstore, - ConvertCompressType(GetCompressType()), - data->mData, - data->mRawSize, - item, - data->mShardHashKey, - hashKeySeqID); + if (data->mShardHashKey.empty()) { + return sendClient->CreatePostLogStoreLogsRequest(mProject, + data->mLogstore, + ConvertCompressType(GetCompressType()), + data->mData, + data->mRawSize, + item); + } else { + auto& exactlyOnceCpt = data->mExactlyOnceCheckpoint; + int64_t hashKeySeqID = exactlyOnceCpt ? exactlyOnceCpt->data.sequence_id() : sdk::kInvalidHashKeySeqID; + return sendClient->CreatePostLogStoreLogsRequest(mProject, + data->mLogstore, + ConvertCompressType(GetCompressType()), + data->mData, + data->mRawSize, + item, + data->mShardHashKey, + hashKeySeqID); + } } } else { if (data->mShardHashKey.empty()) @@ -589,6 +652,9 @@ unique_ptr FlusherSLS::BuildRequest(SenderQueueItem* item) cons } void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) { + if (mSendDoneCnt) { + mSendDoneCnt->Add(1); + } SLSResponse slsResponse; if (AppConfig::GetInstance()->IsResponseVerificationEnabled() && !IsSLSResponse(response)) { slsResponse.mStatusCode = 0; @@ -628,7 +694,13 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) + "ms")("try cnt", data->mTryCnt)("endpoint", data->mCurrentEndpoint)("is profile data", isProfileData)); GetRegionConcurrencyLimiter(mRegion)->OnSuccess(); + GetProjectConcurrencyLimiter(mProject)->OnSuccess(); + GetLogstoreConcurrencyLimiter(mProject, mLogstore)->OnSuccess(); + SenderQueueManager::GetInstance()->DecreaseConcurrencyLimiterInSendingCnt(item->mQueueKey); DealSenderQueueItemAfterSend(item, false); + if (mSuccessCnt) { + mSuccessCnt->Add(1); + } } else { OperationOnFail operation; SendResult sendResult = ConvertErrorCode(slsResponse.mErrorCode); @@ -637,8 +709,14 @@ void FlusherSLS::OnSendDone(const HttpResponse& 
response, SenderQueueItem* item) if (sendResult == SEND_NETWORK_ERROR || sendResult == SEND_SERVER_ERROR) { if (sendResult == SEND_NETWORK_ERROR) { failDetail << "network error"; + if (mNetworkErrorCnt) { + mNetworkErrorCnt->Add(1); + } } else { failDetail << "server error"; + if (mServerErrorCnt) { + mServerErrorCnt->Add(1); + } } suggestion << "check network connection to endpoint"; if (BOOL_FLAG(send_prefer_real_ip) && data->mRealIpFlag) { @@ -658,15 +736,30 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) } } operation = data->mBufferOrNot ? OperationOnFail::RETRY_LATER : OperationOnFail::DISCARD; + GetRegionConcurrencyLimiter(mRegion)->OnFail(); + GetProjectConcurrencyLimiter(mProject)->OnSuccess(); + GetLogstoreConcurrencyLimiter(mProject, mLogstore)->OnSuccess(); } else if (sendResult == SEND_QUOTA_EXCEED) { BOOL_FLAG(global_network_success) = true; if (slsResponse.mErrorCode == sdk::LOGE_SHARD_WRITE_QUOTA_EXCEED) { failDetail << "shard write quota exceed"; suggestion << "Split logstore shards. https://help.aliyun.com/zh/sls/user-guide/expansion-of-resources"; + GetLogstoreConcurrencyLimiter(mProject, mLogstore)->OnFail(); + GetRegionConcurrencyLimiter(mRegion)->OnSuccess(); + GetProjectConcurrencyLimiter(mProject)->OnSuccess(); + if (mShardWriteQuotaErrorCnt) { + mShardWriteQuotaErrorCnt->Add(1); + } } else { failDetail << "project write quota exceed"; suggestion << "Submit quota modification request. 
" "https://help.aliyun.com/zh/sls/user-guide/expansion-of-resources"; + GetProjectConcurrencyLimiter(mProject)->OnFail(); + GetRegionConcurrencyLimiter(mRegion)->OnSuccess(); + GetLogstoreConcurrencyLimiter(mProject, mLogstore)->OnSuccess(); + if (mProjectQuotaErrorCnt) { + mProjectQuotaErrorCnt->Add(1); + } } LogtailAlarm::GetInstance()->SendAlarm(SEND_QUOTA_EXCEED_ALARM, "error_code: " + slsResponse.mErrorCode @@ -700,12 +793,21 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) } #endif } + if (mUnauthErrorCnt) { + mUnauthErrorCnt->Add(1); + } } else if (sendResult == SEND_PARAMETER_INVALID) { failDetail << "invalid paramters"; suggestion << "check input parameters"; operation = DefaultOperation(item->mTryCnt); + if (mParamsErrorCnt) { + mParamsErrorCnt->Add(1); + } } else if (sendResult == SEND_INVALID_SEQUENCE_ID) { failDetail << "invalid exactly-once sequence id"; + if (mSequenceIDErrorCnt) { + mSequenceIDErrorCnt->Add(1); + } do { auto& cpt = data->mExactlyOnceCheckpoint; if (!cpt) { @@ -726,8 +828,8 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) // the possibility of hash key conflict is very low, so data is // dropped here. 
cpt->Commit(); - failDetail << ", drop exactly once log group and commit checkpoint" << " checkpointKey:" << cpt->key - << " checkpoint:" << cpt->data.DebugString(); + failDetail << ", drop exactly once log group and commit checkpoint" + << " checkpointKey:" << cpt->key << " checkpoint:" << cpt->data.DebugString(); suggestion << "no suggestion"; LogtailAlarm::GetInstance()->SendAlarm( EXACTLY_ONCE_ALARM, @@ -744,6 +846,9 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) failDetail << "write request expired, will retry"; suggestion << "check local system time"; operation = OperationOnFail::RETRY_IMMEDIATELY; + if (mRequestExpiredErrorCnt) { + mRequestExpiredErrorCnt->Add(1); + } } else { failDetail << "other error"; suggestion << "no suggestion"; @@ -752,6 +857,9 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) // then we record error and retry latter // when retry times > unknow_error_try_max, we will drop this data operation = DefaultOperation(item->mTryCnt); + if (mOtherErrorCnt) { + mOtherErrorCnt->Add(1); + } } if (chrono::duration_cast(curSystemTime - item->mEnqueTime).count() > INT32_FLAG(discard_send_fail_interval)) { @@ -782,6 +890,7 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) LOG_WARNING(sLogger, LOG_PATTERN); data->mLastLogWarningTime = curTime; } + SenderQueueManager::GetInstance()->DecreaseConcurrencyLimiterInSendingCnt(item->mQueueKey); DealSenderQueueItemAfterSend(item, true); break; case OperationOnFail::DISCARD: @@ -799,6 +908,7 @@ void FlusherSLS::OnSendDone(const HttpResponse& response, SenderQueueItem* item) data->mLogstore, mRegion); } + SenderQueueManager::GetInstance()->DecreaseConcurrencyLimiterInSendingCnt(item->mQueueKey); DealSenderQueueItemAfterSend(item, false); break; } @@ -830,7 +940,7 @@ bool FlusherSLS::Send(string&& data, const string& shardHashKey, const string& l key = QueueKeyManager::GetInstance()->GetKey(mProject + "-" + 
mLogstore); if (SenderQueueManager::GetInstance()->GetQueue(key) == nullptr) { PipelineContext ctx; - SenderQueueManager::GetInstance()->CreateQueue(key, "", ctx, vector>()); + SenderQueueManager::GetInstance()->CreateQueue(key, "", ctx, std::unordered_map>()); } } return Flusher::PushToQueue(make_unique(std::move(compressedData), @@ -1009,18 +1119,6 @@ bool FlusherSLS::SerializeAndPush(vector&& groupLists) { } bool FlusherSLS::PushToQueue(QueueKey key, unique_ptr&& item, uint32_t retryTimes) { -#ifndef APSARA_UNIT_TEST_MAIN - // TODO: temporarily set here, should be removed after independent config update refactor - if (item->mFlusher->HasContext()) { - item->mPipeline - = PipelineManager::GetInstance()->FindConfigByName(item->mFlusher->GetContext().GetConfigName()); - if (!item->mPipeline) { - // should not happen - return false; - } - } -#endif - const string& str = QueueKeyManager::GetInstance()->GetName(key); for (size_t i = 0; i < retryTimes; ++i) { int rst = SenderQueueManager::GetInstance()->PushQueue(key, std::move(item)); diff --git a/core/plugin/flusher/sls/FlusherSLS.h b/core/plugin/flusher/sls/FlusherSLS.h index 7e3d4658a1..cd1c227614 100644 --- a/core/plugin/flusher/sls/FlusherSLS.h +++ b/core/plugin/flusher/sls/FlusherSLS.h @@ -31,13 +31,14 @@ #include "pipeline/limiter/ConcurrencyLimiter.h" #include "pipeline/plugin/interface/HttpFlusher.h" #include "pipeline/serializer/SLSSerializer.h" +#include "protobuf/sls/sls_logs.pb.h" namespace logtail { class FlusherSLS : public HttpFlusher { public: - enum class TelemetryType { LOG, METRIC }; + static std::shared_ptr GetLogstoreConcurrencyLimiter(const std::string& project, const std::string& logstore); static std::shared_ptr GetProjectConcurrencyLimiter(const std::string& project); static std::shared_ptr GetRegionConcurrencyLimiter(const std::string& region); static void ClearInvalidConcurrencyLimiters(); @@ -77,7 +78,7 @@ class FlusherSLS : public HttpFlusher { std::string mRegion; std::string mEndpoint; 
std::string mAliuid; - TelemetryType mTelemetryType = TelemetryType::LOG; + sls_logs::SlsTelemetryType mTelemetryType = sls_logs::SlsTelemetryType::SLS_TELEMETRY_TYPE_LOGS; std::vector mShardHashKeys; uint32_t mMaxSendRate = 0; // preserved only for exactly once uint32_t mFlowControlExpireTime = 0; @@ -98,6 +99,7 @@ class FlusherSLS : public HttpFlusher { static std::mutex sMux; static std::unordered_map> sProjectConcurrencyLimiterMap; static std::unordered_map> sRegionConcurrencyLimiterMap; + static std::unordered_map> sLogstoreConcurrencyLimiterMap; static std::mutex sDefaultRegionLock; static std::string sDefaultRegion; @@ -125,6 +127,19 @@ class FlusherSLS : public HttpFlusher { std::unique_ptr mGroupSerializer; std::unique_ptr>> mGroupListSerializer; + CounterPtr mSendCnt; + CounterPtr mSendDoneCnt; + CounterPtr mSuccessCnt; + CounterPtr mNetworkErrorCnt; + CounterPtr mServerErrorCnt; + CounterPtr mShardWriteQuotaErrorCnt; + CounterPtr mProjectQuotaErrorCnt; + CounterPtr mUnauthErrorCnt; + CounterPtr mParamsErrorCnt; + CounterPtr mSequenceIDErrorCnt; + CounterPtr mRequestExpiredErrorCnt; + CounterPtr mOtherErrorCnt; + #ifdef APSARA_UNIT_TEST_MAIN friend class FlusherSLSUnittest; #endif diff --git a/core/plugin/flusher/sls/SLSClientManager.cpp b/core/plugin/flusher/sls/SLSClientManager.cpp index f729bbeae4..5f04a51947 100644 --- a/core/plugin/flusher/sls/SLSClientManager.cpp +++ b/core/plugin/flusher/sls/SLSClientManager.cpp @@ -20,10 +20,10 @@ #include "common/LogtailCommonFlags.h" #include "common/StringTools.h" #include "common/TimeUtil.h" -#include "plugin/flusher/sls/FlusherSLS.h" -#include "plugin/flusher/sls/SendResult.h" #include "logger/Logger.h" #include "monitor/LogFileProfiler.h" +#include "plugin/flusher/sls/FlusherSLS.h" +#include "plugin/flusher/sls/SendResult.h" #include "sdk/Exception.h" #include "sls_control/SLSControl.h" diff --git a/core/plugin/input/InputContainerStdio.cpp b/core/plugin/input/InputContainerStdio.cpp index 
a4147c82f1..46a018e067 100644 --- a/core/plugin/input/InputContainerStdio.cpp +++ b/core/plugin/input/InputContainerStdio.cpp @@ -19,7 +19,7 @@ #include "common/LogtailCommonFlags.h" #include "common/ParamExtractor.h" #include "file_server/FileServer.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/Pipeline.h" #include "pipeline/plugin/PluginRegistry.h" #include "plugin/processor/inner/ProcessorMergeMultilineLogNative.h" @@ -161,16 +161,17 @@ bool InputContainerStdio::Init(const Json::Value& config, Json::Value& optionalG // init PluginMetricManager static const std::unordered_map inputFileMetricKeys = { - {METRIC_INPUT_RECORDS_SIZE_BYTES, MetricType::METRIC_TYPE_COUNTER}, - {METRIC_INPUT_READ_TOTAL, MetricType::METRIC_TYPE_COUNTER}, - {METRIC_INPUT_FILE_SIZE_BYTES, MetricType::METRIC_TYPE_INT_GAUGE}, - {METRIC_INPUT_FILE_OFFSET_BYTES, MetricType::METRIC_TYPE_INT_GAUGE}, + {METRIC_PLUGIN_OUT_EVENTS_TOTAL, MetricType::METRIC_TYPE_COUNTER}, + {METRIC_PLUGIN_OUT_EVENT_GROUPS_TOTAL, MetricType::METRIC_TYPE_COUNTER}, + {METRIC_PLUGIN_OUT_SIZE_BYTES, MetricType::METRIC_TYPE_COUNTER}, + {METRIC_PLUGIN_SOURCE_SIZE_BYTES, MetricType::METRIC_TYPE_INT_GAUGE}, + {METRIC_PLUGIN_SOURCE_READ_OFFSET_BYTES, MetricType::METRIC_TYPE_INT_GAUGE}, }; mPluginMetricManager = std::make_shared(GetMetricsRecordRef()->GetLabels(), inputFileMetricKeys); // Register a Gauge metric to record PluginMetricManager‘s map size - mInputFileMonitorTotal = GetMetricsRecordRef().CreateIntGauge(METRIC_INPUT_FILE_MONITOR_TOTAL); - mPluginMetricManager->RegisterSizeGauge(mInputFileMonitorTotal); + mMonitorFileTotal = GetMetricsRecordRef().CreateIntGauge(METRIC_PLUGIN_MONITOR_FILE_TOTAL); + mPluginMetricManager->RegisterSizeGauge(mMonitorFileTotal); return CreateInnerProcessors(); } diff --git a/core/plugin/input/InputContainerStdio.h b/core/plugin/input/InputContainerStdio.h index 0ee1ea11fd..a9d1e51aed 100644 --- 
a/core/plugin/input/InputContainerStdio.h +++ b/core/plugin/input/InputContainerStdio.h @@ -53,7 +53,7 @@ class InputContainerStdio : public Input { FileDiscoveryOptions mFileDiscovery; PluginMetricManagerPtr mPluginMetricManager; - IntGaugePtr mInputFileMonitorTotal; + IntGaugePtr mMonitorFileTotal; bool CreateInnerProcessors(); diff --git a/core/plugin/input/InputFile.cpp b/core/plugin/input/InputFile.cpp index d21c1ce483..c5b0039a19 100644 --- a/core/plugin/input/InputFile.cpp +++ b/core/plugin/input/InputFile.cpp @@ -23,7 +23,7 @@ #include "common/ParamExtractor.h" #include "file_server/ConfigManager.h" #include "file_server/FileServer.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineManager.h" #include "pipeline/plugin/PluginRegistry.h" @@ -157,19 +157,17 @@ bool InputFile::Init(const Json::Value& config, Json::Value& optionalGoPipeline) mContext->SetExactlyOnceFlag(true); } - mInputFileMonitorTotal = GetMetricsRecordRef().CreateIntGauge(METRIC_INPUT_FILE_MONITOR_TOTAL); + mMonitorFileTotal = GetMetricsRecordRef().CreateIntGauge(METRIC_PLUGIN_MONITOR_FILE_TOTAL); static const std::unordered_map inputFileMetricKeys = { - // {METRIC_INPUT_RECORDS_TOTAL, MetricType::METRIC_TYPE_COUNTER}, - {METRIC_INPUT_RECORDS_SIZE_BYTES, MetricType::METRIC_TYPE_COUNTER}, - // {METRIC_INPUT_BATCH_TOTAL, MetricType::METRIC_TYPE_COUNTER}, - {METRIC_INPUT_READ_TOTAL, MetricType::METRIC_TYPE_COUNTER}, - {METRIC_INPUT_FILE_SIZE_BYTES, MetricType::METRIC_TYPE_INT_GAUGE}, - // {METRIC_INPUT_FILE_READ_DELAY_TIME_MS, MetricType::METRIC_TYPE_INT_GAUGE}, - {METRIC_INPUT_FILE_OFFSET_BYTES, MetricType::METRIC_TYPE_INT_GAUGE}, + {METRIC_PLUGIN_OUT_EVENTS_TOTAL, MetricType::METRIC_TYPE_COUNTER}, + {METRIC_PLUGIN_OUT_EVENT_GROUPS_TOTAL, MetricType::METRIC_TYPE_COUNTER}, + {METRIC_PLUGIN_OUT_SIZE_BYTES, MetricType::METRIC_TYPE_COUNTER}, + {METRIC_PLUGIN_SOURCE_SIZE_BYTES, 
MetricType::METRIC_TYPE_INT_GAUGE}, + {METRIC_PLUGIN_SOURCE_READ_OFFSET_BYTES, MetricType::METRIC_TYPE_INT_GAUGE}, }; mPluginMetricManager = std::make_shared(GetMetricsRecordRef()->GetLabels(), inputFileMetricKeys); - mPluginMetricManager->RegisterSizeGauge(mInputFileMonitorTotal); + mPluginMetricManager->RegisterSizeGauge(mMonitorFileTotal); return CreateInnerProcessors(); } diff --git a/core/plugin/input/InputFile.h b/core/plugin/input/InputFile.h index 5410776de2..ee8275ef7c 100644 --- a/core/plugin/input/InputFile.h +++ b/core/plugin/input/InputFile.h @@ -48,7 +48,7 @@ class InputFile : public Input { FileReaderOptions mFileReader; MultilineOptions mMultiline; PluginMetricManagerPtr mPluginMetricManager; - IntGaugePtr mInputFileMonitorTotal; + IntGaugePtr mMonitorFileTotal; // others uint32_t mMaxCheckpointDirSearchDepth = 0; uint32_t mExactlyOnceConcurrency = 0; diff --git a/core/plugin/input/InputPrometheus.cpp b/core/plugin/input/InputPrometheus.cpp index 5257ccc2a3..a1d2af138c 100644 --- a/core/plugin/input/InputPrometheus.cpp +++ b/core/plugin/input/InputPrometheus.cpp @@ -68,8 +68,9 @@ bool InputPrometheus::Start() { PrometheusInputRunner::GetInstance()->Init(); mTargetSubscirber->mQueueKey = mContext->GetProcessQueueKey(); + auto defaultLabels = GetMetricsRecordRef()->GetLabels(); - PrometheusInputRunner::GetInstance()->UpdateScrapeInput(std::move(mTargetSubscirber)); + PrometheusInputRunner::GetInstance()->UpdateScrapeInput(std::move(mTargetSubscirber), *defaultLabels, mContext->GetProjectName()); return true; } diff --git a/core/plugin/input/InputPrometheus.h b/core/plugin/input/InputPrometheus.h index e0ef5a8b10..9ae51128b6 100644 --- a/core/plugin/input/InputPrometheus.h +++ b/core/plugin/input/InputPrometheus.h @@ -17,7 +17,7 @@ class InputPrometheus : public Input { bool Init(const Json::Value& config, Json::Value& optionalGoPipeline) override; bool Start() override; bool Stop(bool isPipelineRemoving) override; - bool SupportAck() const override { 
return false; } + bool SupportAck() const override { return true; } private: bool CreateInnerProcessors(const Json::Value& inputConfig); diff --git a/core/plugin/input/input.cmake b/core/plugin/input/input.cmake index eb213b46b5..b986267eca 100644 --- a/core/plugin/input/input.cmake +++ b/core/plugin/input/input.cmake @@ -23,17 +23,10 @@ list(APPEND THIS_SOURCE_FILES_LIST ${THIS_SOURCE_FILES}) if(MSVC) # remove observer related files in input list(REMOVE_ITEM THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/plugin/input/InputObserverNetwork.cpp ${CMAKE_SOURCE_DIR}/plugin/input/InputObserverNetwork.h) - if (ENABLE_ENTERPRISE) - list(REMOVE_ITEM THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/plugin/input/InputStream.cpp ${CMAKE_SOURCE_DIR}/plugin/input/InputStream.h) - endif () elseif(UNIX) if (NOT LINUX) # remove observer related files in input list(REMOVE_ITEM THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/plugin/input/InputObserverNetwork.cpp ${CMAKE_SOURCE_DIR}/plugin/input/InputObserverNetwork.h) - # remove inputStream in input - if (ENABLE_ENTERPRISE) - list(REMOVE_ITEM THIS_SOURCE_FILES_LIST ${CMAKE_SOURCE_DIR}/plugin/input/InputStream.cpp ${CMAKE_SOURCE_DIR}/plugin/input/InputStream.h) - endif () endif() endif() diff --git a/core/plugin/processor/ProcessorDesensitizeNative.cpp b/core/plugin/processor/ProcessorDesensitizeNative.cpp index 172bb2d025..39ba99c9aa 100644 --- a/core/plugin/processor/ProcessorDesensitizeNative.cpp +++ b/core/plugin/processor/ProcessorDesensitizeNative.cpp @@ -18,7 +18,7 @@ #include "common/Constants.h" #include "common/ParamExtractor.h" #include "models/LogEvent.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/plugin/instance/ProcessorInstance.h" #include "sdk/Common.h" @@ -136,7 +136,10 @@ bool ProcessorDesensitizeNative::Init(const Json::Value& config) { mContext->GetRegion()); } - mProcDesensitizeRecodesTotal = 
GetMetricsRecordRef().CreateCounter(METRIC_PROC_DESENSITIZE_RECORDS_TOTAL); + mDiscardedEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_DISCARDED_EVENTS_TOTAL); + mOutFailedEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_FAILED_EVENTS_TOTAL); + mOutKeyNotFoundEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_KEY_NOT_FOUND_EVENTS_TOTAL); + mOutSuccessfulEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_SUCCESSFUL_EVENTS_TOTAL); return true; } @@ -156,16 +159,21 @@ void ProcessorDesensitizeNative::Process(PipelineEventGroup& logGroup) { void ProcessorDesensitizeNative::ProcessEvent(PipelineEventPtr& e) { if (!IsSupportedEvent(e)) { + mOutFailedEventsTotal->Add(1); return; } auto& sourceEvent = e.Cast(); + bool hasKey = false; + bool processed = false; // Traverse all fields and desensitize sensitive fields. for (auto& item : sourceEvent) { // Only perform desensitization processing on specified fields. if (item.first != mSourceKey) { continue; + } else { + hasKey = true; } // Only perform desensitization processing on non-empty fields. 
if (item.second.empty()) { @@ -173,9 +181,18 @@ void ProcessorDesensitizeNative::ProcessEvent(PipelineEventPtr& e) { } std::string value = item.second.to_string(); CastOneSensitiveWord(&value); - mProcDesensitizeRecodesTotal->Add(1); StringBuffer valueBuffer = sourceEvent.GetSourceBuffer()->CopyString(value); sourceEvent.SetContentNoCopy(item.first, StringView(valueBuffer.data, valueBuffer.size)); + processed = true; + } + if (processed) { + mOutSuccessfulEventsTotal->Add(1); + } else { + if (hasKey) { + mOutKeyNotFoundEventsTotal->Add(1); + } else { + mOutFailedEventsTotal->Add(1); + } } } diff --git a/core/plugin/processor/ProcessorDesensitizeNative.h b/core/plugin/processor/ProcessorDesensitizeNative.h index 2352b40d5a..b720fd7569 100644 --- a/core/plugin/processor/ProcessorDesensitizeNative.h +++ b/core/plugin/processor/ProcessorDesensitizeNative.h @@ -56,7 +56,10 @@ class ProcessorDesensitizeNative : public Processor { std::shared_ptr mRegex; - CounterPtr mProcDesensitizeRecodesTotal; + CounterPtr mDiscardedEventsTotal; + CounterPtr mOutFailedEventsTotal; + CounterPtr mOutKeyNotFoundEventsTotal; + CounterPtr mOutSuccessfulEventsTotal; #ifdef APSARA_UNIT_TEST_MAIN friend class ProcessorParseApsaraNativeUnittest; diff --git a/core/plugin/processor/ProcessorFilterNative.cpp b/core/plugin/processor/ProcessorFilterNative.cpp index a1929e78e3..0b14998fe5 100644 --- a/core/plugin/processor/ProcessorFilterNative.cpp +++ b/core/plugin/processor/ProcessorFilterNative.cpp @@ -21,7 +21,7 @@ #include "common/ParamExtractor.h" #include "logger/Logger.h" #include "models/LogEvent.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" namespace logtail { @@ -181,9 +181,6 @@ bool ProcessorFilterNative::Init(const Json::Value& config) { mContext->GetRegion()); } - mProcFilterErrorTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_FILTER_ERROR_TOTAL); - mProcFilterRecordsTotal = 
GetMetricsRecordRef().CreateCounter(METRIC_PROC_FILTER_RECORDS_TOTAL); - return true; } @@ -201,8 +198,6 @@ void ProcessorFilterNative::Process(PipelineEventGroup& logGroup) { events[wIdx] = std::move(events[rIdx]); } ++wIdx; - } else { - mProcFilterRecordsTotal->Add(1); } } events.resize(wIdx); @@ -265,7 +260,6 @@ bool ProcessorFilterNative::FilterExpressionRoot(LogEvent& sourceEvent, const Ba try { return node->Match(sourceEvent, GetContext()); } catch (...) { - mProcFilterErrorTotal->Add(1); LOG_ERROR(GetContext().GetLogger(), ("filter error ", "")); return false; } @@ -284,7 +278,6 @@ bool ProcessorFilterNative::FilterFilterRule(LogEvent& sourceEvent, const LogFil try { return IsMatched(sourceEvent, *filterRule); } catch (...) { - mProcFilterErrorTotal->Add(1); LOG_ERROR(GetContext().GetLogger(), ("filter error ", "")); return false; } diff --git a/core/plugin/processor/ProcessorFilterNative.h b/core/plugin/processor/ProcessorFilterNative.h index c5969d427b..216bf902af 100644 --- a/core/plugin/processor/ProcessorFilterNative.h +++ b/core/plugin/processor/ProcessorFilterNative.h @@ -141,9 +141,6 @@ class ProcessorFilterNative : public Processor { std::shared_ptr mFilterRule; - CounterPtr mProcFilterErrorTotal; - CounterPtr mProcFilterRecordsTotal; - #ifdef APSARA_UNIT_TEST_MAIN friend class ProcessorFilterNativeUnittest; #endif diff --git a/core/plugin/processor/ProcessorParseApsaraNative.cpp b/core/plugin/processor/ProcessorParseApsaraNative.cpp index 402d698704..43d3854266 100644 --- a/core/plugin/processor/ProcessorParseApsaraNative.cpp +++ b/core/plugin/processor/ProcessorParseApsaraNative.cpp @@ -21,7 +21,7 @@ #include "common/ParamExtractor.h" #include "common/TimeUtil.h" #include "models/LogEvent.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/plugin/instance/ProcessorInstance.h" namespace logtail { @@ -78,11 +78,11 @@ bool ProcessorParseApsaraNative::Init(const Json::Value& config) { 
mParseFailures = &(GetContext().GetProcessProfile().parseFailures); mHistoryFailures = &(GetContext().GetProcessProfile().historyFailures); - mProcParseInSizeBytes = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_IN_SIZE_BYTES); - mProcParseOutSizeBytes = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_OUT_SIZE_BYTES); - mProcDiscardRecordsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_DISCARD_RECORDS_TOTAL); - mProcParseErrorTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_ERROR_TOTAL); - mProcHistoryFailureTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_HISTORY_FAILURE_TOTAL); + mDiscardedEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_DISCARDED_EVENTS_TOTAL); + mOutFailedEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_FAILED_EVENTS_TOTAL); + mOutKeyNotFoundEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_KEY_NOT_FOUND_EVENTS_TOTAL); + mOutSuccessfulEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_SUCCESSFUL_EVENTS_TOTAL); + mHistoryFailureTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_HISTORY_FAILURE_TOTAL); return true; } @@ -122,18 +122,20 @@ bool ProcessorParseApsaraNative::ProcessEvent(const StringView& logPath, LogtailTime& cachedLogTime, StringView& timeStrCache) { if (!IsSupportedEvent(e)) { + mOutFailedEventsTotal->Add(1); return true; } LogEvent& sourceEvent = e.Cast(); if (!sourceEvent.HasContent(mSourceKey)) { + mOutKeyNotFoundEventsTotal->Add(1); return true; } bool sourceKeyOverwritten = false; StringView buffer = sourceEvent.GetContent(mSourceKey); if (buffer.size() == 0) { + mOutFailedEventsTotal->Add(1); return true; } - mProcParseInSizeBytes->Add(buffer.size()); int64_t logTime_in_micro = 0; time_t logTime = ApsaraEasyReadLogTimeParser(buffer, timeStrCache, cachedLogTime, logTime_in_micro); if (logTime <= 0) // this case will handle empty apsara log line @@ -156,7 +158,7 @@ bool 
ProcessorParseApsaraNative::ProcessEvent(const StringView& logPath, GetContext().GetProjectName(), GetContext().GetLogstoreName(), GetContext().GetRegion()); - mProcParseErrorTotal->Add(1); + mOutFailedEventsTotal->Add(1); ++(*mParseFailures); sourceEvent.DelContent(mSourceKey); if (mCommonParserOptions.ShouldAddSourceContent(false)) { @@ -166,7 +168,7 @@ bool ProcessorParseApsaraNative::ProcessEvent(const StringView& logPath, AddLog(mCommonParserOptions.legacyUnmatchedRawLogKey, buffer, sourceEvent, false); } if (mCommonParserOptions.ShouldEraseEvent(false, sourceEvent)) { - mProcDiscardRecordsTotal->Add(1); + mDiscardedEventsTotal->Add(1); return false; } return true; @@ -194,8 +196,8 @@ bool ProcessorParseApsaraNative::ProcessEvent(const StringView& logPath, GetContext().GetRegion()); } ++(*mHistoryFailures); - mProcHistoryFailureTotal->Add(1); - mProcDiscardRecordsTotal->Add(1); + mHistoryFailureTotal->Add(1); + mDiscardedEventsTotal->Add(1); return false; } @@ -237,6 +239,7 @@ bool ProcessorParseApsaraNative::ProcessEvent(const StringView& logPath, if (mCommonParserOptions.ShouldAddSourceContent(true)) { AddLog(mCommonParserOptions.mRenamedSourceKey, buffer, sourceEvent, false); } + mOutSuccessfulEventsTotal->Add(1); return true; } @@ -471,7 +474,6 @@ void ProcessorParseApsaraNative::AddLog(const StringView& key, } targetEvent.AppendContentNoCopy(key, value); *mLogGroupSize += key.size() + value.size() + 5; - mProcParseOutSizeBytes->Add(key.size() + value.size()); } bool ProcessorParseApsaraNative::IsSupportedEvent(const PipelineEventPtr& e) const { diff --git a/core/plugin/processor/ProcessorParseApsaraNative.h b/core/plugin/processor/ProcessorParseApsaraNative.h index b081df6b78..4bd77ee8e1 100644 --- a/core/plugin/processor/ProcessorParseApsaraNative.h +++ b/core/plugin/processor/ProcessorParseApsaraNative.h @@ -54,11 +54,12 @@ class ProcessorParseApsaraNative : public Processor { int* mLogGroupSize = nullptr; int* mParseFailures = nullptr; int* 
mHistoryFailures = nullptr; - CounterPtr mProcParseInSizeBytes; - CounterPtr mProcParseOutSizeBytes; - CounterPtr mProcDiscardRecordsTotal; - CounterPtr mProcParseErrorTotal; - CounterPtr mProcHistoryFailureTotal; + + CounterPtr mDiscardedEventsTotal; + CounterPtr mOutFailedEventsTotal; + CounterPtr mOutKeyNotFoundEventsTotal; + CounterPtr mOutSuccessfulEventsTotal; + CounterPtr mHistoryFailureTotal; #ifdef APSARA_UNIT_TEST_MAIN friend class ProcessorParseApsaraNativeUnittest; diff --git a/core/plugin/processor/ProcessorParseDelimiterNative.cpp b/core/plugin/processor/ProcessorParseDelimiterNative.cpp index 2e73690fa8..4f9b44fdf8 100644 --- a/core/plugin/processor/ProcessorParseDelimiterNative.cpp +++ b/core/plugin/processor/ProcessorParseDelimiterNative.cpp @@ -18,7 +18,7 @@ #include "common/ParamExtractor.h" #include "models/LogEvent.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/plugin/instance/ProcessorInstance.h" namespace logtail { @@ -178,10 +178,10 @@ bool ProcessorParseDelimiterNative::Init(const Json::Value& config) { mParseFailures = &(GetContext().GetProcessProfile().parseFailures); mLogGroupSize = &(GetContext().GetProcessProfile().logGroupSize); - mProcParseInSizeBytes = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_IN_SIZE_BYTES); - mProcParseOutSizeBytes = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_OUT_SIZE_BYTES); - mProcDiscardRecordsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_DISCARD_RECORDS_TOTAL); - mProcParseErrorTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_ERROR_TOTAL); + mDiscardedEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_DISCARDED_EVENTS_TOTAL); + mOutFailedEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_FAILED_EVENTS_TOTAL); + mOutKeyNotFoundEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_KEY_NOT_FOUND_EVENTS_TOTAL); + mOutSuccessfulEventsTotal = 
GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_SUCCESSFUL_EVENTS_TOTAL); return true; } @@ -208,17 +208,21 @@ void ProcessorParseDelimiterNative::Process(PipelineEventGroup& logGroup) { bool ProcessorParseDelimiterNative::ProcessEvent(const StringView& logPath, PipelineEventPtr& e) { if (!IsSupportedEvent(e)) { + mOutFailedEventsTotal->Add(1); return true; } LogEvent& sourceEvent = e.Cast(); if (!sourceEvent.HasContent(mSourceKey)) { + mOutKeyNotFoundEventsTotal->Add(1); return true; } StringView buffer = sourceEvent.GetContent(mSourceKey); - mProcParseInSizeBytes->Add(buffer.size()); + int32_t endIdx = buffer.size(); - if (endIdx == 0) + if (endIdx == 0) { + mOutFailedEventsTotal->Add(1); return true; + } for (int32_t i = endIdx - 1; i >= 0; --i) { if (buffer.data()[i] == ' ' || '\r' == buffer.data()[i]) @@ -233,8 +237,11 @@ bool ProcessorParseDelimiterNative::ProcessEvent(const StringView& logPath, Pipe else break; } - if (begIdx >= endIdx) + if (begIdx >= endIdx) { + mOutFailedEventsTotal->Add(1); return true; + } + size_t reserveSize = mOverflowedFieldsTreatment == OverflowedFieldsTreatment::EXTEND ? 
(mKeys.size() + 10) : (mKeys.size() + 1); std::vector columnValues; @@ -290,7 +297,6 @@ bool ProcessorParseDelimiterNative::ProcessEvent(const StringView& logPath, Pipe GetContext().GetProjectName(), GetContext().GetLogstoreName(), GetContext().GetRegion()); - mProcParseErrorTotal->Add(1); ++(*mParseFailures); parseSuccess = false; } @@ -301,7 +307,6 @@ bool ProcessorParseDelimiterNative::ProcessEvent(const StringView& logPath, Pipe GetContext().GetProjectName(), GetContext().GetLogstoreName(), GetContext().GetRegion()); - mProcParseErrorTotal->Add(1); ++(*mParseFailures); parseSuccess = false; } @@ -314,7 +319,6 @@ bool ProcessorParseDelimiterNative::ProcessEvent(const StringView& logPath, Pipe LOG_WARNING(sLogger, ("parse delimiter log fail", "no column keys defined")("project", GetContext().GetProjectName())( "logstore", GetContext().GetLogstoreName())("file", logPath)); - mProcParseErrorTotal->Add(1); ++(*mParseFailures); parseSuccess = false; } @@ -339,7 +343,11 @@ bool ProcessorParseDelimiterNative::ProcessEvent(const StringView& logPath, Pipe sourceEvent); } } + mOutSuccessfulEventsTotal->Add(1); + } else { + mOutFailedEventsTotal->Add(1); } + if (!parseSuccess || !mSourceKeyOverwritten) { sourceEvent.DelContent(mSourceKey); } @@ -350,7 +358,7 @@ bool ProcessorParseDelimiterNative::ProcessEvent(const StringView& logPath, Pipe AddLog(mCommonParserOptions.legacyUnmatchedRawLogKey, buffer, sourceEvent, false); } if (mCommonParserOptions.ShouldEraseEvent(parseSuccess, sourceEvent)) { - mProcDiscardRecordsTotal->Add(1); + mDiscardedEventsTotal->Add(1); return false; } return true; @@ -410,7 +418,6 @@ void ProcessorParseDelimiterNative::AddLog(const StringView& key, } targetEvent.SetContentNoCopy(key, value); *mLogGroupSize += key.size() + value.size() + 5; - mProcParseOutSizeBytes->Add(key.size() + value.size()); } bool ProcessorParseDelimiterNative::IsSupportedEvent(const PipelineEventPtr& e) const { diff --git 
a/core/plugin/processor/ProcessorParseDelimiterNative.h b/core/plugin/processor/ProcessorParseDelimiterNative.h index 6815ef7561..cebed7a582 100644 --- a/core/plugin/processor/ProcessorParseDelimiterNative.h +++ b/core/plugin/processor/ProcessorParseDelimiterNative.h @@ -76,10 +76,11 @@ class ProcessorParseDelimiterNative : public Processor { int* mLogGroupSize = nullptr; int* mParseFailures = nullptr; - CounterPtr mProcParseInSizeBytes; - CounterPtr mProcParseOutSizeBytes; - CounterPtr mProcDiscardRecordsTotal; - CounterPtr mProcParseErrorTotal; + + CounterPtr mDiscardedEventsTotal; + CounterPtr mOutFailedEventsTotal; + CounterPtr mOutKeyNotFoundEventsTotal; + CounterPtr mOutSuccessfulEventsTotal; #ifdef APSARA_UNIT_TEST_MAIN friend class ProcessorParseDelimiterNativeUnittest; diff --git a/core/plugin/processor/ProcessorParseJsonNative.cpp b/core/plugin/processor/ProcessorParseJsonNative.cpp index ec83a88696..90b3d075c5 100644 --- a/core/plugin/processor/ProcessorParseJsonNative.cpp +++ b/core/plugin/processor/ProcessorParseJsonNative.cpp @@ -21,7 +21,7 @@ #include "common/ParamExtractor.h" #include "models/LogEvent.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/plugin/instance/ProcessorInstance.h" namespace logtail { @@ -50,10 +50,10 @@ bool ProcessorParseJsonNative::Init(const Json::Value& config) { mParseFailures = &(GetContext().GetProcessProfile().parseFailures); mLogGroupSize = &(GetContext().GetProcessProfile().logGroupSize); - mProcParseInSizeBytes = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_IN_SIZE_BYTES); - mProcParseOutSizeBytes = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_OUT_SIZE_BYTES); - mProcDiscardRecordsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_DISCARD_RECORDS_TOTAL); - mProcParseErrorTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_ERROR_TOTAL); + mDiscardedEventsTotal = 
GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_DISCARDED_EVENTS_TOTAL); + mOutFailedEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_FAILED_EVENTS_TOTAL); + mOutKeyNotFoundEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_KEY_NOT_FOUND_EVENTS_TOTAL); + mOutSuccessfulEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_SUCCESSFUL_EVENTS_TOTAL); return true; } @@ -80,10 +80,12 @@ void ProcessorParseJsonNative::Process(PipelineEventGroup& logGroup) { bool ProcessorParseJsonNative::ProcessEvent(const StringView& logPath, PipelineEventPtr& e) { if (!IsSupportedEvent(e)) { + mOutFailedEventsTotal->Add(1); return true; } auto& sourceEvent = e.Cast(); if (!sourceEvent.HasContent(mSourceKey)) { + mOutKeyNotFoundEventsTotal->Add(1); return true; } @@ -102,9 +104,10 @@ bool ProcessorParseJsonNative::ProcessEvent(const StringView& logPath, PipelineE AddLog(mCommonParserOptions.legacyUnmatchedRawLogKey, rawContent, sourceEvent, false); } if (mCommonParserOptions.ShouldEraseEvent(parseSuccess, sourceEvent)) { - mProcDiscardRecordsTotal->Add(1); + mDiscardedEventsTotal->Add(1); return false; } + mOutSuccessfulEventsTotal->Add(1); return true; } @@ -117,8 +120,6 @@ bool ProcessorParseJsonNative::JsonLogLineParser(LogEvent& sourceEvent, if (buffer.empty()) return false; - mProcParseInSizeBytes->Add(buffer.size()); - bool parseSuccess = true; rapidjson::Document doc; doc.Parse(buffer.data(), buffer.size()); @@ -135,7 +136,7 @@ bool ProcessorParseJsonNative::JsonLogLineParser(LogEvent& sourceEvent, GetContext().GetRegion()); } ++(*mParseFailures); - mProcParseErrorTotal->Add(1); + mOutFailedEventsTotal->Add(1); parseSuccess = false; } else if (!doc.IsObject()) { if (LogtailAlarm::GetInstance()->IsLowLevelAlarmValid()) { @@ -149,7 +150,7 @@ bool ProcessorParseJsonNative::JsonLogLineParser(LogEvent& sourceEvent, GetContext().GetRegion()); } ++(*mParseFailures); - mProcParseErrorTotal->Add(1); + mOutFailedEventsTotal->Add(1); 
parseSuccess = false; } if (!parseSuccess) { @@ -209,7 +210,6 @@ void ProcessorParseJsonNative::AddLog(const StringView& key, } targetEvent.SetContentNoCopy(key, value); *mLogGroupSize += key.size() + value.size() + 5; - mProcParseOutSizeBytes->Add(key.size() + value.size()); } bool ProcessorParseJsonNative::IsSupportedEvent(const PipelineEventPtr& e) const { diff --git a/core/plugin/processor/ProcessorParseJsonNative.h b/core/plugin/processor/ProcessorParseJsonNative.h index b071f2775a..b9d1902696 100644 --- a/core/plugin/processor/ProcessorParseJsonNative.h +++ b/core/plugin/processor/ProcessorParseJsonNative.h @@ -47,10 +47,10 @@ class ProcessorParseJsonNative : public Processor { int* mParseFailures = nullptr; int* mLogGroupSize = nullptr; - CounterPtr mProcParseInSizeBytes; - CounterPtr mProcParseOutSizeBytes; - CounterPtr mProcDiscardRecordsTotal; - CounterPtr mProcParseErrorTotal; + CounterPtr mDiscardedEventsTotal; + CounterPtr mOutFailedEventsTotal; + CounterPtr mOutKeyNotFoundEventsTotal; + CounterPtr mOutSuccessfulEventsTotal; #ifdef APSARA_UNIT_TEST_MAIN friend class ProcessorParseJsonNativeUnittest; diff --git a/core/plugin/processor/ProcessorParseRegexNative.cpp b/core/plugin/processor/ProcessorParseRegexNative.cpp index e3c5790d99..f04d8ad0dd 100644 --- a/core/plugin/processor/ProcessorParseRegexNative.cpp +++ b/core/plugin/processor/ProcessorParseRegexNative.cpp @@ -18,7 +18,7 @@ #include "app_config/AppConfig.h" #include "common/ParamExtractor.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" namespace logtail { @@ -96,11 +96,10 @@ bool ProcessorParseRegexNative::Init(const Json::Value& config) { mRegexMatchFailures = &(GetContext().GetProcessProfile().regexMatchFailures); mLogGroupSize = &(GetContext().GetProcessProfile().logGroupSize); - mProcParseInSizeBytes = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_IN_SIZE_BYTES); - mProcParseOutSizeBytes = 
GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_OUT_SIZE_BYTES); - mProcDiscardRecordsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_DISCARD_RECORDS_TOTAL); - mProcParseErrorTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_ERROR_TOTAL); - mProcKeyCountNotMatchErrorTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_KEY_COUNT_NOT_MATCH_ERROR_TOTAL); + mDiscardedEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_DISCARDED_EVENTS_TOTAL); + mOutFailedEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_FAILED_EVENTS_TOTAL); + mOutKeyNotFoundEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_KEY_NOT_FOUND_EVENTS_TOTAL); + mOutSuccessfulEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_SUCCESSFUL_EVENTS_TOTAL); return true; } @@ -131,10 +130,12 @@ bool ProcessorParseRegexNative::IsSupportedEvent(const PipelineEventPtr& e) cons bool ProcessorParseRegexNative::ProcessEvent(const StringView& logPath, PipelineEventPtr& e) { if (!IsSupportedEvent(e)) { + mOutFailedEventsTotal->Add(1); return true; } LogEvent& sourceEvent = e.Cast(); if (!sourceEvent.HasContent(mSourceKey)) { + mOutKeyNotFoundEventsTotal->Add(1); return true; } auto rawContent = sourceEvent.GetContent(mSourceKey); @@ -156,16 +157,16 @@ bool ProcessorParseRegexNative::ProcessEvent(const StringView& logPath, Pipeline AddLog(mCommonParserOptions.legacyUnmatchedRawLogKey, rawContent, sourceEvent, false); } if (mCommonParserOptions.ShouldEraseEvent(parseSuccess, sourceEvent)) { - mProcDiscardRecordsTotal->Add(1); + mDiscardedEventsTotal->Add(1); return false; } + mOutSuccessfulEventsTotal->Add(1); return true; } bool ProcessorParseRegexNative::WholeLineModeParser(LogEvent& sourceEvent, const std::string& key) { StringView buffer = sourceEvent.GetContent(mSourceKey); AddLog(StringView(key), buffer, sourceEvent); - mProcParseInSizeBytes->Add(buffer.size()); return true; } @@ -178,7 +179,6 @@ void 
ProcessorParseRegexNative::AddLog(const StringView& key, } targetEvent.SetContentNoCopy(key, value); *mLogGroupSize += key.size() + value.size() + 5; - mProcParseOutSizeBytes->Add(key.size() + value.size()); } bool ProcessorParseRegexNative::RegexLogLineParser(LogEvent& sourceEvent, @@ -189,7 +189,6 @@ bool ProcessorParseRegexNative::RegexLogLineParser(LogEvent& sourceEvent, std::string exception; StringView buffer = sourceEvent.GetContent(mSourceKey); bool parseSuccess = true; - mProcParseInSizeBytes->Add(buffer.size()); if (!BoostRegexMatch(buffer.data(), buffer.size(), reg, exception, what, boost::match_default)) { if (!exception.empty()) { if (AppConfig::GetInstance()->IsLogParseAlarmValid()) { @@ -221,7 +220,7 @@ bool ProcessorParseRegexNative::RegexLogLineParser(LogEvent& sourceEvent, } ++(*mRegexMatchFailures); ++(*mParseFailures); - mProcParseErrorTotal->Add(1); + mOutFailedEventsTotal->Add(1); parseSuccess = false; } else if (what.size() <= keys.size()) { if (AppConfig::GetInstance()->IsLogParseAlarmValid()) { @@ -240,7 +239,6 @@ bool ProcessorParseRegexNative::RegexLogLineParser(LogEvent& sourceEvent, } ++(*mRegexMatchFailures); ++(*mParseFailures); - mProcKeyCountNotMatchErrorTotal->Add(1); parseSuccess = false; } if (!parseSuccess) { diff --git a/core/plugin/processor/ProcessorParseRegexNative.h b/core/plugin/processor/ProcessorParseRegexNative.h index fa50d27820..c2a8fc8d88 100644 --- a/core/plugin/processor/ProcessorParseRegexNative.h +++ b/core/plugin/processor/ProcessorParseRegexNative.h @@ -62,11 +62,11 @@ class ProcessorParseRegexNative : public Processor { int* mParseFailures = nullptr; int* mRegexMatchFailures = nullptr; int* mLogGroupSize = nullptr; - CounterPtr mProcParseInSizeBytes; - CounterPtr mProcParseOutSizeBytes; - CounterPtr mProcDiscardRecordsTotal; - CounterPtr mProcParseErrorTotal; - CounterPtr mProcKeyCountNotMatchErrorTotal; + + CounterPtr mDiscardedEventsTotal; + CounterPtr mOutFailedEventsTotal; + CounterPtr 
mOutKeyNotFoundEventsTotal; + CounterPtr mOutSuccessfulEventsTotal; #ifdef APSARA_UNIT_TEST_MAIN friend class ProcessorParseRegexNativeUnittest; diff --git a/core/plugin/processor/ProcessorParseTimestampNative.cpp b/core/plugin/processor/ProcessorParseTimestampNative.cpp index b37343f1db..88306a3b6b 100644 --- a/core/plugin/processor/ProcessorParseTimestampNative.cpp +++ b/core/plugin/processor/ProcessorParseTimestampNative.cpp @@ -19,7 +19,7 @@ #include "app_config/AppConfig.h" #include "common/LogtailCommonFlags.h" #include "common/ParamExtractor.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/plugin/instance/ProcessorInstance.h" namespace logtail { @@ -90,11 +90,11 @@ bool ProcessorParseTimestampNative::Init(const Json::Value& config) { mParseTimeFailures = &(GetContext().GetProcessProfile().parseTimeFailures); mHistoryFailures = &(GetContext().GetProcessProfile().historyFailures); - mProcParseInSizeBytes = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_IN_SIZE_BYTES); - mProcParseOutSizeBytes = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_OUT_SIZE_BYTES); - mProcDiscardRecordsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_DISCARD_RECORDS_TOTAL); - mProcParseErrorTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_ERROR_TOTAL); - mProcHistoryFailureTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_HISTORY_FAILURE_TOTAL); + mDiscardedEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_DISCARDED_EVENTS_TOTAL); + mOutFailedEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_FAILED_EVENTS_TOTAL); + mOutKeyNotFoundEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_KEY_NOT_FOUND_EVENTS_TOTAL); + mOutSuccessfulEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_SUCCESSFUL_EVENTS_TOTAL); + mHistoryFailureTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_HISTORY_FAILURE_TOTAL); return true; } @@ 
-130,17 +130,19 @@ bool ProcessorParseTimestampNative::ProcessEvent(StringView logPath, LogtailTime& logTime, StringView& timeStrCache) { if (!IsSupportedEvent(e)) { + mOutFailedEventsTotal->Add(1); return true; } LogEvent& sourceEvent = e.Cast(); if (!sourceEvent.HasContent(mSourceKey)) { + mOutKeyNotFoundEventsTotal->Add(1); return true; } const StringView& timeStr = sourceEvent.GetContent(mSourceKey); - mProcParseInSizeBytes->Add(timeStr.size()); uint64_t preciseTimestamp = 0; bool parseSuccess = ParseLogTime(timeStr, logPath, logTime, preciseTimestamp, timeStrCache); if (!parseSuccess) { + mOutFailedEventsTotal->Add(1); return true; } if (logTime.tv_sec <= 0 @@ -163,8 +165,8 @@ bool ProcessorParseTimestampNative::ProcessEvent(StringView logPath, GetContext().GetRegion()); } ++(*mHistoryFailures); - mProcHistoryFailureTotal->Add(1); - mProcDiscardRecordsTotal->Add(1); + mHistoryFailureTotal->Add(1); + mDiscardedEventsTotal->Add(1); return false; } sourceEvent.SetTimestamp(logTime.tv_sec, logTime.tv_nsec); @@ -173,7 +175,7 @@ bool ProcessorParseTimestampNative::ProcessEvent(StringView logPath, // sb.size = std::min(20, snprintf(sb.data, sb.capacity, "%lu", preciseTimestamp)); // sourceEvent.SetContentNoCopy(mLegacyPreciseTimestampConfig.key, StringView(sb.data, sb.size)); // } - mProcParseOutSizeBytes->Add(sizeof(logTime.tv_sec) + sizeof(logTime.tv_nsec)); + mOutSuccessfulEventsTotal->Add(1); return true; } @@ -219,8 +221,6 @@ bool ProcessorParseTimestampNative::ParseLogTime(const StringView& curTimeStr, / GetContext().GetLogstoreName(), GetContext().GetRegion()); } - - mProcParseErrorTotal->Add(1); ++(*mParseTimeFailures); return false; } diff --git a/core/plugin/processor/ProcessorParseTimestampNative.h b/core/plugin/processor/ProcessorParseTimestampNative.h index c66c3b5118..0f7c9ea6f0 100644 --- a/core/plugin/processor/ProcessorParseTimestampNative.h +++ b/core/plugin/processor/ProcessorParseTimestampNative.h @@ -55,11 +55,12 @@ class 
ProcessorParseTimestampNative : public Processor { int* mParseTimeFailures = nullptr; int* mHistoryFailures = nullptr; - CounterPtr mProcParseInSizeBytes; - CounterPtr mProcParseOutSizeBytes; - CounterPtr mProcDiscardRecordsTotal; - CounterPtr mProcParseErrorTotal; - CounterPtr mProcHistoryFailureTotal; + + CounterPtr mDiscardedEventsTotal; + CounterPtr mOutFailedEventsTotal; + CounterPtr mOutKeyNotFoundEventsTotal; + CounterPtr mOutSuccessfulEventsTotal; + CounterPtr mHistoryFailureTotal; #ifdef APSARA_UNIT_TEST_MAIN friend class ProcessorParseTimestampNativeUnittest; diff --git a/core/plugin/processor/ProcessorSPL.cpp b/core/plugin/processor/ProcessorSPL.cpp index d3864de236..c2d13caadc 100644 --- a/core/plugin/processor/ProcessorSPL.cpp +++ b/core/plugin/processor/ProcessorSPL.cpp @@ -28,7 +28,7 @@ #include "common/Flags.h" #include "common/ParamExtractor.h" #include "logger/Logger.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "spl/PipelineEventGroupInput.h" #include "spl/PipelineEventGroupOutput.h" #include "spl/SplConstants.h" diff --git a/core/plugin/processor/inner/ProcessorMergeMultilineLogNative.cpp b/core/plugin/processor/inner/ProcessorMergeMultilineLogNative.cpp index 077e266099..754202979d 100644 --- a/core/plugin/processor/inner/ProcessorMergeMultilineLogNative.cpp +++ b/core/plugin/processor/inner/ProcessorMergeMultilineLogNative.cpp @@ -23,7 +23,7 @@ #include "common/ParamExtractor.h" #include "logger/Logger.h" #include "models/LogEvent.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" namespace logtail { @@ -75,9 +75,9 @@ bool ProcessorMergeMultilineLogNative::Init(const Json::Value& config) { mSplitLines = &(GetContext().GetProcessProfile().splitLines); - mProcMergedEventsCnt = GetMetricsRecordRef().CreateCounter(METRIC_PROC_MERGE_MULTILINE_LOG_MERGED_RECORDS_TOTAL); - mProcUnmatchedEventsCnt - = 
GetMetricsRecordRef().CreateCounter(METRIC_PROC_MERGE_MULTILINE_LOG_UNMATCHED_RECORDS_TOTAL); + mMergedEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_MERGED_EVENTS_TOTAL); + mUnmatchedEventsTotal + = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_UNMATCHED_EVENTS_TOTAL); return true; } @@ -235,7 +235,7 @@ void ProcessorMergeMultilineLogNative::MergeLogsByRegex(PipelineEventGroup& logG // case: continue + end // current line is matched against the end pattern rather than the continue pattern begin = cur; - mProcMergedEventsCnt->Add(1); + mMergedEventsTotal->Add(1); sourceEvents[newSize++] = std::move(sourceEvents[begin]); } else { HandleUnmatchLogs(sourceEvents, newSize, cur, cur, logPath); @@ -328,7 +328,7 @@ void ProcessorMergeMultilineLogNative::MergeEvents(std::vector& logEv if (logEvents.size() == 0) { return; } - mProcMergedEventsCnt->Add(logEvents.size()); + mMergedEventsTotal->Add(logEvents.size()); if (logEvents.size() == 1) { logEvents.clear(); return; @@ -353,7 +353,7 @@ void ProcessorMergeMultilineLogNative::MergeEvents(std::vector& logEv void ProcessorMergeMultilineLogNative::HandleUnmatchLogs( std::vector& logEvents, size_t& newSize, size_t begin, size_t end, StringView logPath) { - mProcUnmatchedEventsCnt->Add(end - begin + 1); + mUnmatchedEventsTotal->Add(end - begin + 1); if (mMultiline.mUnmatchedContentTreatment == MultilineOptions::UnmatchedContentTreatment::DISCARD && mMultiline.mIgnoringUnmatchWarning) { return; diff --git a/core/plugin/processor/inner/ProcessorMergeMultilineLogNative.h b/core/plugin/processor/inner/ProcessorMergeMultilineLogNative.h index 2f67ab233e..fe1334dde2 100644 --- a/core/plugin/processor/inner/ProcessorMergeMultilineLogNative.h +++ b/core/plugin/processor/inner/ProcessorMergeMultilineLogNative.h @@ -50,9 +50,9 @@ class ProcessorMergeMultilineLogNative : public Processor { void MergeEvents(std::vector& logEvents, bool insertLineBreak = true); - CounterPtr mProcMergedEventsCnt; // 成功合并了多少条日志 + 
CounterPtr mMergedEventsTotal; // 成功合并了多少条日志 // CounterPtr mProcMergedEventsBytes; // 成功合并了多少字节的日志 - CounterPtr mProcUnmatchedEventsCnt; // 未成功合并的日志条数 + CounterPtr mUnmatchedEventsTotal; // 未成功合并的日志条数 // CounterPtr mProcUnmatchedEventsBytes; // 未成功合并的日志字节数 int* mSplitLines = nullptr; diff --git a/core/plugin/processor/inner/ProcessorParseContainerLogNative.cpp b/core/plugin/processor/inner/ProcessorParseContainerLogNative.cpp index efd965791b..6170321736 100644 --- a/core/plugin/processor/inner/ProcessorParseContainerLogNative.cpp +++ b/core/plugin/processor/inner/ProcessorParseContainerLogNative.cpp @@ -26,7 +26,7 @@ #include "common/JsonUtil.h" #include "common/ParamExtractor.h" #include "models/LogEvent.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "plugin/processor/inner/ProcessorMergeMultilineLogNative.h" namespace logtail { @@ -115,11 +115,9 @@ bool ProcessorParseContainerLogNative::Init(const Json::Value& config) { mContext->GetRegion()); } - mProcParseInSizeBytes = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_IN_SIZE_BYTES); - mProcParseOutSizeBytes = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_OUT_SIZE_BYTES); - mProcParseErrorTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_ERROR_TOTAL); - mProcParseStdoutTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_STDOUT_TOTAL); - mProcParseStderrTotal = GetMetricsRecordRef().CreateCounter(METRIC_PROC_PARSE_STDERR_TOTAL); + mOutFailedEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_OUT_FAILED_EVENTS_TOTAL); + mParseStdoutTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_PARSE_STDOUT_TOTAL); + mParseStderrTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_PARSE_STDERR_TOTAL); return true; } @@ -152,7 +150,6 @@ bool ProcessorParseContainerLogNative::ProcessEvent(StringView containerType, if (!sourceEvent.HasContent(mSourceKey)) { return true; } - 
mProcParseInSizeBytes->Add(mSourceKey.size() + sourceEvent.GetContent(mSourceKey).size()); std::string errorMsg; bool shouldKeepEvent = true; @@ -162,7 +159,7 @@ bool ProcessorParseContainerLogNative::ProcessEvent(StringView containerType, shouldKeepEvent = ParseDockerJsonLogLine(sourceEvent, errorMsg); } if (!errorMsg.empty()) { - mProcParseErrorTotal->Add(1); + mOutFailedEventsTotal->Add(1); } if (!mIgnoreParseWarning && !errorMsg.empty() && LogtailAlarm::GetInstance()->IsLowLevelAlarmValid()) { @@ -219,12 +216,12 @@ bool ProcessorParseContainerLogNative::ParseContainerdTextLogLine(LogEvent& sour } if (sourceValue == "stdout") { - mProcParseStdoutTotal->Add(1); + mParseStdoutTotal->Add(1); if (mIgnoringStdout) { return false; } } else { - mProcParseStderrTotal->Add(1); + mParseStderrTotal->Add(1); if (mIgnoringStderr) { return false; } @@ -492,12 +489,12 @@ bool ProcessorParseContainerLogNative::ParseDockerJsonLogLine(LogEvent& sourceEv } if (sourceValue == "stdout") { - mProcParseStdoutTotal->Add(1); + mParseStdoutTotal->Add(1); if (mIgnoringStdout) { return false; } } else { - mProcParseStderrTotal->Add(1); + mParseStderrTotal->Add(1); if (mIgnoringStderr) { return false; } @@ -513,18 +510,15 @@ bool ProcessorParseContainerLogNative::ParseDockerJsonLogLine(LogEvent& sourceEv // time sourceEvent.SetContent(containerTimeKey, timeValue); - mProcParseOutSizeBytes->Add(containerTimeKey.size() + timeValue.size()); // source sourceEvent.SetContent(containerSourceKey, sourceValue); - mProcParseOutSizeBytes->Add(containerSourceKey.size() + sourceValue.size()); // content if (!content.empty() && content.back() == '\n') { content = StringView(content.data(), content.size() - 1); } sourceEvent.SetContentNoCopy(containerLogKey, content); - mProcParseOutSizeBytes->Add(containerLogKey.size() + content.size()); return true; } @@ -532,15 +526,11 @@ bool ProcessorParseContainerLogNative::ParseDockerJsonLogLine(LogEvent& sourceEv void 
ProcessorParseContainerLogNative::ResetContainerdTextLog( StringView time, StringView source, StringView content, bool isPartialLog, LogEvent& sourceEvent) { sourceEvent.SetContentNoCopy(containerTimeKey, time); - mProcParseOutSizeBytes->Add(containerTimeKey.size() + time.size()); sourceEvent.SetContentNoCopy(containerSourceKey, source); - mProcParseOutSizeBytes->Add(containerSourceKey.size() + source.size()); if (isPartialLog) { sourceEvent.SetContentNoCopy(ProcessorMergeMultilineLogNative::PartLogFlag, StringView()); - mProcParseOutSizeBytes->Add(ProcessorMergeMultilineLogNative::PartLogFlag.size()); } sourceEvent.SetContentNoCopy(containerLogKey, content); - mProcParseOutSizeBytes->Add(containerLogKey.size() + content.size()); } bool ProcessorParseContainerLogNative::IsSupportedEvent(const PipelineEventPtr& e) const { diff --git a/core/plugin/processor/inner/ProcessorParseContainerLogNative.h b/core/plugin/processor/inner/ProcessorParseContainerLogNative.h index 8fdbcfc0fa..8cd55ef4ba 100644 --- a/core/plugin/processor/inner/ProcessorParseContainerLogNative.h +++ b/core/plugin/processor/inner/ProcessorParseContainerLogNative.h @@ -74,11 +74,9 @@ class ProcessorParseContainerLogNative : public Processor { bool ParseContainerdTextLogLine(LogEvent& sourceEvent, std::string& errorMsg, PipelineEventGroup& logGroup); bool ParseDockerJsonLogLine(LogEvent& sourceEvent, std::string& errorMsg); - CounterPtr mProcParseInSizeBytes; // 成功且保留的日志中,解析字段的INBYTES - CounterPtr mProcParseOutSizeBytes; // 成功且保留的日志中,解析出来字段的OUTBYTES - CounterPtr mProcParseErrorTotal; // 解析失败条数 - CounterPtr mProcParseStdoutTotal; - CounterPtr mProcParseStderrTotal; + CounterPtr mOutFailedEventsTotal; // 解析失败条数 + CounterPtr mParseStdoutTotal; + CounterPtr mParseStderrTotal; // CounterPtr mProcParseSuccessSizeBytes; // 成功bytes // CounterPtr mProcParseErrorSizeBytes; // 失败bytes diff --git a/core/plugin/processor/inner/ProcessorPromParseMetricNative.cpp 
b/core/plugin/processor/inner/ProcessorPromParseMetricNative.cpp index 768b0a9f05..b7da0d739a 100644 --- a/core/plugin/processor/inner/ProcessorPromParseMetricNative.cpp +++ b/core/plugin/processor/inner/ProcessorPromParseMetricNative.cpp @@ -15,7 +15,11 @@ namespace logtail { const string ProcessorPromParseMetricNative::sName = "processor_prom_parse_metric_native"; // only for inner processor -bool ProcessorPromParseMetricNative::Init(const Json::Value&) { +bool ProcessorPromParseMetricNative::Init(const Json::Value& config) { + mScrapeConfigPtr = std::make_unique(); + if (!mScrapeConfigPtr->InitStaticConfig(config)) { + return false; + } return true; } @@ -27,9 +31,11 @@ void ProcessorPromParseMetricNative::Process(PipelineEventGroup& eGroup) { auto timestampMilliSec = StringTo(scrapeTimestampMilliSecStr.to_string()); auto timestamp = timestampMilliSec / 1000; auto nanoSec = timestampMilliSec % 1000 * 1000000; + TextParser parser(mScrapeConfigPtr->mHonorTimestamps); + parser.SetDefaultTimestamp(timestamp, nanoSec); for (auto& e : events) { - ProcessEvent(e, newEvents, eGroup, timestamp, nanoSec); + ProcessEvent(e, newEvents, eGroup, parser); } events.swap(newEvents); eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SAMPLES_SCRAPED, ToString(events.size())); @@ -39,14 +45,17 @@ bool ProcessorPromParseMetricNative::IsSupportedEvent(const PipelineEventPtr& e) return e.Is(); } -bool ProcessorPromParseMetricNative::ProcessEvent( - PipelineEventPtr& e, EventsContainer& newEvents, PipelineEventGroup& eGroup, uint64_t timestamp, uint32_t nanoSec) { +bool ProcessorPromParseMetricNative::ProcessEvent(PipelineEventPtr& e, + EventsContainer& newEvents, + PipelineEventGroup& eGroup, + TextParser& parser) { if (!IsSupportedEvent(e)) { return false; } auto& sourceEvent = e.Cast(); std::unique_ptr metricEvent = eGroup.CreateMetricEvent(); - if (mParser.ParseLine(sourceEvent.GetContent(prometheus::PROMETHEUS), timestamp, nanoSec, *metricEvent)) { + if 
(parser.ParseLine(sourceEvent.GetContent(prometheus::PROMETHEUS), *metricEvent)) { + metricEvent->SetTag(string(prometheus::NAME), metricEvent->GetName()); newEvents.emplace_back(std::move(metricEvent)); } return true; diff --git a/core/plugin/processor/inner/ProcessorPromParseMetricNative.h b/core/plugin/processor/inner/ProcessorPromParseMetricNative.h index da14bf4939..f9c036c58a 100644 --- a/core/plugin/processor/inner/ProcessorPromParseMetricNative.h +++ b/core/plugin/processor/inner/ProcessorPromParseMetricNative.h @@ -6,6 +6,7 @@ #include "models/PipelineEventPtr.h" #include "pipeline/plugin/interface/Processor.h" #include "prometheus/labels/TextParser.h" +#include "prometheus/schedulers/ScrapeConfig.h" namespace logtail { class ProcessorPromParseMetricNative : public Processor { @@ -20,8 +21,8 @@ class ProcessorPromParseMetricNative : public Processor { bool IsSupportedEvent(const PipelineEventPtr&) const override; private: - bool ProcessEvent(PipelineEventPtr&, EventsContainer&, PipelineEventGroup&, uint64_t timestamp, uint32_t nanoSec); - TextParser mParser; + bool ProcessEvent(PipelineEventPtr&, EventsContainer&, PipelineEventGroup&, TextParser& parser); + std::unique_ptr mScrapeConfigPtr; #ifdef APSARA_UNIT_TEST_MAIN friend class InputPrometheusUnittest; diff --git a/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.cpp b/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.cpp index 536f78a442..587dc0dce8 100644 --- a/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.cpp +++ b/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.cpp @@ -19,14 +19,15 @@ #include +#include "common/Flags.h" #include "common/StringTools.h" #include "models/MetricEvent.h" #include "models/PipelineEventGroup.h" #include "models/PipelineEventPtr.h" #include "prometheus/Constants.h" -#include "prometheus/Utils.h" using namespace std; +DECLARE_FLAG_STRING(_pod_name_); namespace logtail { const string ProcessorPromRelabelMetricNative::sName = 
"processor_prom_relabel_metric_native"; @@ -34,101 +35,101 @@ const string ProcessorPromRelabelMetricNative::sName = "processor_prom_relabel_m // only for inner processor bool ProcessorPromRelabelMetricNative::Init(const Json::Value& config) { std::string errorMsg; - if (config.isMember(prometheus::METRIC_RELABEL_CONFIGS) && config[prometheus::METRIC_RELABEL_CONFIGS].isArray() - && config[prometheus::METRIC_RELABEL_CONFIGS].size() > 0) { - for (const auto& item : config[prometheus::METRIC_RELABEL_CONFIGS]) { - mRelabelConfigs.emplace_back(item); - if (!mRelabelConfigs.back().Validate()) { - errorMsg = "metric_relabel_configs is invalid"; - LOG_ERROR(sLogger, ("init prometheus processor failed", errorMsg)); - return false; - } - } - } - - - if (config.isMember(prometheus::JOB_NAME) && config[prometheus::JOB_NAME].isString()) { - mJobName = config[prometheus::JOB_NAME].asString(); - } else { + mScrapeConfigPtr = std::make_unique(); + if (!mScrapeConfigPtr->InitStaticConfig(config)) { return false; } - if (config.isMember(prometheus::SCRAPE_TIMEOUT) && config[prometheus::SCRAPE_TIMEOUT].isString()) { - string tmpScrapeTimeoutString = config[prometheus::SCRAPE_TIMEOUT].asString(); - mScrapeTimeoutSeconds = DurationToSecond(tmpScrapeTimeoutString); - } else { - mScrapeTimeoutSeconds = 10; - } - if (config.isMember(prometheus::SAMPLE_LIMIT) && config[prometheus::SAMPLE_LIMIT].isInt64()) { - mSampleLimit = config[prometheus::SAMPLE_LIMIT].asInt64(); - } else { - mSampleLimit = -1; - } - if (config.isMember(prometheus::SERIES_LIMIT) && config[prometheus::SERIES_LIMIT].isInt64()) { - mSeriesLimit = config[prometheus::SERIES_LIMIT].asInt64(); - } else { - mSeriesLimit = -1; - } + + mLoongCollectorScraper = STRING_FLAG(_pod_name_); return true; } void ProcessorPromRelabelMetricNative::Process(PipelineEventGroup& metricGroup) { - auto instance = metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_INSTANCE); - - EventsContainer& events = metricGroup.MutableEvents(); - - 
size_t wIdx = 0; - for (size_t rIdx = 0; rIdx < events.size(); ++rIdx) { - if (ProcessEvent(events[rIdx], instance)) { - if (wIdx != rIdx) { - events[wIdx] = std::move(events[rIdx]); + // if mMetricRelabelConfigs is empty and honor_labels is true, skip it + auto targetTags = metricGroup.GetTags(); + if (!mScrapeConfigPtr->mMetricRelabelConfigs.Empty() || !targetTags.empty()) { + EventsContainer& events = metricGroup.MutableEvents(); + size_t wIdx = 0; + for (size_t rIdx = 0; rIdx < events.size(); ++rIdx) { + if (ProcessEvent(events[rIdx], targetTags)) { + if (wIdx != rIdx) { + events[wIdx] = std::move(events[rIdx]); + } + ++wIdx; } - ++wIdx; + } + events.resize(wIdx); + } + + // delete mTags when key starts with __ + for (const auto& [k, v] : targetTags) { + if (k.starts_with("__")) { + metricGroup.DelTag(k); } } - events.resize(wIdx); AddAutoMetrics(metricGroup); + + // delete all tags + for (const auto& [k, v] : targetTags) { + metricGroup.DelTag(k); + } } bool ProcessorPromRelabelMetricNative::IsSupportedEvent(const PipelineEventPtr& e) const { return e.Is(); } -bool ProcessorPromRelabelMetricNative::ProcessEvent(PipelineEventPtr& e, StringView instance) { +bool ProcessorPromRelabelMetricNative::ProcessEvent(PipelineEventPtr& e, const GroupTags& targetTags) { if (!IsSupportedEvent(e)) { return false; } auto& sourceEvent = e.Cast(); - - Labels labels; - - labels.Reset(&sourceEvent); - Labels result; - - // if keep this sourceEvent - if (prometheus::Process(labels, mRelabelConfigs, result)) { - // if k/v in labels by not result, then delete it - labels.Range([&result, &sourceEvent](const Label& label) { - if (result.Get(label.name).empty()) { - sourceEvent.DelTag(StringView(label.name)); + if (!mScrapeConfigPtr->mHonorLabels) { + // metric event labels is secondary + // if confiliction, then rename it exported_ + for (const auto& [k, v] : targetTags) { + if (sourceEvent.HasTag(k)) { + auto key = prometheus::EXPORTED_PREFIX + k.to_string(); + 
sourceEvent.SetTag(key, sourceEvent.GetTag(k).to_string()); + sourceEvent.DelTag(k); + } else { + sourceEvent.SetTag(k, v); + } + } + } else { + // if mHonorLabels is true, then keep sourceEvent labels + for (const auto& [k, v] : targetTags) { + if (!sourceEvent.HasTag(k)) { + sourceEvent.SetTag(k, v); } - }); + } + } - // for each k/v in result, set it to sourceEvent - result.Range([&sourceEvent](const Label& label) { sourceEvent.SetTag(label.name, label.value); }); + if (!mScrapeConfigPtr->mMetricRelabelConfigs.Empty() + && !mScrapeConfigPtr->mMetricRelabelConfigs.Process(sourceEvent)) { + return false; + } + // set metricEvent name + sourceEvent.SetNameNoCopy(sourceEvent.GetTag(prometheus::NAME)); - // set metricEvent name - if (!result.Get(prometheus::NAME).empty()) { - sourceEvent.SetName(result.Get(prometheus::NAME)); + + // delete tag __ + vector toDelete; + for (auto it = sourceEvent.TagsBegin(); it != sourceEvent.TagsEnd(); ++it) { + if (it->first.starts_with("__")) { + toDelete.push_back(it->first); } + } + for (const auto& k : toDelete) { + sourceEvent.DelTag(k); + } - sourceEvent.SetTag(prometheus::JOB, mJobName); - sourceEvent.SetTag(prometheus::INSTANCE, instance); + // set metricEvent name + sourceEvent.SetTag(prometheus::NAME, sourceEvent.GetName()); - return true; - } - return false; + return true; } void ProcessorPromRelabelMetricNative::AddAutoMetrics(PipelineEventGroup& metricGroup) { @@ -137,27 +138,32 @@ void ProcessorPromRelabelMetricNative::AddAutoMetrics(PipelineEventGroup& metric return; } + auto targetTags = metricGroup.GetTags(); + StringView scrapeTimestampMilliSecStr = metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC); auto timestampMilliSec = StringTo(scrapeTimestampMilliSecStr.to_string()); auto timestamp = timestampMilliSec / 1000; auto nanoSec = timestampMilliSec % 1000 * 1000000; - auto instance = metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_INSTANCE); - uint64_t samplesPostMetricRelabel = 
metricGroup.GetEvents().size(); auto scrapeDurationSeconds = StringTo(metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_DURATION).to_string()); - AddMetric(metricGroup, prometheus::SCRAPE_DURATION_SECONDS, scrapeDurationSeconds, timestamp, nanoSec, instance); + AddMetric(metricGroup, prometheus::SCRAPE_DURATION_SECONDS, scrapeDurationSeconds, timestamp, nanoSec, targetTags); auto scrapeResponseSize = StringTo(metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_RESPONSE_SIZE).to_string()); - AddMetric(metricGroup, prometheus::SCRAPE_RESPONSE_SIZE_BYTES, scrapeResponseSize, timestamp, nanoSec, instance); - - if (mSampleLimit > 0) { - AddMetric(metricGroup, prometheus::SCRAPE_SAMPLES_LIMIT, mSampleLimit, timestamp, nanoSec, instance); + AddMetric(metricGroup, prometheus::SCRAPE_RESPONSE_SIZE_BYTES, scrapeResponseSize, timestamp, nanoSec, targetTags); + + if (mScrapeConfigPtr->mSampleLimit > 0) { + AddMetric(metricGroup, + prometheus::SCRAPE_SAMPLES_LIMIT, + mScrapeConfigPtr->mSampleLimit, + timestamp, + nanoSec, + targetTags); } AddMetric(metricGroup, @@ -165,19 +171,24 @@ void ProcessorPromRelabelMetricNative::AddAutoMetrics(PipelineEventGroup& metric samplesPostMetricRelabel, timestamp, nanoSec, - instance); + targetTags); auto samplesScraped = StringTo(metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_SAMPLES_SCRAPED).to_string()); - AddMetric(metricGroup, prometheus::SCRAPE_SAMPLES_SCRAPED, samplesScraped, timestamp, nanoSec, instance); + AddMetric(metricGroup, prometheus::SCRAPE_SAMPLES_SCRAPED, samplesScraped, timestamp, nanoSec, targetTags); - AddMetric(metricGroup, prometheus::SCRAPE_TIMEOUT_SECONDS, mScrapeTimeoutSeconds, timestamp, nanoSec, instance); + AddMetric(metricGroup, + prometheus::SCRAPE_TIMEOUT_SECONDS, + mScrapeConfigPtr->mScrapeTimeoutSeconds, + timestamp, + nanoSec, + targetTags); // up metric must be the last one bool upState = StringTo(metricGroup.GetMetadata(EventGroupMetaKey::PROMETHEUS_UP_STATE).to_string()); - 
AddMetric(metricGroup, prometheus::UP, 1.0 * upState, timestamp, nanoSec, instance); + AddMetric(metricGroup, prometheus::UP, 1.0 * upState, timestamp, nanoSec, targetTags); } void ProcessorPromRelabelMetricNative::AddMetric(PipelineEventGroup& metricGroup, @@ -185,13 +196,16 @@ void ProcessorPromRelabelMetricNative::AddMetric(PipelineEventGroup& metricGroup double value, time_t timestamp, uint32_t nanoSec, - StringView instance) { + const GroupTags& targetTags) { auto* metricEvent = metricGroup.AddMetricEvent(); metricEvent->SetName(name); metricEvent->SetValue(value); metricEvent->SetTimestamp(timestamp, nanoSec); - metricEvent->SetTag(prometheus::JOB, mJobName); - metricEvent->SetTag(prometheus::INSTANCE, instance); + metricEvent->SetTag(prometheus::NAME, name); + metricEvent->SetTag(prometheus::LC_SCRAPER, mLoongCollectorScraper); + for (const auto& [k, v] : targetTags) { + metricEvent->SetTag(k, v); + } } } // namespace logtail diff --git a/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.h b/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.h index 82aaa0ce20..8062c883c0 100644 --- a/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.h +++ b/core/plugin/processor/inner/ProcessorPromRelabelMetricNative.h @@ -21,7 +21,7 @@ #include "models/PipelineEventGroup.h" #include "models/PipelineEventPtr.h" #include "pipeline/plugin/interface/Processor.h" -#include "prometheus/labels/Relabel.h" +#include "prometheus/schedulers/ScrapeConfig.h" namespace logtail { class ProcessorPromRelabelMetricNative : public Processor { @@ -36,7 +36,7 @@ class ProcessorPromRelabelMetricNative : public Processor { bool IsSupportedEvent(const PipelineEventPtr& e) const override; private: - bool ProcessEvent(PipelineEventPtr& e, StringView instance); + bool ProcessEvent(PipelineEventPtr& e, const GroupTags& targetTags); void AddAutoMetrics(PipelineEventGroup& metricGroup); void AddMetric(PipelineEventGroup& metricGroup, @@ -44,15 +44,10 @@ class 
ProcessorPromRelabelMetricNative : public Processor { double value, time_t timestamp, uint32_t nanoSec, - StringView instance); + const GroupTags& targetTags); - std::vector mRelabelConfigs; - - // from config - std::string mJobName; - int64_t mScrapeTimeoutSeconds; - int64_t mSampleLimit; - int64_t mSeriesLimit; + std::unique_ptr mScrapeConfigPtr; + std::string mLoongCollectorScraper; #ifdef APSARA_UNIT_TEST_MAIN friend class ProcessorPromRelabelMetricNativeUnittest; diff --git a/core/plugin/processor/inner/ProcessorSplitMultilineLogStringNative.cpp b/core/plugin/processor/inner/ProcessorSplitMultilineLogStringNative.cpp index 840b6479dd..c5970b6dc2 100644 --- a/core/plugin/processor/inner/ProcessorSplitMultilineLogStringNative.cpp +++ b/core/plugin/processor/inner/ProcessorSplitMultilineLogStringNative.cpp @@ -24,7 +24,7 @@ #include "common/ParamExtractor.h" #include "logger/Logger.h" #include "models/LogEvent.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/plugin/instance/ProcessorInstance.h" namespace logtail { @@ -64,9 +64,9 @@ bool ProcessorSplitMultilineLogStringNative::Init(const Json::Value& config) { mContext->GetRegion()); } - mProcMatchedEventsCnt = GetMetricsRecordRef().CreateCounter(METRIC_PROC_SPLIT_MULTILINE_LOG_MATCHED_RECORDS_TOTAL); - mProcMatchedLinesCnt = GetMetricsRecordRef().CreateCounter(METRIC_PROC_SPLIT_MULTILINE_LOG_MATCHED_LINES_TOTAL); - mProcUnmatchedLinesCnt = GetMetricsRecordRef().CreateCounter(METRIC_PROC_SPLIT_MULTILINE_LOG_UNMATCHED_LINES_TOTAL); + mMatchedEventsTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_MATCHED_EVENTS_TOTAL); + mMatchedLinesTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_MATCHED_LINES_TOTAL); + mUnmatchedLinesTotal = GetMetricsRecordRef().CreateCounter(METRIC_PLUGIN_UNMATCHED_LINES_TOTAL); mSplitLines = &(mContext->GetProcessProfile().splitLines); @@ -90,8 +90,8 @@ void 
ProcessorSplitMultilineLogStringNative::Process(PipelineEventGroup& logGrou for (PipelineEventPtr& e : logGroup.MutableEvents()) { ProcessEvent(logGroup, logPath, std::move(e), newEvents, &inputLines, &unmatchLines); } - mProcMatchedLinesCnt->Add(inputLines - unmatchLines); - mProcUnmatchedLinesCnt->Add(unmatchLines); + mMatchedLinesTotal->Add(inputLines - unmatchLines); + mUnmatchedLinesTotal->Add(unmatchLines); *mSplitLines = newEvents.size(); logGroup.SwapEvents(newEvents); } @@ -177,7 +177,7 @@ void ProcessorSplitMultilineLogStringNative::ProcessEvent(PipelineEventGroup& lo // case: continue + end CreateNewEvent(content, isLastLog, sourceKey, sourceEvent, logGroup, newEvents); multiStartIndex = content.data() + content.size() + 1; - mProcMatchedEventsCnt->Add(1); + mMatchedEventsTotal->Add(1); } else { HandleUnmatchLogs( content, isLastLog, sourceKey, sourceEvent, logGroup, newEvents, logPath, unmatchLines); @@ -201,7 +201,7 @@ void ProcessorSplitMultilineLogStringNative::ProcessEvent(PipelineEventGroup& lo sourceEvent, logGroup, newEvents); - mProcMatchedEventsCnt->Add(1); + mMatchedEventsTotal->Add(1); } else { HandleUnmatchLogs( StringView(multiStartIndex, content.data() + content.size() - multiStartIndex), @@ -228,7 +228,7 @@ void ProcessorSplitMultilineLogStringNative::ProcessEvent(PipelineEventGroup& lo } else { multiStartIndex = content.data() + content.size() + 1; } - mProcMatchedEventsCnt->Add(1); + mMatchedEventsTotal->Add(1); // if only end pattern is given, start another log automatically } // no continue pattern given, and the current line in not matched against the end pattern, @@ -245,7 +245,7 @@ void ProcessorSplitMultilineLogStringNative::ProcessEvent(PipelineEventGroup& lo logGroup, newEvents); multiStartIndex = content.data(); - mProcMatchedEventsCnt->Add(1); + mMatchedEventsTotal->Add(1); } } else { // case: start + continue @@ -256,7 +256,7 @@ void ProcessorSplitMultilineLogStringNative::ProcessEvent(PipelineEventGroup& lo sourceEvent, 
logGroup, newEvents); - mProcMatchedEventsCnt->Add(1); + mMatchedEventsTotal->Add(1); if (!BoostRegexSearch( content.data(), content.size(), *mMultiline.GetStartPatternReg(), exception)) { // when no end pattern is given, the only chance to enter unmatched state is when both @@ -283,7 +283,7 @@ void ProcessorSplitMultilineLogStringNative::ProcessEvent(PipelineEventGroup& lo sourceEvent, logGroup, newEvents); - mProcMatchedEventsCnt->Add(1); + mMatchedEventsTotal->Add(1); } else { HandleUnmatchLogs(StringView(multiStartIndex, sourceVal.data() + sourceVal.size() - multiStartIndex), true, diff --git a/core/plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h b/core/plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h index 6848df98df..5fea3ea02c 100644 --- a/core/plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h +++ b/core/plugin/processor/inner/ProcessorSplitMultilineLogStringNative.h @@ -66,9 +66,9 @@ class ProcessorSplitMultilineLogStringNative : public Processor { int* mSplitLines = nullptr; - CounterPtr mProcMatchedEventsCnt; - CounterPtr mProcMatchedLinesCnt; - CounterPtr mProcUnmatchedLinesCnt; + CounterPtr mMatchedEventsTotal; + CounterPtr mMatchedLinesTotal; + CounterPtr mUnmatchedLinesTotal; #ifdef APSARA_UNIT_TEST_MAIN friend class ProcessorSplitMultilineLogStringNativeUnittest; diff --git a/core/profile_sender/EnterpriseProfileSender.cpp b/core/profile_sender/EnterpriseProfileSender.cpp index d18670e636..43352bd5ec 100644 --- a/core/profile_sender/EnterpriseProfileSender.cpp +++ b/core/profile_sender/EnterpriseProfileSender.cpp @@ -51,7 +51,7 @@ void EnterpriseProfileSender::SendToProfileProject(const string& region, sls_log false); if (SenderQueueManager::GetInstance()->GetQueue(key) == nullptr) { PipelineContext ctx; - SenderQueueManager::GetInstance()->CreateQueue(key, "", ctx, vector>()); + SenderQueueManager::GetInstance()->CreateQueue(key, "", ctx, std::unordered_map>()); } for (size_t i = 0; i < 500; ++i) { if 
(SenderQueueManager::GetInstance()->IsValidToPush(key)) { diff --git a/core/prometheus/Constants.h b/core/prometheus/Constants.h index 6a60e3a161..f4a823b4f4 100644 --- a/core/prometheus/Constants.h +++ b/core/prometheus/Constants.h @@ -22,11 +22,7 @@ const char* const REPLACEMENT = "replacement"; const char* const ACTION = "action"; const char* const MODULUS = "modulus"; const char* const NAME = "__name__"; - -// prometheus env -const char* const OPERATOR_HOST = "OPERATOR_HOST"; -const char* const OPERATOR_PORT = "OPERATOR_PORT"; -const char* const POD_NAME = "POD_NAME"; +const std::string EXPORTED_PREFIX = "exported_"; // prometheus api const char* const PROMETHEUS_PREFIX = "prometheus_"; @@ -71,6 +67,8 @@ const char* const USERNAME_FILE = "username_file"; const char* const PASSWORD = "password"; const char* const PASSWORD_FILE = "password_file"; const char* const BASIC_PREFIX = "Basic "; +const char* const HONOR_LABELS = "honor_labels"; +const char* const HONOR_TIMESTAMPS = "honor_timestamps"; // scrape protocols, from https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config // text/plain, application/openmetrics-text will be used @@ -100,6 +98,7 @@ const char* const SCRAPE_SAMPLES_POST_METRIC_RELABELING = "scrape_samples_post_m const char* const SCRAPE_SAMPLES_SCRAPED = "scrape_samples_scraped"; const char* const SCRAPE_TIMEOUT_SECONDS = "scrape_timeout_seconds"; const char* const UP = "up"; +const char* const LC_SCRAPER = "lc_scraper"; const char* const SCRAPE_TIMESTAMP_MILLISEC = "scrape_timestamp_millisec"; diff --git a/core/prometheus/PromSelfMonitor.cpp b/core/prometheus/PromSelfMonitor.cpp new file mode 100644 index 0000000000..39c1473014 --- /dev/null +++ b/core/prometheus/PromSelfMonitor.cpp @@ -0,0 +1,71 @@ +#include "prometheus/PromSelfMonitor.h" + +#include +#include +#include + +#include "monitor/LoongCollectorMetricTypes.h" +#include "monitor/metric_constants/MetricConstants.h" +using namespace std; + +namespace 
logtail { + +void PromSelfMonitorUnsafe::InitMetricManager(const std::unordered_map& metricKeys, + const MetricLabels& labels) { + auto metricLabels = std::make_shared(labels); + mPluginMetricManagerPtr = std::make_shared(metricLabels, metricKeys); +} + +void PromSelfMonitorUnsafe::AddCounter(const std::string& metricName, uint64_t statusCode, uint64_t val) { + auto& status = StatusToString(statusCode); + if (!mMetricsCounterMap.count(metricName) || !mMetricsCounterMap[metricName].count(status)) { + mMetricsCounterMap[metricName][status] = GetOrCreateReentrantMetricsRecordRef(status)->GetCounter(metricName); + } + mMetricsCounterMap[metricName][status]->Add(val); +} + +void PromSelfMonitorUnsafe::SetIntGauge(const std::string& metricName, uint64_t statusCode, uint64_t value) { + auto& status = StatusToString(statusCode); + if (!mMetricsIntGaugeMap.count(metricName) || !mMetricsIntGaugeMap[metricName].count(status)) { + mMetricsIntGaugeMap[metricName][status] = GetOrCreateReentrantMetricsRecordRef(status)->GetIntGauge(metricName); + } + mMetricsIntGaugeMap[metricName][status]->Set(value); +} + +ReentrantMetricsRecordRef PromSelfMonitorUnsafe::GetOrCreateReentrantMetricsRecordRef(const std::string& status) { + if (mPluginMetricManagerPtr == nullptr) { + return nullptr; + } + if (!mPromStatusMap.count(status)) { + mPromStatusMap[status] + = mPluginMetricManagerPtr->GetOrCreateReentrantMetricsRecordRef({{METRIC_LABEL_KEY_STATUS, status}}); + } + return mPromStatusMap[status]; +} + +std::string& PromSelfMonitorUnsafe::StatusToString(uint64_t status) { + static string sHttp0XX = "0XX"; + static string sHttp1XX = "1XX"; + static string sHttp2XX = "2XX"; + static string sHttp3XX = "3XX"; + static string sHttp4XX = "4XX"; + static string sHttp5XX = "5XX"; + static string sHttpOther = "other"; + if (status < 100) { + return sHttp0XX; + } else if (status < 200) { + return sHttp1XX; + } else if (status < 300) { + return sHttp2XX; + } else if (status < 400) { + return sHttp3XX; 
+ } else if (status < 500) { + return sHttp4XX; + } else if (status < 600) { + return sHttp5XX; + } else { + return sHttpOther; + } +} + +} // namespace logtail \ No newline at end of file diff --git a/core/prometheus/PromSelfMonitor.h b/core/prometheus/PromSelfMonitor.h new file mode 100644 index 0000000000..5c07db5005 --- /dev/null +++ b/core/prometheus/PromSelfMonitor.h @@ -0,0 +1,38 @@ +#pragma once + +#include +#include +#include + +#include "monitor/LogtailMetric.h" +#include "monitor/PluginMetricManager.h" + +namespace logtail { + +// manage status metrics, thread unsafe +class PromSelfMonitorUnsafe { +public: + PromSelfMonitorUnsafe() = default; + + void InitMetricManager(const std::unordered_map& metricKeys, const MetricLabels& labels); + + void AddCounter(const std::string& metricName, uint64_t status, uint64_t val = 1); + + void SetIntGauge(const std::string& metricName, uint64_t status, uint64_t value); + +private: + ReentrantMetricsRecordRef GetOrCreateReentrantMetricsRecordRef(const std::string& status); + std::string& StatusToString(uint64_t status); + + PluginMetricManagerPtr mPluginMetricManagerPtr; + std::map mPromStatusMap; + std::map> mMetricsCounterMap; + std::map> mMetricsIntGaugeMap; + MetricLabelsPtr mDefaultLabels; + +#ifdef APSARA_UNIT_TEST_MAIN + friend class PromSelfMonitorUnittest; +#endif +}; + +} // namespace logtail \ No newline at end of file diff --git a/core/prometheus/PrometheusInputRunner.cpp b/core/prometheus/PrometheusInputRunner.cpp index 3d0853ac8a..99fdb779d8 100644 --- a/core/prometheus/PrometheusInputRunner.cpp +++ b/core/prometheus/PrometheusInputRunner.cpp @@ -20,6 +20,7 @@ #include #include +#include "application/Application.h" #include "common/Flags.h" #include "common/JsonUtil.h" #include "common/StringTools.h" @@ -27,6 +28,8 @@ #include "common/http/AsynCurlRunner.h" #include "common/timer/Timer.h" #include "logger/Logger.h" +#include "monitor/metric_constants/MetricConstants.h" +#include 
"plugin/flusher/sls/FlusherSLS.h" #include "prometheus/Constants.h" #include "prometheus/Utils.h" #include "sdk/Common.h" @@ -40,23 +43,45 @@ DECLARE_FLAG_STRING(_pod_name_); namespace logtail { -PrometheusInputRunner::PrometheusInputRunner() : mUnRegisterMs(0) { +PrometheusInputRunner::PrometheusInputRunner() + : mServiceHost(STRING_FLAG(loong_collector_operator_service)), + mServicePort(INT32_FLAG(loong_collector_operator_service_port)), + mPodName(STRING_FLAG(_pod_name_)), + mUnRegisterMs(0) { mClient = std::make_unique(); - - mServiceHost = STRING_FLAG(loong_collector_operator_service); - mServicePort = INT32_FLAG(loong_collector_operator_service_port); - mPodName = STRING_FLAG(_pod_name_); mTimer = std::make_shared(); + + // self monitor + MetricLabels labels; + labels.emplace_back(METRIC_LABEL_KEY_RUNNER_NAME, METRIC_LABEL_VALUE_RUNNER_NAME_PROMETHEUS); + labels.emplace_back(METRIC_LABEL_KEY_INSTANCE_ID, Application::GetInstance()->GetInstanceId()); + labels.emplace_back(METRIC_LABEL_KEY_POD_NAME, mPodName); + labels.emplace_back(METRIC_LABEL_KEY_SERVICE_HOST, mServiceHost); + labels.emplace_back(METRIC_LABEL_KEY_SERVICE_PORT, ToString(mServicePort)); + + DynamicMetricLabels dynamicLabels; + dynamicLabels.emplace_back(METRIC_LABEL_KEY_PROJECT, [this]() -> std::string { return this->GetAllProjects(); }); + + WriteMetrics::GetInstance()->PrepareMetricsRecordRef( + mMetricsRecordRef, std::move(labels), std::move(dynamicLabels)); + + mPromRegisterState = mMetricsRecordRef.CreateIntGauge(METRIC_RUNNER_CLIENT_REGISTER_STATE); + mPromJobNum = mMetricsRecordRef.CreateIntGauge(METRIC_RUNNER_JOB_NUM); + mPromRegisterRetryTotal = mMetricsRecordRef.CreateCounter(METRIC_RUNNER_CLIENT_REGISTER_RETRY_TOTAL); } /// @brief receive scrape jobs from input plugins and update scrape jobs -void PrometheusInputRunner::UpdateScrapeInput(std::shared_ptr targetSubscriber) { +void PrometheusInputRunner::UpdateScrapeInput(std::shared_ptr targetSubscriber, + const MetricLabels& 
defaultLabels, + const string& projectName) { RemoveScrapeInput(targetSubscriber->GetId()); targetSubscriber->mServiceHost = mServiceHost; targetSubscriber->mServicePort = mServicePort; targetSubscriber->mPodName = mPodName; + targetSubscriber->InitSelfMonitor(defaultLabels); + targetSubscriber->mUnRegisterMs = mUnRegisterMs.load(); targetSubscriber->SetTimer(mTimer); auto randSleepMilliSec = GetRandSleepMilliSec( @@ -70,13 +95,31 @@ void PrometheusInputRunner::UpdateScrapeInput(std::shared_ptrScheduleNext(); + { + ReadLock lock(mSubscriberMapRWLock); + mPromJobNum->Set(mTargetSubscriberSchedulerMap.size()); + } + // 3. add project name to mJobNameToProjectNameMap for self monitor + { + WriteLock lock(mProjectRWLock); + mJobNameToProjectNameMap[targetSubscriber->GetId()] = projectName; + } } void PrometheusInputRunner::RemoveScrapeInput(const std::string& jobName) { - WriteLock lock(mSubscriberMapRWLock); - if (mTargetSubscriberSchedulerMap.count(jobName)) { - mTargetSubscriberSchedulerMap[jobName]->Cancel(); - mTargetSubscriberSchedulerMap.erase(jobName); + { + WriteLock lock(mSubscriberMapRWLock); + if (mTargetSubscriberSchedulerMap.count(jobName)) { + mTargetSubscriberSchedulerMap[jobName]->Cancel(); + mTargetSubscriberSchedulerMap.erase(jobName); + mPromJobNum->Set(mTargetSubscriberSchedulerMap.size()); + } + } + { + WriteLock lock(mProjectRWLock); + if (mJobNameToProjectNameMap.count(jobName)) { + mJobNameToProjectNameMap.erase(jobName); + } } } @@ -88,8 +131,11 @@ void PrometheusInputRunner::Init() { } LOG_INFO(sLogger, ("PrometheusInputRunner", "Start")); mIsStarted = true; + +#ifndef APSARA_UNIT_TEST_MAIN mTimer->Init(); AsynCurlRunner::GetInstance()->Init(); +#endif LOG_INFO(sLogger, ("PrometheusInputRunner", "register")); // only register when operator exist @@ -102,9 +148,9 @@ void PrometheusInputRunner::Init() { ++retry; sdk::HttpMessage httpResponse = SendRegisterMessage(prometheus::REGISTER_COLLECTOR_PATH); if (httpResponse.statusCode != 200) { - 
LOG_ERROR(sLogger, ("register failed, statusCode", httpResponse.statusCode)); - if (retry % 3 == 0) { - LOG_INFO(sLogger, ("register failed, retried", ToString(retry))); + mPromRegisterRetryTotal->Add(1); + if (retry % 10 == 0) { + LOG_INFO(sLogger, ("register failed, retried", retry)("statusCode", httpResponse.statusCode)); } } else { // register success @@ -127,6 +173,7 @@ void PrometheusInputRunner::Init() { } } } + mPromRegisterState->Set(1); LOG_INFO(sLogger, ("Register Success", mPodName)); // subscribe immediately SubscribeOnce(); @@ -147,10 +194,12 @@ void PrometheusInputRunner::Stop() { mIsStarted = false; mIsThreadRunning.store(false); - mTimer->Stop(); +#ifndef APSARA_UNIT_TEST_MAIN + mTimer->Stop(); LOG_INFO(sLogger, ("PrometheusInputRunner", "stop asyn curl runner")); AsynCurlRunner::GetInstance()->Stop(); +#endif LOG_INFO(sLogger, ("PrometheusInputRunner", "cancel all target subscribers")); CancelAllTargetSubscriber(); @@ -170,6 +219,7 @@ void PrometheusInputRunner::Stop() { LOG_ERROR(sLogger, ("unregister failed, statusCode", httpResponse.statusCode)); } else { LOG_INFO(sLogger, ("Unregister Success", mPodName)); + mPromRegisterState->Set(0); break; } std::this_thread::sleep_for(std::chrono::seconds(1)); @@ -226,4 +276,19 @@ void PrometheusInputRunner::SubscribeOnce() { } } +string PrometheusInputRunner::GetAllProjects() { + string result; + set existProjects; + ReadLock lock(mProjectRWLock); + for (auto& [k, v] : mJobNameToProjectNameMap) { + if (existProjects.find(v) == existProjects.end()) { + if (!result.empty()) { + result += " "; + } + existProjects.insert(v); + result += v; + } + } + return result; +} }; // namespace logtail \ No newline at end of file diff --git a/core/prometheus/PrometheusInputRunner.h b/core/prometheus/PrometheusInputRunner.h index 0b01c7c01c..3bd95df388 100644 --- a/core/prometheus/PrometheusInputRunner.h +++ b/core/prometheus/PrometheusInputRunner.h @@ -22,6 +22,8 @@ #include "common/Lock.h" #include 
"common/timer/Timer.h" +#include "monitor/LogtailMetric.h" +#include "monitor/LoongCollectorMetricTypes.h" #include "prometheus/schedulers/TargetSubscriberScheduler.h" #include "runner/InputRunner.h" #include "sdk/Common.h" @@ -42,7 +44,9 @@ class PrometheusInputRunner : public InputRunner { } // input plugin update - void UpdateScrapeInput(std::shared_ptr targetSubscriber); + void UpdateScrapeInput(std::shared_ptr targetSubscriber, + const MetricLabels& defaultLabels, + const std::string& projectName); void RemoveScrapeInput(const std::string& jobName); // target discover and scrape @@ -57,6 +61,8 @@ class PrometheusInputRunner : public InputRunner { void CancelAllTargetSubscriber(); void SubscribeOnce(); + std::string GetAllProjects(); + bool mIsStarted = false; std::mutex mStartMutex; @@ -76,6 +82,14 @@ class PrometheusInputRunner : public InputRunner { std::atomic mUnRegisterMs; + // self monitor + ReadWriteLock mProjectRWLock; + std::map mJobNameToProjectNameMap; + MetricsRecordRef mMetricsRecordRef; + CounterPtr mPromRegisterRetryTotal; + IntGaugePtr mPromRegisterState; + IntGaugePtr mPromJobNum; + #ifdef APSARA_UNIT_TEST_MAIN friend class PrometheusInputRunnerUnittest; friend class InputPrometheusUnittest; diff --git a/core/prometheus/Utils.cpp b/core/prometheus/Utils.cpp index f88cf24761..05486394e0 100644 --- a/core/prometheus/Utils.cpp +++ b/core/prometheus/Utils.cpp @@ -33,13 +33,69 @@ std::string SecondToDuration(uint64_t duration) { } uint64_t DurationToSecond(const std::string& duration) { + // check duration format s or m + if (duration.size() <= 1 || !IsNumber(duration.substr(0, duration.size() - 1))) { + return 0; + } if (EndWith(duration, "s")) { return stoll(duration.substr(0, duration.find('s'))); } if (EndWith(duration, "m")) { return stoll(duration.substr(0, duration.find('m'))) * 60; } - return 60; + return 0; +} + +// : a size in bytes, e.g. 512MB. A unit is required. Supported units: B, KB, MB, GB, TB, PB, EB. 
+uint64_t SizeToByte(const std::string& size) { + auto inputSize = size; + uint64_t res = 0; + if (size.empty()) { + res = 0; + } else if (EndWith(inputSize, "KiB") || EndWith(inputSize, "K") || EndWith(inputSize, "KB")) { + if (!IsNumber(inputSize.substr(0, inputSize.find('K')))) { + return 0; + } + inputSize = inputSize.substr(0, inputSize.find('K')); + res = stoll(inputSize) * 1024; + } else if (EndWith(inputSize, "MiB") || EndWith(inputSize, "M") || EndWith(inputSize, "MB")) { + if (!IsNumber(inputSize.substr(0, inputSize.find('M')))) { + return 0; + } + inputSize = inputSize.substr(0, inputSize.find('M')); + res = stoll(inputSize) * 1024 * 1024; + } else if (EndWith(inputSize, "GiB") || EndWith(inputSize, "G") || EndWith(inputSize, "GB")) { + if (!IsNumber(inputSize.substr(0, inputSize.find('G')))) { + return 0; + } + inputSize = inputSize.substr(0, inputSize.find('G')); + res = stoll(inputSize) * 1024 * 1024 * 1024; + } else if (EndWith(inputSize, "TiB") || EndWith(inputSize, "T") || EndWith(inputSize, "TB")) { + if (!IsNumber(inputSize.substr(0, inputSize.find('T')))) { + return 0; + } + inputSize = inputSize.substr(0, inputSize.find('T')); + res = stoll(inputSize) * 1024 * 1024 * 1024 * 1024; + } else if (EndWith(inputSize, "PiB") || EndWith(inputSize, "P") || EndWith(inputSize, "PB")) { + if (!IsNumber(inputSize.substr(0, inputSize.find('P')))) { + return 0; + } + inputSize = inputSize.substr(0, inputSize.find('P')); + res = stoll(inputSize) * 1024 * 1024 * 1024 * 1024 * 1024; + } else if (EndWith(inputSize, "EiB") || EndWith(inputSize, "E") || EndWith(inputSize, "EB")) { + if (!IsNumber(inputSize.substr(0, inputSize.find('E')))) { + return 0; + } + inputSize = inputSize.substr(0, inputSize.find('E')); + res = stoll(inputSize) * 1024 * 1024 * 1024 * 1024 * 1024 * 1024; + } else if (EndWith(inputSize, "B")) { + if (!IsNumber(inputSize.substr(0, inputSize.find('B')))) { + return 0; + } + inputSize = inputSize.substr(0, inputSize.find('B')); + res = 
stoll(inputSize); + } + return res; } bool IsValidMetric(const StringView& line) { @@ -68,6 +124,10 @@ void SplitStringView(const std::string& s, char delimiter, std::vector(std::numeric_limits::max()); diff --git a/core/prometheus/Utils.h b/core/prometheus/Utils.h index 0ed1495238..e7c9382d76 100644 --- a/core/prometheus/Utils.h +++ b/core/prometheus/Utils.h @@ -12,8 +12,11 @@ std::string URLEncode(const std::string& value); std::string SecondToDuration(uint64_t duration); uint64_t DurationToSecond(const std::string& duration); +uint64_t SizeToByte(const std::string& size); + bool IsValidMetric(const StringView& line); void SplitStringView(const std::string& s, char delimiter, std::vector& result); +bool IsNumber(const std::string& str); uint64_t GetRandSleepMilliSec(const std::string& key, uint64_t intervalSeconds, uint64_t currentMilliSeconds); } // namespace logtail diff --git a/core/prometheus/async/PromFuture.cpp b/core/prometheus/async/PromFuture.cpp index ca7339d319..46185a004a 100644 --- a/core/prometheus/async/PromFuture.cpp +++ b/core/prometheus/async/PromFuture.cpp @@ -1,28 +1,38 @@ #include "prometheus/async/PromFuture.h" #include "common/Lock.h" +#include "common/http/HttpResponse.h" namespace logtail { -void PromFuture::Process(const HttpResponse& response, uint64_t timestampMilliSec) { +template +bool PromFuture::Process(Args... 
args) { WriteLock lock(mStateRWLock); if (mState == PromFutureState::New) { for (auto& callback : mDoneCallbacks) { - callback(response, timestampMilliSec); + if (!callback(std::forward(args)...)) { + mState = PromFutureState::Done; + return false; + } } mState = PromFutureState::Done; - } else { - return; } + + return true; } -void PromFuture::AddDoneCallback(std::function&& callback) { +template +void PromFuture::AddDoneCallback(CallbackSignature&& callback) { mDoneCallbacks.emplace_back(std::move(callback)); } -void PromFuture::Cancel() { +template +void PromFuture::Cancel() { WriteLock lock(mStateRWLock); mState = PromFutureState::Done; } +template class PromFuture; +template class PromFuture<>; + } // namespace logtail \ No newline at end of file diff --git a/core/prometheus/async/PromFuture.h b/core/prometheus/async/PromFuture.h index 63de4b59d1..f9b5a14846 100644 --- a/core/prometheus/async/PromFuture.h +++ b/core/prometheus/async/PromFuture.h @@ -1,18 +1,21 @@ #pragma once +#include + #include "common/Lock.h" -#include "common/http/HttpResponse.h" namespace logtail { enum class PromFutureState { New, Processing, Done }; +template class PromFuture { public: + using CallbackSignature = std::function; // Process should support oneshot and streaming mode. 
- void Process(const HttpResponse&, uint64_t timestampMilliSec); + bool Process(Args...); - void AddDoneCallback(std::function&& callback); + void AddDoneCallback(CallbackSignature&&); void Cancel(); @@ -20,7 +23,7 @@ class PromFuture { PromFutureState mState = {PromFutureState::New}; ReadWriteLock mStateRWLock; - std::vector> mDoneCallbacks; + std::vector mDoneCallbacks; #ifdef APSARA_UNIT_TEST_MAIN friend class ScrapeSchedulerUnittest; diff --git a/core/prometheus/async/PromHttpRequest.cpp b/core/prometheus/async/PromHttpRequest.cpp index 9cfe68452e..151ebfabe8 100644 --- a/core/prometheus/async/PromHttpRequest.cpp +++ b/core/prometheus/async/PromHttpRequest.cpp @@ -18,17 +18,24 @@ PromHttpRequest::PromHttpRequest(const std::string& method, const std::string& body, uint32_t timeout, uint32_t maxTryCnt, - std::shared_ptr future) + std::shared_ptr> future, + std::shared_ptr> isContextValidFuture) : AsynHttpRequest(method, httpsFlag, host, port, url, query, header, body, timeout, maxTryCnt), - mFuture(std::move(future)) { + mFuture(std::move(future)), + mIsContextValidFuture(std::move(isContextValidFuture)) { } void PromHttpRequest::OnSendDone(const HttpResponse& response) { - mFuture->Process(response, - std::chrono::duration_cast(mLastSendTime.time_since_epoch()).count()); + if (mFuture != nullptr) { + mFuture->Process( + response, std::chrono::duration_cast(mLastSendTime.time_since_epoch()).count()); + } } [[nodiscard]] bool PromHttpRequest::IsContextValid() const { + if (mIsContextValidFuture != nullptr) { + return mIsContextValidFuture->Process(); + } return true; } diff --git a/core/prometheus/async/PromHttpRequest.h b/core/prometheus/async/PromHttpRequest.h index b7b8115774..8c546e5f0a 100644 --- a/core/prometheus/async/PromHttpRequest.h +++ b/core/prometheus/async/PromHttpRequest.h @@ -20,7 +20,8 @@ class PromHttpRequest : public AsynHttpRequest { const std::string& body, uint32_t timeout, uint32_t maxTryCnt, - std::shared_ptr future); + std::shared_ptr> 
future, + std::shared_ptr> isContextValidFuture = nullptr); PromHttpRequest(const PromHttpRequest&) = default; ~PromHttpRequest() override = default; @@ -30,7 +31,8 @@ class PromHttpRequest : public AsynHttpRequest { private: void SetNextExecTime(std::chrono::steady_clock::time_point execTime); - std::shared_ptr mFuture; + std::shared_ptr> mFuture; + std::shared_ptr> mIsContextValidFuture; }; } // namespace logtail \ No newline at end of file diff --git a/core/prometheus/labels/Labels.cpp b/core/prometheus/labels/Labels.cpp index 5e8fce4687..383cd7cc08 100644 --- a/core/prometheus/labels/Labels.cpp +++ b/core/prometheus/labels/Labels.cpp @@ -16,7 +16,6 @@ #include "prometheus/labels/Labels.h" -#include #include #include "prometheus/Constants.h" @@ -25,6 +24,35 @@ using namespace std; namespace logtail { +// mMetricEventPtr can not be copied +Labels::Labels(const Labels& other) : mLabels(other.mLabels) { +} + +Labels& Labels::operator=(const Labels& other) { + if (this != &other) { + mLabels = other.mLabels; + mMetricEventPtr = nullptr; + } + return *this; +} + +// metricEventPtr can be moved +Labels::Labels(Labels&& other) noexcept : mLabels(std::move(other.mLabels)), mMetricEventPtr(other.mMetricEventPtr) { + other.mLabels.clear(); + other.mMetricEventPtr = nullptr; +} + +Labels& Labels::operator=(Labels&& other) noexcept { + if (this != &other) { + mLabels = std::move(other.mLabels); + mMetricEventPtr = other.mMetricEventPtr; + other.mLabels.clear(); + other.mMetricEventPtr = nullptr; + } + return *this; +} + + size_t Labels::Size() const { if (mMetricEventPtr) { return mMetricEventPtr->TagsSize(); @@ -34,7 +62,7 @@ size_t Labels::Size() const { std::string Labels::Get(const string& name) { if (mMetricEventPtr) { - return string(mMetricEventPtr->GetTag(name)); + return mMetricEventPtr->GetTag(name).to_string(); } if (mLabels.count(name)) { return mLabels[name]; @@ -43,146 +71,47 @@ std::string Labels::Get(const string& name) { } void Labels::Reset(MetricEvent* 
metricEvent) { - for (auto it = metricEvent->TagsBegin(); it != metricEvent->TagsEnd(); it++) { - Push(Label(it->first.to_string(), it->second.to_string())); - } - Push(Label(prometheus::NAME, metricEvent->GetName().to_string())); + mMetricEventPtr = metricEvent; + Set(prometheus::NAME, metricEvent->GetName().to_string()); } -void Labels::Push(const Label& l) { +void Labels::Set(const string& k, const string& v) { if (mMetricEventPtr) { - mMetricEventPtr->SetTag(l.name, l.value); + mMetricEventPtr->SetTag(k, v); return; } - mLabels[l.name] = l.value; + mLabels[k] = v; } -void Labels::Range(const std::function& f) { +void Labels::Del(const string& k) { if (mMetricEventPtr) { - for (auto l = mMetricEventPtr->TagsBegin(); l != mMetricEventPtr->TagsEnd(); l++) { - f(Label(string(l->first), string(l->second))); + if(mMetricEventPtr->HasTag(k)){ + mMetricEventPtr->DelTag(k); } return; } - for (const auto& l : mLabels) { - f(Label(l.first, l.second)); - } -} - -LabelMap::const_iterator Labels::Begin() const { - return mLabels.begin(); -} - -LabelMap::const_iterator Labels::End() const { - return mLabels.end(); -} - - -LabelsBuilder::LabelsBuilder() { -} - -// Del deletes the label of the given name. -void LabelsBuilder::DeleteLabel(const vector& nameList) { - for (const auto& name : nameList) { - DeleteLabel(name); + if(mLabels.count(k)){ + mLabels.erase(k); } } -void LabelsBuilder::DeleteLabel(std::string name) { - auto it = mAddLabelList.find(name); - if (it != mAddLabelList.end()) { - mAddLabelList.erase(it); - } - mDeleteLabelNameList.insert(name); -} -std::string LabelsBuilder::Get(const std::string& name) { - // Del() removes entries from .add but Set() does not remove from .del, so check .add first. 
- for (const auto& [k, v] : mAddLabelList) { - if (k == name) { - return v; +void Labels::Range(const std::function& f) { + if (mMetricEventPtr) { + for (auto l = mMetricEventPtr->TagsBegin(); l != mMetricEventPtr->TagsEnd(); l++) { + f(l->first.to_string(), l->second.to_string()); } - } - auto it = find(mDeleteLabelNameList.begin(), mDeleteLabelNameList.end(), name); - if (it != mDeleteLabelNameList.end()) { - return ""; - } - return mBase.Get(name); -} - - -// Set the name/value pair as a label. A value of "" means delete that label. -void LabelsBuilder::Set(const std::string& name, const std::string& value) { - if (value.empty()) { - DeleteLabel(name); - return; - } - if (mAddLabelList.find(name) != mAddLabelList.end()) { - mAddLabelList[name] = value; return; } - mAddLabelList.emplace(name, value); -} - -void LabelsBuilder::Reset(Labels l) { - mBase = l; - mBase.Range([this](const Label& l) { - if (l.value == "") { - mDeleteLabelNameList.insert(l.name); - } - }); -} - -void LabelsBuilder::Reset(MetricEvent* metricEvent) { - mBase.Reset(metricEvent); - mBase.Range([this](const Label& l) { - if (l.value == "") { - mDeleteLabelNameList.insert(l.name); - } - }); -} - -Labels LabelsBuilder::GetLabels() { - if (mDeleteLabelNameList.empty() && mAddLabelList.empty()) { - return mBase; - } - - auto res = Labels(); - for (auto l = mBase.Begin(); l != mBase.End(); ++l) { - if (mDeleteLabelNameList.find(l->first) != mDeleteLabelNameList.end() - || mAddLabelList.find(l->first) != mAddLabelList.end()) { - continue; - } - res.Push(Label(l->first, l->second)); - } - - for (const auto& [k, v] : mAddLabelList) { - res.Push(Label{k, v}); - } - - return res; -} - - -/// @brief Range calls f on each label in the Builder -void LabelsBuilder::Range(const std::function& closure) { - // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). 
- auto originAdd = mAddLabelList; - auto originDel = mDeleteLabelNameList; - mBase.Range([&originAdd, &originDel, &closure](const Label& l) { - if (originAdd.find(l.name) == originAdd.end() && originDel.find(l.name) == originDel.end()) { - closure(l); - } - }); - for (const auto& [k, v] : originAdd) { - closure(Label{k, v}); + for (const auto& l : mLabels) { + f(l.first, l.second); } } uint64_t Labels::Hash() { string hash; uint64_t sum = prometheus::OFFSET64; - Range([&hash](const Label& l) { hash += l.name + "\xff" + l.value + "\xff"; }); + Range([&hash](const string& k, const string& v) { hash += k + "\xff" + v + "\xff"; }); for (auto i : hash) { sum ^= (uint64_t)i; sum *= prometheus::PRIME64; @@ -191,6 +120,7 @@ uint64_t Labels::Hash() { } void Labels::RemoveMetaLabels() { + // for mLabels only for (auto it = mLabels.begin(); it != mLabels.end();) { if (it->first.find(prometheus::META) == 0) { it = mLabels.erase(it); diff --git a/core/prometheus/labels/Labels.h b/core/prometheus/labels/Labels.h index 80727dc1ab..43081e7617 100644 --- a/core/prometheus/labels/Labels.h +++ b/core/prometheus/labels/Labels.h @@ -20,40 +20,37 @@ #include #include #include -#include -#include -#include #include "models/MetricEvent.h" namespace logtail { -// Label is a key/value pair of strings. -struct Label { - std::string name; - std::string value; - Label(std::string name, std::string value) : name(std::move(name)), value(std::move(value)) {} -}; using LabelMap = std::map; /// @brief Labels is a sorted set of labels. 
Order has to be guaranteed upon instantiation class Labels { public: Labels() = default; - size_t Size() const; + Labels(const Labels&); + Labels& operator=(const Labels&); + + Labels(Labels&&) noexcept; + Labels& operator=(Labels&&) noexcept; + + [[nodiscard]] size_t Size() const; uint64_t Hash(); void RemoveMetaLabels(); std::string Get(const std::string&); - void Reset(MetricEvent*); - void Push(const Label&); + void Set(const std::string&, const std::string&); + void Del(const std::string&); - void Range(const std::function&); + void Reset(MetricEvent*); - LabelMap::const_iterator Begin() const; - LabelMap::const_iterator End() const; + void Range(const std::function&); private: + LabelMap mLabels; MetricEvent* mMetricEventPtr = nullptr; @@ -63,31 +60,4 @@ class Labels { #endif }; -class LabelsBuilder { -public: - LabelsBuilder(); - void DeleteLabel(const std::vector&); - void DeleteLabel(std::string); - - std::string Get(const std::string&); - void Set(const std::string&, const std::string&); - - void Reset(Labels); - void Reset(MetricEvent*); - - Labels GetLabels(); - - void Range(const std::function& closure); - -private: - Labels mBase; - - std::unordered_set mDeleteLabelNameList; - std::unordered_map mAddLabelList; - -#ifdef APSARA_UNIT_TEST_MAIN - friend class LabelsBuilderUnittest; -#endif -}; - } // namespace logtail diff --git a/core/prometheus/labels/Relabel.cpp b/core/prometheus/labels/Relabel.cpp index b50b318637..f93772b0b3 100644 --- a/core/prometheus/labels/Relabel.cpp +++ b/core/prometheus/labels/Relabel.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include "common/ParamExtractor.h" #include "common/StringTools.h" @@ -33,55 +34,54 @@ using namespace std; #define ENUM_TO_STRING_CASE(EnumValue) \ { Action::EnumValue, ToLowerCaseString(#EnumValue) } -#define STRING_TO_ENUM__CASE(EnumValue) \ +#define STRING_TO_ENUM_CASE(EnumValue) \ { ToLowerCaseString(#EnumValue), Action::EnumValue } namespace logtail { -Action StringToAction(string 
action) { - static std::map actionStrings{STRING_TO_ENUM__CASE(REPLACE), - STRING_TO_ENUM__CASE(KEEP), - STRING_TO_ENUM__CASE(DROP), - STRING_TO_ENUM__CASE(KEEPEQUAL), - STRING_TO_ENUM__CASE(DROPEQUAL), - STRING_TO_ENUM__CASE(HASHMOD), - STRING_TO_ENUM__CASE(LABELMAP), - STRING_TO_ENUM__CASE(LABELDROP), - STRING_TO_ENUM__CASE(LABELKEEP), - STRING_TO_ENUM__CASE(LOWERCASE), - STRING_TO_ENUM__CASE(UPPERCASE)}; - - auto it = actionStrings.find(action); - if (it != actionStrings.end()) { +Action StringToAction(const string& action) { + static std::map sActionStrings{STRING_TO_ENUM_CASE(REPLACE), + STRING_TO_ENUM_CASE(KEEP), + STRING_TO_ENUM_CASE(DROP), + STRING_TO_ENUM_CASE(KEEPEQUAL), + STRING_TO_ENUM_CASE(DROPEQUAL), + STRING_TO_ENUM_CASE(HASHMOD), + STRING_TO_ENUM_CASE(LABELMAP), + STRING_TO_ENUM_CASE(LABELDROP), + STRING_TO_ENUM_CASE(LABELKEEP), + STRING_TO_ENUM_CASE(LOWERCASE), + STRING_TO_ENUM_CASE(UPPERCASE)}; + + auto it = sActionStrings.find(action); + if (it != sActionStrings.end()) { return it->second; } return Action::UNDEFINED; } const std::string& ActionToString(Action action) { - static std::map actionStrings{ENUM_TO_STRING_CASE(REPLACE), - ENUM_TO_STRING_CASE(KEEP), - ENUM_TO_STRING_CASE(DROP), - ENUM_TO_STRING_CASE(KEEPEQUAL), - ENUM_TO_STRING_CASE(DROPEQUAL), - ENUM_TO_STRING_CASE(HASHMOD), - ENUM_TO_STRING_CASE(LABELMAP), - ENUM_TO_STRING_CASE(LABELDROP), - ENUM_TO_STRING_CASE(LABELKEEP), - ENUM_TO_STRING_CASE(LOWERCASE), - ENUM_TO_STRING_CASE(UPPERCASE)}; - static string undefined = prometheus::UNDEFINED; - auto it = actionStrings.find(action); - if (it != actionStrings.end()) { + static std::map sActionStrings{ENUM_TO_STRING_CASE(REPLACE), + ENUM_TO_STRING_CASE(KEEP), + ENUM_TO_STRING_CASE(DROP), + ENUM_TO_STRING_CASE(KEEPEQUAL), + ENUM_TO_STRING_CASE(DROPEQUAL), + ENUM_TO_STRING_CASE(HASHMOD), + ENUM_TO_STRING_CASE(LABELMAP), + ENUM_TO_STRING_CASE(LABELDROP), + ENUM_TO_STRING_CASE(LABELKEEP), + ENUM_TO_STRING_CASE(LOWERCASE), + 
ENUM_TO_STRING_CASE(UPPERCASE)}; + static string sUndefined = prometheus::UNDEFINED; + auto it = sActionStrings.find(action); + if (it != sActionStrings.end()) { return it->second; } - return undefined; + return sUndefined; } - -RelabelConfig::RelabelConfig() { +RelabelConfig::RelabelConfig() : mSeparator(";"), mReplacement("$1"), mAction(Action::REPLACE) { + mRegex = boost::regex("().*"); } - -RelabelConfig::RelabelConfig(const Json::Value& config) { +bool RelabelConfig::Init(const Json::Value& config) { string errorMsg; if (config.isMember(prometheus::SOURCE_LABELS) && config[prometheus::SOURCE_LABELS].isArray()) { @@ -113,90 +113,63 @@ RelabelConfig::RelabelConfig(const Json::Value& config) { if (config.isMember(prometheus::MODULUS) && config[prometheus::MODULUS].isUInt64()) { mModulus = config[prometheus::MODULUS].asUInt64(); } -} - -bool RelabelConfig::Validate() { - return true; -} - -bool prometheus::Process(const Labels& lbls, const std::vector& cfgs, Labels& ret) { - auto lb = LabelsBuilder(); - lb.Reset(lbls); - if (!ProcessBuilder(lb, cfgs)) { - ret = Labels(); - return false; - } - ret = lb.GetLabels(); return true; } -bool prometheus::ProcessBuilder(LabelsBuilder& lb, const std::vector& cfgs) { - for (const RelabelConfig& cfg : cfgs) { - bool keep = Relabel(cfg, lb); - if (!keep) { - return false; - } - } - return true; -} - -bool prometheus::Relabel(const RelabelConfig& cfg, LabelsBuilder& lb) { +bool RelabelConfig::Process(Labels& l) const { vector values; - for (auto item : cfg.mSourceLabels) { - values.push_back(lb.Get(item)); + values.reserve(mSourceLabels.size()); + for (const auto& item : mSourceLabels) { + values.push_back(l.Get(item)); } - string val = boost::algorithm::join(values, cfg.mSeparator); + string val = boost::algorithm::join(values, mSeparator); - switch (cfg.mAction) { + switch (mAction) { case Action::DROP: { - if (boost::regex_match(val, cfg.mRegex)) { + if (boost::regex_match(val, mRegex)) { return false; } break; } case 
Action::KEEP: { - if (!boost::regex_match(val, cfg.mRegex)) { + if (!boost::regex_match(val, mRegex)) { return false; } break; } case Action::DROPEQUAL: { - if (lb.Get(cfg.mTargetLabel) == val) { + if (l.Get(mTargetLabel) == val) { return false; } break; } case Action::KEEPEQUAL: { - if (lb.Get(cfg.mTargetLabel) != val) { + if (l.Get(mTargetLabel) != val) { return false; } break; } case Action::REPLACE: { - bool indexes = boost::regex_search(val, cfg.mRegex); + bool indexes = boost::regex_search(val, mRegex); // If there is no match no replacement must take place. if (!indexes) { break; } - LabelName target - = LabelName(boost::regex_replace(val, cfg.mRegex, cfg.mTargetLabel, boost::format_first_only)); - if (!target.Validate()) { - break; - } - string res = boost::regex_replace(val, cfg.mRegex, cfg.mReplacement, boost::format_first_only); + string target = string(boost::regex_replace(val, mRegex, mTargetLabel, boost::format_first_only)); + string res = boost::regex_replace(val, mRegex, mReplacement, boost::format_first_only); if (res.size() == 0) { - lb.DeleteLabel(target.mLabelName); + l.Del(target); break; } - lb.Set(target.mLabelName, string(res)); + l.Set(target, string(res)); break; } case Action::LOWERCASE: { - lb.Set(cfg.mTargetLabel, boost::to_lower_copy(val)); + l.Set(mTargetLabel, boost::to_lower_copy(val)); break; } case Action::UPPERCASE: { - lb.Set(cfg.mTargetLabel, boost::to_upper_copy(val)); + l.Set(mTargetLabel, boost::to_upper_copy(val)); break; } case Action::HASHMOD: { @@ -207,49 +180,84 @@ bool prometheus::Relabel(const RelabelConfig& cfg, LabelsBuilder& lb) { for (int i = 8; i < MD5_DIGEST_LENGTH; ++i) { hashVal = (hashVal << 8) | digest[i]; } - uint64_t mod = hashVal % cfg.mModulus; - lb.Set(cfg.mTargetLabel, to_string(mod)); + uint64_t mod = hashVal % mModulus; + l.Set(mTargetLabel, to_string(mod)); break; } case Action::LABELMAP: { - lb.Range([&cfg, &lb](Label label) { - if (boost::regex_match(label.name, cfg.mRegex)) { - string res = 
boost::regex_replace( - label.name, cfg.mRegex, cfg.mReplacement, boost::match_default | boost::format_all); - lb.Set(res, label.value); + l.Range([&](const string& key, const string& value) { + if (boost::regex_match(key, mRegex)) { + string res + = boost::regex_replace(key, mRegex, mReplacement, boost::match_default | boost::format_all); + l.Set(res, value); } }); break; } case Action::LABELDROP: { - lb.Range([&cfg, &lb](Label label) { - if (boost::regex_match(label.name, cfg.mRegex)) { - lb.DeleteLabel(label.name); + vector toDel; + l.Range([&](const string& key, const string& value) { + if (boost::regex_match(key, mRegex)) { + toDel.push_back(key); } }); + for (const auto& item : toDel) { + l.Del(item); + } break; } case Action::LABELKEEP: { - lb.Range([&cfg, &lb](Label label) { - if (!boost::regex_match(label.name, cfg.mRegex)) { - lb.DeleteLabel(label.name); + vector toDel; + l.Range([&](const string& key, const string& value) { + if (!boost::regex_match(key, mRegex)) { + toDel.push_back(key); } }); + for (const auto& item : toDel) { + l.Del(item); + } break; } default: // error - LOG_ERROR(sLogger, ("relabel: unknown relabel action type", ActionToString(cfg.mAction))); + LOG_ERROR(sLogger, ("relabel: unknown relabel action type", ActionToString(mAction))); break; } return true; } -LabelName::LabelName() { -} -LabelName::LabelName(std::string labelName) : mLabelName(labelName) { + +bool RelabelConfigList::Init(const Json::Value& relabelConfigs) { + if (!relabelConfigs.isArray()) { + return false; + } + for (const auto& relabelConfig : relabelConfigs) { + RelabelConfig rc; + if (rc.Init(relabelConfig)) { + mRelabelConfigs.push_back(rc); + } else { + return false; + } + } + return true; } -bool LabelName::Validate() { +bool RelabelConfigList::Process(Labels& l) const { + for (const auto& cfg : mRelabelConfigs) { + if (!cfg.Process(l)) { + return false; + } + } return true; } + +bool RelabelConfigList::Process(MetricEvent& event) const { + Labels labels; + 
labels.Reset(&event); + return Process(labels); +} + +bool RelabelConfigList::Empty() const { + return mRelabelConfigs.empty(); +} + } // namespace logtail diff --git a/core/prometheus/labels/Relabel.h b/core/prometheus/labels/Relabel.h index 7a82cab806..f93eeddd0b 100644 --- a/core/prometheus/labels/Relabel.h +++ b/core/prometheus/labels/Relabel.h @@ -40,26 +40,13 @@ enum class Action { }; const std::string& ActionToString(Action action); -Action StringToAction(std::string action); - -class LabelName { -public: - LabelName(); - LabelName(std::string); - - bool Validate(); - - std::string mLabelName; - -private: -}; +Action StringToAction(const std::string& action); class RelabelConfig { public: RelabelConfig(); - RelabelConfig(const Json::Value&); - - bool Validate(); + bool Init(const Json::Value&); + bool Process(Labels&) const; // A list of labels from which values are taken and concatenated // with the configured separator in order. @@ -81,12 +68,23 @@ class RelabelConfig { private: }; +class RelabelConfigList { +public: + bool Init(const Json::Value& relabelConfigs); + bool Process(MetricEvent&) const; + bool Process(Labels&) const; + + [[nodiscard]] bool Empty() const; -namespace prometheus { - bool Process(const Labels& lbls, const std::vector& cfgs, Labels& ret); - bool ProcessBuilder(LabelsBuilder& lb, const std::vector& cfgs); - bool Relabel(const RelabelConfig& cfg, LabelsBuilder& lb); -} // namespace prometheus +private: + std::vector mRelabelConfigs; + +#ifdef APSARA_UNIT_TEST_MAIN + friend class RelabelConfigListTest; + friend class InputPrometheusUnittest; + friend class ScrapeConfigUnittest; +#endif +}; } // namespace logtail diff --git a/core/prometheus/labels/TextParser.cpp b/core/prometheus/labels/TextParser.cpp index 03329cd244..67b7b335bb 100644 --- a/core/prometheus/labels/TextParser.cpp +++ b/core/prometheus/labels/TextParser.cpp @@ -40,7 +40,16 @@ bool IsValidNumberChar(char c) { return sValidChars.count(c); }; -PipelineEventGroup 
TextParser::Parse(const string& content, uint64_t defaultTimestamp, uint32_t defaultNanoTs) { +TextParser::TextParser(bool honorTimestamps) : mHonorTimestamps(honorTimestamps) { +} + +void TextParser::SetDefaultTimestamp(uint64_t defaultTimestamp, uint32_t defaultNanoSec) { + mDefaultTimestamp = defaultTimestamp; + mDefaultNanoTimestamp = defaultNanoSec; +} + +PipelineEventGroup TextParser::Parse(const string& content, uint64_t defaultTimestamp, uint32_t defaultNanoSec) { + SetDefaultTimestamp(defaultTimestamp, defaultNanoSec); auto eGroup = PipelineEventGroup(make_shared()); vector lines; // pre-reserve vector size by 1024 which is experience value per line @@ -51,7 +60,7 @@ PipelineEventGroup TextParser::Parse(const string& content, uint64_t defaultTime continue; } auto metricEvent = eGroup.CreateMetricEvent(); - if (ParseLine(line, defaultTimestamp, defaultNanoTs, *metricEvent)) { + if (ParseLine(line, *metricEvent)) { eGroup.MutableEvents().emplace_back(std::move(metricEvent)); } } @@ -77,19 +86,12 @@ PipelineEventGroup TextParser::BuildLogGroup(const string& content) { return eGroup; } -bool TextParser::ParseLine(StringView line, - uint64_t defaultTimestamp, - uint32_t defaultNanoTs, - MetricEvent& metricEvent) { +bool TextParser::ParseLine(StringView line, MetricEvent& metricEvent) { mLine = line; mPos = 0; mState = TextState::Start; mLabelName.clear(); mTokenLength = 0; - if (defaultTimestamp > 0) { - mTimestamp = defaultTimestamp; - mNanoTimestamp = defaultNanoTs; - } HandleStart(metricEvent); @@ -282,8 +284,8 @@ void TextParser::HandleSampleValue(MetricEvent& metricEvent) { metricEvent.SetValue(mSampleValue); mTokenLength = 0; SkipLeadingWhitespace(); - if (mPos == mLine.size() || mLine[mPos] == '#') { - metricEvent.SetTimestamp(mTimestamp, mNanoTimestamp); + if (mPos == mLine.size() || mLine[mPos] == '#' || !mHonorTimestamps) { + metricEvent.SetTimestamp(mDefaultTimestamp, mDefaultNanoTimestamp); mState = TextState::Done; } else { 
HandleTimestamp(metricEvent); @@ -329,7 +331,11 @@ void TextParser::HandleTimestamp(MetricEvent& metricEvent) { } time_t timestamp = (int64_t)milliTimestamp / 1000; auto ns = ((int64_t)milliTimestamp % 1000) * 1000000; - metricEvent.SetTimestamp(timestamp, ns); + if (mHonorTimestamps) { + metricEvent.SetTimestamp(timestamp, ns); + } else { + metricEvent.SetTimestamp(mDefaultTimestamp, mDefaultNanoTimestamp); + } mTokenLength = 0; diff --git a/core/prometheus/labels/TextParser.h b/core/prometheus/labels/TextParser.h index b0f158248e..e8f7b86bd4 100644 --- a/core/prometheus/labels/TextParser.h +++ b/core/prometheus/labels/TextParser.h @@ -28,11 +28,14 @@ enum class TextState { Start, Done, Error }; class TextParser { public: TextParser() = default; + explicit TextParser(bool honorTimestamps); - PipelineEventGroup Parse(const std::string& content, uint64_t defaultTimestamp, uint32_t defaultNanoTs); + void SetDefaultTimestamp(uint64_t defaultTimestamp, uint32_t defaultNanoSec); + + PipelineEventGroup Parse(const std::string& content, uint64_t defaultTimestamp, uint32_t defaultNanoSec); PipelineEventGroup BuildLogGroup(const std::string& content); - bool ParseLine(StringView line, uint64_t defaultTimestamp, uint32_t defaultNanoTs, MetricEvent& metricEvent); + bool ParseLine(StringView line, MetricEvent& metricEvent); private: void HandleError(const std::string& errMsg); @@ -57,11 +60,13 @@ class TextParser { StringView mLabelName; std::string mEscapedLabelValue; double mSampleValue{0.0}; - time_t mTimestamp{0}; - uint32_t mNanoTimestamp{0}; std::size_t mTokenLength{0}; std::string mDoubleStr; + bool mHonorTimestamps{true}; + time_t mDefaultTimestamp{0}; + uint32_t mDefaultNanoTimestamp{0}; + #ifdef APSARA_UNIT_TEST_MAIN friend class TextParserUnittest; #endif diff --git a/core/prometheus/schedulers/BaseScheduler.cpp b/core/prometheus/schedulers/BaseScheduler.cpp index c18d542d37..db7de4ae79 100644 --- a/core/prometheus/schedulers/BaseScheduler.cpp +++ 
b/core/prometheus/schedulers/BaseScheduler.cpp @@ -3,14 +3,20 @@ namespace logtail { void BaseScheduler::ExecDone() { mExecCount++; + mLatestExecTime = mFirstExecTime + std::chrono::seconds(mExecCount * mInterval); } std::chrono::steady_clock::time_point BaseScheduler::GetNextExecTime() { - return mFirstExecTime + std::chrono::seconds(mExecCount * mInterval); + return mLatestExecTime; } void BaseScheduler::SetFirstExecTime(std::chrono::steady_clock::time_point firstExecTime) { mFirstExecTime = firstExecTime; + mLatestExecTime = mFirstExecTime; +} + +void BaseScheduler::DelayExecTime(uint64_t delaySeconds) { + mLatestExecTime = mLatestExecTime + std::chrono::seconds(delaySeconds); } void BaseScheduler::Cancel() { diff --git a/core/prometheus/schedulers/BaseScheduler.h b/core/prometheus/schedulers/BaseScheduler.h index 2a83bb29a1..4203d8d730 100644 --- a/core/prometheus/schedulers/BaseScheduler.h +++ b/core/prometheus/schedulers/BaseScheduler.h @@ -3,6 +3,7 @@ #include +#include "common/http/HttpResponse.h" #include "prometheus/async/PromFuture.h" namespace logtail { @@ -18,6 +19,7 @@ class BaseScheduler { std::chrono::steady_clock::time_point GetNextExecTime(); void SetFirstExecTime(std::chrono::steady_clock::time_point firstExecTime); + void DelayExecTime(uint64_t delaySeconds); virtual void Cancel(); @@ -25,11 +27,13 @@ class BaseScheduler { bool IsCancelled(); std::chrono::steady_clock::time_point mFirstExecTime; + std::chrono::steady_clock::time_point mLatestExecTime; int64_t mExecCount = 0; int64_t mInterval = 0; ReadWriteLock mLock; bool mValidState = true; - std::shared_ptr mFuture; + std::shared_ptr> mFuture; + std::shared_ptr> mIsContextValidFuture; }; } // namespace logtail \ No newline at end of file diff --git a/core/prometheus/schedulers/ScrapeConfig.cpp b/core/prometheus/schedulers/ScrapeConfig.cpp index ff6ee77b15..027c534226 100644 --- a/core/prometheus/schedulers/ScrapeConfig.cpp +++ b/core/prometheus/schedulers/ScrapeConfig.cpp @@ -19,31 +19,18 @@ 
ScrapeConfig::ScrapeConfig() : mScrapeIntervalSeconds(60), mScrapeTimeoutSeconds(10), mMetricsPath("/metrics"), + mHonorLabels(false), + mHonorTimestamps(true), mScheme("http"), - mMaxScrapeSizeBytes(-1), - mSampleLimit(-1), - mSeriesLimit(-1) { + mMaxScrapeSizeBytes(0), + mSampleLimit(0), + mSeriesLimit(0) { } bool ScrapeConfig::Init(const Json::Value& scrapeConfig) { - if (scrapeConfig.isMember(prometheus::JOB_NAME) && scrapeConfig[prometheus::JOB_NAME].isString()) { - mJobName = scrapeConfig[prometheus::JOB_NAME].asString(); - if (mJobName.empty()) { - LOG_ERROR(sLogger, ("job name is empty", "")); - return false; - } - } else { + if (!InitStaticConfig(scrapeConfig)) { return false; } - - if (scrapeConfig.isMember(prometheus::SCRAPE_INTERVAL) && scrapeConfig[prometheus::SCRAPE_INTERVAL].isString()) { - string tmpScrapeIntervalString = scrapeConfig[prometheus::SCRAPE_INTERVAL].asString(); - mScrapeIntervalSeconds = DurationToSecond(tmpScrapeIntervalString); - } - if (scrapeConfig.isMember(prometheus::SCRAPE_TIMEOUT) && scrapeConfig[prometheus::SCRAPE_TIMEOUT].isString()) { - string tmpScrapeTimeoutString = scrapeConfig[prometheus::SCRAPE_TIMEOUT].asString(); - mScrapeTimeoutSeconds = DurationToSecond(tmpScrapeTimeoutString); - } if (scrapeConfig.isMember(prometheus::SCRAPE_PROTOCOLS) && scrapeConfig[prometheus::SCRAPE_PROTOCOLS].isArray()) { if (!InitScrapeProtocols(scrapeConfig[prometheus::SCRAPE_PROTOCOLS])) { LOG_ERROR(sLogger, ("scrape protocol config error", scrapeConfig[prometheus::SCRAPE_PROTOCOLS])); @@ -56,16 +43,9 @@ bool ScrapeConfig::Init(const Json::Value& scrapeConfig) { if (scrapeConfig.isMember(prometheus::ENABLE_COMPRESSION) && scrapeConfig[prometheus::ENABLE_COMPRESSION].isBool()) { - InitEnableCompression(scrapeConfig[prometheus::ENABLE_COMPRESSION].asBool()); + // InitEnableCompression(scrapeConfig[prometheus::ENABLE_COMPRESSION].asBool()); } else { - InitEnableCompression(true); - } - - if (scrapeConfig.isMember(prometheus::METRICS_PATH) && 
scrapeConfig[prometheus::METRICS_PATH].isString()) { - mMetricsPath = scrapeConfig[prometheus::METRICS_PATH].asString(); - } - if (scrapeConfig.isMember(prometheus::SCHEME) && scrapeConfig[prometheus::SCHEME].isString()) { - mScheme = scrapeConfig[prometheus::SCHEME].asString(); + // InitEnableCompression(true); } // basic auth, authorization, oauth2 @@ -87,71 +67,22 @@ bool ScrapeConfig::Init(const Json::Value& scrapeConfig) { } } - // : a size in bytes, e.g. 512MB. A unit is required. Supported units: B, KB, MB, GB, TB, PB, EB. - if (scrapeConfig.isMember(prometheus::MAX_SCRAPE_SIZE) && scrapeConfig[prometheus::MAX_SCRAPE_SIZE].isString()) { - string tmpMaxScrapeSize = scrapeConfig[prometheus::MAX_SCRAPE_SIZE].asString(); - if (tmpMaxScrapeSize.empty()) { - mMaxScrapeSizeBytes = -1; - } else if (EndWith(tmpMaxScrapeSize, "KiB") || EndWith(tmpMaxScrapeSize, "K") - || EndWith(tmpMaxScrapeSize, "KB")) { - tmpMaxScrapeSize = tmpMaxScrapeSize.substr(0, tmpMaxScrapeSize.find('K')); - mMaxScrapeSizeBytes = stoll(tmpMaxScrapeSize) * 1024; - } else if (EndWith(tmpMaxScrapeSize, "MiB") || EndWith(tmpMaxScrapeSize, "M") - || EndWith(tmpMaxScrapeSize, "MB")) { - tmpMaxScrapeSize = tmpMaxScrapeSize.substr(0, tmpMaxScrapeSize.find('M')); - mMaxScrapeSizeBytes = stoll(tmpMaxScrapeSize) * 1024 * 1024; - } else if (EndWith(tmpMaxScrapeSize, "GiB") || EndWith(tmpMaxScrapeSize, "G") - || EndWith(tmpMaxScrapeSize, "GB")) { - tmpMaxScrapeSize = tmpMaxScrapeSize.substr(0, tmpMaxScrapeSize.find('G')); - mMaxScrapeSizeBytes = stoll(tmpMaxScrapeSize) * 1024 * 1024 * 1024; - } else if (EndWith(tmpMaxScrapeSize, "TiB") || EndWith(tmpMaxScrapeSize, "T") - || EndWith(tmpMaxScrapeSize, "TB")) { - tmpMaxScrapeSize = tmpMaxScrapeSize.substr(0, tmpMaxScrapeSize.find('T')); - mMaxScrapeSizeBytes = stoll(tmpMaxScrapeSize) * 1024 * 1024 * 1024 * 1024; - } else if (EndWith(tmpMaxScrapeSize, "PiB") || EndWith(tmpMaxScrapeSize, "P") - || EndWith(tmpMaxScrapeSize, "PB")) { - tmpMaxScrapeSize = 
tmpMaxScrapeSize.substr(0, tmpMaxScrapeSize.find('P')); - mMaxScrapeSizeBytes = stoll(tmpMaxScrapeSize) * 1024 * 1024 * 1024 * 1024 * 1024; - } else if (EndWith(tmpMaxScrapeSize, "EiB") || EndWith(tmpMaxScrapeSize, "E") - || EndWith(tmpMaxScrapeSize, "EB")) { - tmpMaxScrapeSize = tmpMaxScrapeSize.substr(0, tmpMaxScrapeSize.find('E')); - mMaxScrapeSizeBytes = stoll(tmpMaxScrapeSize) * 1024 * 1024 * 1024 * 1024 * 1024 * 1024; - } else if (EndWith(tmpMaxScrapeSize, "B")) { - tmpMaxScrapeSize = tmpMaxScrapeSize.substr(0, tmpMaxScrapeSize.find('B')); - mMaxScrapeSizeBytes = stoll(tmpMaxScrapeSize); - } - } - - if (scrapeConfig.isMember(prometheus::SAMPLE_LIMIT) && scrapeConfig[prometheus::SAMPLE_LIMIT].isInt64()) { - mSampleLimit = scrapeConfig[prometheus::SAMPLE_LIMIT].asInt64(); - } - if (scrapeConfig.isMember(prometheus::SERIES_LIMIT) && scrapeConfig[prometheus::SERIES_LIMIT].isInt64()) { - mSeriesLimit = scrapeConfig[prometheus::SERIES_LIMIT].asInt64(); - } if (scrapeConfig.isMember(prometheus::PARAMS) && scrapeConfig[prometheus::PARAMS].isObject()) { const Json::Value& params = scrapeConfig[prometheus::PARAMS]; - if (params.isObject()) { - for (const auto& key : params.getMemberNames()) { - const Json::Value& values = params[key]; - if (values.isArray()) { - vector valueList; - for (const auto& value : values) { - valueList.push_back(value.asString()); - } - mParams[key] = valueList; + for (const auto& key : params.getMemberNames()) { + const Json::Value& values = params[key]; + if (values.isArray()) { + vector valueList; + for (const auto& value : values) { + valueList.push_back(value.asString()); } + mParams[key] = valueList; } } } - for (const auto& relabelConfig : scrapeConfig[prometheus::RELABEL_CONFIGS]) { - mRelabelConfigs.emplace_back(relabelConfig); - } - // build query string - for (auto it = mParams.begin(); it != mParams.end(); ++it) { - const auto& key = it->first; - const auto& values = it->second; + for (auto& [key, values] : mParams) { for (const 
auto& value : values) { if (!mQueryString.empty()) { mQueryString += "&"; @@ -165,6 +96,82 @@ bool ScrapeConfig::Init(const Json::Value& scrapeConfig) { return true; } +bool ScrapeConfig::InitStaticConfig(const Json::Value& scrapeConfig) { + if (scrapeConfig.isMember(prometheus::JOB_NAME) && scrapeConfig[prometheus::JOB_NAME].isString()) { + mJobName = scrapeConfig[prometheus::JOB_NAME].asString(); + if (mJobName.empty()) { + LOG_ERROR(sLogger, ("job name is empty", "")); + return false; + } + } else { + return false; + } + + if (scrapeConfig.isMember(prometheus::SCRAPE_INTERVAL) && scrapeConfig[prometheus::SCRAPE_INTERVAL].isString()) { + string tmpScrapeIntervalString = scrapeConfig[prometheus::SCRAPE_INTERVAL].asString(); + mScrapeIntervalSeconds = DurationToSecond(tmpScrapeIntervalString); + if (mScrapeIntervalSeconds == 0) { + LOG_ERROR(sLogger, ("scrape interval is invalid", tmpScrapeIntervalString)); + return false; + } + } + if (scrapeConfig.isMember(prometheus::SCRAPE_TIMEOUT) && scrapeConfig[prometheus::SCRAPE_TIMEOUT].isString()) { + string tmpScrapeTimeoutString = scrapeConfig[prometheus::SCRAPE_TIMEOUT].asString(); + mScrapeTimeoutSeconds = DurationToSecond(tmpScrapeTimeoutString); + if (mScrapeTimeoutSeconds == 0) { + LOG_ERROR(sLogger, ("scrape timeout is invalid", tmpScrapeTimeoutString)); + return false; + } + } + if (scrapeConfig.isMember(prometheus::METRICS_PATH) && scrapeConfig[prometheus::METRICS_PATH].isString()) { + mMetricsPath = scrapeConfig[prometheus::METRICS_PATH].asString(); + } + + if (scrapeConfig.isMember(prometheus::HONOR_LABELS) && scrapeConfig[prometheus::HONOR_LABELS].isBool()) { + mHonorLabels = scrapeConfig[prometheus::HONOR_LABELS].asBool(); + } + + if (scrapeConfig.isMember(prometheus::HONOR_TIMESTAMPS) && scrapeConfig[prometheus::HONOR_TIMESTAMPS].isBool()) { + mHonorTimestamps = scrapeConfig[prometheus::HONOR_TIMESTAMPS].asBool(); + } + + if (scrapeConfig.isMember(prometheus::SCHEME) && 
scrapeConfig[prometheus::SCHEME].isString()) { + mScheme = scrapeConfig[prometheus::SCHEME].asString(); + } + + // : a size in bytes, e.g. 512MB. A unit is required. Supported units: B, KB, MB, GB, TB, PB, EB. + if (scrapeConfig.isMember(prometheus::MAX_SCRAPE_SIZE) && scrapeConfig[prometheus::MAX_SCRAPE_SIZE].isString()) { + string tmpMaxScrapeSize = scrapeConfig[prometheus::MAX_SCRAPE_SIZE].asString(); + mMaxScrapeSizeBytes = SizeToByte(tmpMaxScrapeSize); + if (mMaxScrapeSizeBytes == 0) { + LOG_ERROR(sLogger, ("max scrape size is invalid", tmpMaxScrapeSize)); + return false; + } + } + + if (scrapeConfig.isMember(prometheus::SAMPLE_LIMIT) && scrapeConfig[prometheus::SAMPLE_LIMIT].isInt64()) { + mSampleLimit = scrapeConfig[prometheus::SAMPLE_LIMIT].asUInt64(); + } + if (scrapeConfig.isMember(prometheus::SERIES_LIMIT) && scrapeConfig[prometheus::SERIES_LIMIT].isInt64()) { + mSeriesLimit = scrapeConfig[prometheus::SERIES_LIMIT].asUInt64(); + } + + if (scrapeConfig.isMember(prometheus::RELABEL_CONFIGS)) { + if (!mRelabelConfigs.Init(scrapeConfig[prometheus::RELABEL_CONFIGS])) { + LOG_ERROR(sLogger, ("relabel config error", "")); + return false; + } + } + + if (scrapeConfig.isMember(prometheus::METRIC_RELABEL_CONFIGS)) { + if (!mMetricRelabelConfigs.Init(scrapeConfig[prometheus::METRIC_RELABEL_CONFIGS])) { + LOG_ERROR(sLogger, ("metric relabel config error", "")); + return false; + } + } + return true; +} + bool ScrapeConfig::InitBasicAuth(const Json::Value& basicAuth) { string username; string usernameFile; diff --git a/core/prometheus/schedulers/ScrapeConfig.h b/core/prometheus/schedulers/ScrapeConfig.h index eac6d9615a..4af52c39f0 100644 --- a/core/prometheus/schedulers/ScrapeConfig.h +++ b/core/prometheus/schedulers/ScrapeConfig.h @@ -19,6 +19,8 @@ class ScrapeConfig { int64_t mScrapeIntervalSeconds; int64_t mScrapeTimeoutSeconds; std::string mMetricsPath; + bool mHonorLabels; + bool mHonorTimestamps; std::string mScheme; // auth header @@ -26,10 +28,11 @@ class 
ScrapeConfig { // enable_compression Accept-Encoding header: gzip, identity std::map mRequestHeaders; - int64_t mMaxScrapeSizeBytes; - int64_t mSampleLimit; - int64_t mSeriesLimit; - std::vector mRelabelConfigs; + uint64_t mMaxScrapeSizeBytes; + uint64_t mSampleLimit; + uint64_t mSeriesLimit; + RelabelConfigList mRelabelConfigs; + RelabelConfigList mMetricRelabelConfigs; std::map> mParams; @@ -37,6 +40,7 @@ class ScrapeConfig { ScrapeConfig(); bool Init(const Json::Value& config); + bool InitStaticConfig(const Json::Value& config); private: bool InitBasicAuth(const Json::Value& basicAuth); diff --git a/core/prometheus/schedulers/ScrapeScheduler.cpp b/core/prometheus/schedulers/ScrapeScheduler.cpp index f5013eebea..31a1d91c05 100644 --- a/core/prometheus/schedulers/ScrapeScheduler.cpp +++ b/core/prometheus/schedulers/ScrapeScheduler.cpp @@ -21,17 +21,18 @@ #include #include -#include "Common.h" #include "common/StringTools.h" #include "common/TimeUtil.h" #include "common/timer/HttpRequestTimerEvent.h" #include "common/timer/Timer.h" #include "logger/Logger.h" -#include "prometheus/Constants.h" -#include "prometheus/async/PromHttpRequest.h" #include "pipeline/queue/ProcessQueueItem.h" #include "pipeline/queue/ProcessQueueManager.h" #include "pipeline/queue/QueueKey.h" +#include "prometheus/Constants.h" +#include "prometheus/async/PromFuture.h" +#include "prometheus/async/PromHttpRequest.h" +#include "sdk/Common.h" using namespace std; @@ -46,13 +47,13 @@ ScrapeScheduler::ScrapeScheduler(std::shared_ptr scrapeConfigPtr, : mScrapeConfigPtr(std::move(scrapeConfigPtr)), mHost(std::move(host)), mPort(port), - mLabels(std::move(labels)), + mTargetLabels(std::move(labels)), mQueueKey(queueKey), mInputIndex(inputIndex) { string tmpTargetURL = mScrapeConfigPtr->mScheme + "://" + mHost + ":" + ToString(mPort) + mScrapeConfigPtr->mMetricsPath + (mScrapeConfigPtr->mQueryString.empty() ? "" : "?" 
+ mScrapeConfigPtr->mQueryString); - mHash = mScrapeConfigPtr->mJobName + tmpTargetURL + ToString(mLabels.Hash()); + mHash = mScrapeConfigPtr->mJobName + tmpTargetURL + ToString(mTargetLabels.Hash()); mInstance = mHost + ":" + ToString(mPort); mInterval = mScrapeConfigPtr->mScrapeIntervalSeconds; @@ -60,6 +61,11 @@ ScrapeScheduler::ScrapeScheduler(std::shared_ptr scrapeConfigPtr, } void ScrapeScheduler::OnMetricResult(const HttpResponse& response, uint64_t timestampMilliSec) { + mSelfMonitor->AddCounter(METRIC_PLUGIN_OUT_EVENTS_TOTAL, response.mStatusCode); + mSelfMonitor->AddCounter(METRIC_PLUGIN_OUT_SIZE_BYTES, response.mStatusCode, response.mBody.size()); + mSelfMonitor->AddCounter( + METRIC_PLUGIN_PROM_SCRAPE_TIME_MS, response.mStatusCode, GetCurrentTimeInMilliSeconds() - timestampMilliSec); + mScrapeTimestampMilliSec = timestampMilliSec; mScrapeDurationSeconds = 1.0 * (GetCurrentTimeInMilliSeconds() - timestampMilliSec) / 1000; mScrapeResponseSizeBytes = response.mBody.size(); @@ -76,17 +82,22 @@ void ScrapeScheduler::OnMetricResult(const HttpResponse& response, uint64_t time auto eventGroup = BuildPipelineEventGroup(response.mBody); SetAutoMetricMeta(eventGroup); + SetTargetLabels(eventGroup); PushEventGroup(std::move(eventGroup)); + mPluginTotalDelayMs->Add(GetCurrentTimeInMilliSeconds() - timestampMilliSec); } void ScrapeScheduler::SetAutoMetricMeta(PipelineEventGroup& eGroup) { eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_TIMESTAMP_MILLISEC, ToString(mScrapeTimestampMilliSec)); eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_DURATION, ToString(mScrapeDurationSeconds)); eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_RESPONSE_SIZE, ToString(mScrapeResponseSizeBytes)); - eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_INSTANCE, mInstance); eGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_UP_STATE, ToString(mUpState)); } +void ScrapeScheduler::SetTargetLabels(PipelineEventGroup& eGroup) { + mTargetLabels.Range([&eGroup](const 
std::string& key, const std::string& value) { eGroup.SetTag(key, value); }); +} + PipelineEventGroup ScrapeScheduler::BuildPipelineEventGroup(const std::string& content) { return mParser->BuildLogGroup(content); } @@ -95,8 +106,14 @@ void ScrapeScheduler::PushEventGroup(PipelineEventGroup&& eGroup) { auto item = make_unique(std::move(eGroup), mInputIndex); #ifdef APSARA_UNIT_TEST_MAIN mItem.push_back(std::move(item)); + return; #endif - ProcessQueueManager::GetInstance()->PushQueue(mQueueKey, std::move(item)); + while (true) { + if (ProcessQueueManager::GetInstance()->PushQueue(mQueueKey, std::move(item)) == 0) { + break; + } + usleep(10 * 1000); + } } string ScrapeScheduler::GetId() const { @@ -104,21 +121,35 @@ string ScrapeScheduler::GetId() const { } void ScrapeScheduler::ScheduleNext() { - auto future = std::make_shared(); + auto future = std::make_shared>(); + auto isContextValidFuture = std::make_shared>(); future->AddDoneCallback([this](const HttpResponse& response, uint64_t timestampMilliSec) { this->OnMetricResult(response, timestampMilliSec); this->ExecDone(); this->ScheduleNext(); + return true; + }); + isContextValidFuture->AddDoneCallback([this]() -> bool { + if (ProcessQueueManager::GetInstance()->IsValidToPush(mQueueKey)) { + return true; + } else { + this->DelayExecTime(1); + this->mPromDelayTotal->Add(1); + this->ScheduleNext(); + return false; + } }); if (IsCancelled()) { mFuture->Cancel(); + mIsContextValidFuture->Cancel(); return; } { WriteLock lock(mLock); mFuture = future; + mIsContextValidFuture = isContextValidFuture; } auto event = BuildScrapeTimerEvent(GetNextExecTime()); @@ -126,9 +157,10 @@ void ScrapeScheduler::ScheduleNext() { } void ScrapeScheduler::ScrapeOnce(std::chrono::steady_clock::time_point execTime) { - auto future = std::make_shared(); + auto future = std::make_shared>(); future->AddDoneCallback([this](const HttpResponse& response, uint64_t timestampMilliSec) { this->OnMetricResult(response, timestampMilliSec); + return 
true; }); mFuture = future; auto event = BuildScrapeTimerEvent(execTime); @@ -149,15 +181,19 @@ std::unique_ptr ScrapeScheduler::BuildScrapeTimerEvent(std::chrono:: mScrapeConfigPtr->mScrapeTimeoutSeconds, mScrapeConfigPtr->mScrapeIntervalSeconds / mScrapeConfigPtr->mScrapeTimeoutSeconds, - this->mFuture); + this->mFuture, + this->mIsContextValidFuture); auto timerEvent = std::make_unique(execTime, std::move(request)); return timerEvent; } void ScrapeScheduler::Cancel() { - if (mFuture) { + if (mFuture != nullptr) { mFuture->Cancel(); } + if (mIsContextValidFuture != nullptr) { + mIsContextValidFuture->Cancel(); + } { WriteLock lock(mLock); mValidState = false; @@ -167,4 +203,23 @@ void ScrapeScheduler::Cancel() { void ScrapeScheduler::SetTimer(std::shared_ptr timer) { mTimer = std::move(timer); } + +void ScrapeScheduler::InitSelfMonitor(const MetricLabels& defaultLabels) { + mSelfMonitor = std::make_shared(); + MetricLabels labels = defaultLabels; + labels.emplace_back(METRIC_LABEL_KEY_INSTANCE, mInstance); + + static const std::unordered_map sScrapeMetricKeys = { + {METRIC_PLUGIN_OUT_EVENTS_TOTAL, MetricType::METRIC_TYPE_COUNTER}, + {METRIC_PLUGIN_OUT_SIZE_BYTES, MetricType::METRIC_TYPE_COUNTER}, + {METRIC_PLUGIN_PROM_SCRAPE_TIME_MS, MetricType::METRIC_TYPE_COUNTER}, + }; + + mSelfMonitor->InitMetricManager(sScrapeMetricKeys, labels); + + WriteMetrics::GetInstance()->PrepareMetricsRecordRef(mMetricsRecordRef, std::move(labels)); + mPromDelayTotal = mMetricsRecordRef.CreateCounter(METRIC_PLUGIN_PROM_SCRAPE_DELAY_TOTAL); + mPluginTotalDelayMs = mMetricsRecordRef.CreateCounter(METRIC_PLUGIN_TOTAL_DELAY_MS); +} + } // namespace logtail diff --git a/core/prometheus/schedulers/ScrapeScheduler.h b/core/prometheus/schedulers/ScrapeScheduler.h index 328b040f0a..3b6ee35045 100644 --- a/core/prometheus/schedulers/ScrapeScheduler.h +++ b/core/prometheus/schedulers/ScrapeScheduler.h @@ -23,9 +23,11 @@ #include "common/http/HttpResponse.h" #include "common/timer/Timer.h" 
#include "models/PipelineEventGroup.h" +#include "monitor/LoongCollectorMetricTypes.h" +#include "pipeline/queue/QueueKey.h" +#include "prometheus/PromSelfMonitor.h" #include "prometheus/labels/TextParser.h" #include "prometheus/schedulers/ScrapeConfig.h" -#include "pipeline/queue/QueueKey.h" #ifdef APSARA_UNIT_TEST_MAIN #include "pipeline/queue/ProcessQueueItem.h" @@ -52,10 +54,12 @@ class ScrapeScheduler : public BaseScheduler { void ScheduleNext() override; void ScrapeOnce(std::chrono::steady_clock::time_point execTime); void Cancel() override; + void InitSelfMonitor(const MetricLabels&); private: void PushEventGroup(PipelineEventGroup&&); void SetAutoMetricMeta(PipelineEventGroup& eGroup); + void SetTargetLabels(PipelineEventGroup& eGroup); PipelineEventGroup BuildPipelineEventGroup(const std::string& content); @@ -67,7 +71,7 @@ class ScrapeScheduler : public BaseScheduler { std::string mHost; int32_t mPort; std::string mInstance; - Labels mLabels; + Labels mTargetLabels; std::unique_ptr mParser; @@ -80,6 +84,12 @@ class ScrapeScheduler : public BaseScheduler { double mScrapeDurationSeconds = 0; uint64_t mScrapeResponseSizeBytes = 0; bool mUpState = true; + + // self monitor + std::shared_ptr mSelfMonitor; + MetricsRecordRef mMetricsRecordRef; + CounterPtr mPromDelayTotal; + CounterPtr mPluginTotalDelayMs; #ifdef APSARA_UNIT_TEST_MAIN friend class ProcessorParsePrometheusMetricUnittest; friend class ScrapeSchedulerUnittest; diff --git a/core/prometheus/schedulers/TargetSubscriberScheduler.cpp b/core/prometheus/schedulers/TargetSubscriberScheduler.cpp index c8bc2d501e..fd23f60fdf 100644 --- a/core/prometheus/schedulers/TargetSubscriberScheduler.cpp +++ b/core/prometheus/schedulers/TargetSubscriberScheduler.cpp @@ -27,6 +27,7 @@ #include "common/timer/HttpRequestTimerEvent.h" #include "common/timer/Timer.h" #include "logger/Logger.h" +#include "monitor/metric_constants/MetricConstants.h" #include "prometheus/Constants.h" #include "prometheus/Utils.h" #include 
"prometheus/async/PromFuture.h" @@ -56,7 +57,10 @@ bool TargetSubscriberScheduler::operator<(const TargetSubscriberScheduler& other return mJobName < other.mJobName; } -void TargetSubscriberScheduler::OnSubscription(const HttpResponse& response, uint64_t) { +void TargetSubscriberScheduler::OnSubscription(const HttpResponse& response, uint64_t timestampMilliSec) { + mSelfMonitor->AddCounter(METRIC_PLUGIN_PROM_SUBSCRIBE_TOTAL, response.mStatusCode); + mSelfMonitor->AddCounter( + METRIC_PLUGIN_PROM_SUBSCRIBE_TIME_MS, response.mStatusCode, GetCurrentTimeInMilliSeconds() - timestampMilliSec); if (response.mStatusCode == 304) { // not modified return; @@ -75,6 +79,8 @@ void TargetSubscriberScheduler::OnSubscription(const HttpResponse& response, uin std::unordered_map> newScrapeSchedulerSet = BuildScrapeSchedulerSet(targetGroup); UpdateScrapeScheduler(newScrapeSchedulerSet); + mPromSubscriberTargets->Set(mScrapeSchedulerMap.size()); + mTotalDelayMs->Add(GetCurrentTimeInMilliSeconds() - timestampMilliSec); } void TargetSubscriberScheduler::UpdateScrapeScheduler( @@ -159,23 +165,22 @@ bool TargetSubscriberScheduler::ParseScrapeSchedulerGroup(const std::string& con } // Parse labels Labels labels; - labels.Push(Label{prometheus::JOB, mJobName}); - labels.Push(Label{prometheus::ADDRESS_LABEL_NAME, targets[0]}); - labels.Push(Label{prometheus::SCHEME_LABEL_NAME, mScrapeConfigPtr->mScheme}); - labels.Push(Label{prometheus::METRICS_PATH_LABEL_NAME, mScrapeConfigPtr->mMetricsPath}); - labels.Push( - Label{prometheus::SCRAPE_INTERVAL_LABEL_NAME, SecondToDuration(mScrapeConfigPtr->mScrapeIntervalSeconds)}); - labels.Push( - Label{prometheus::SCRAPE_TIMEOUT_LABEL_NAME, SecondToDuration(mScrapeConfigPtr->mScrapeTimeoutSeconds)}); + labels.Set(prometheus::JOB, mJobName); + labels.Set(prometheus::INSTANCE, targets[0]); + labels.Set(prometheus::ADDRESS_LABEL_NAME, targets[0]); + labels.Set(prometheus::SCHEME_LABEL_NAME, mScrapeConfigPtr->mScheme); + 
labels.Set(prometheus::METRICS_PATH_LABEL_NAME, mScrapeConfigPtr->mMetricsPath); + labels.Set(prometheus::SCRAPE_INTERVAL_LABEL_NAME, SecondToDuration(mScrapeConfigPtr->mScrapeIntervalSeconds)); + labels.Set(prometheus::SCRAPE_TIMEOUT_LABEL_NAME, SecondToDuration(mScrapeConfigPtr->mScrapeTimeoutSeconds)); for (const auto& pair : mScrapeConfigPtr->mParams) { if (!pair.second.empty()) { - labels.Push(Label{prometheus::PARAM_LABEL_NAME + pair.first, pair.second[0]}); + labels.Set(prometheus::PARAM_LABEL_NAME + pair.first, pair.second[0]); } } if (element.isMember(prometheus::LABELS) && element[prometheus::LABELS].isObject()) { - for (const auto& labelKey : element[prometheus::LABELS].getMemberNames()) { - labels.Push(Label{labelKey, element[prometheus::LABELS][labelKey].asString()}); + for (const string& labelKey : element[prometheus::LABELS].getMemberNames()) { + labels.Set(labelKey, element[prometheus::LABELS][labelKey].asString()); } } scrapeSchedulerGroup.push_back(labels); @@ -188,9 +193,12 @@ TargetSubscriberScheduler::BuildScrapeSchedulerSet(std::vector& targetGr std::unordered_map> scrapeSchedulerMap; for (const auto& labels : targetGroups) { // Relabel Config - Labels resultLabel = Labels(); - bool keep = prometheus::Process(labels, mScrapeConfigPtr->mRelabelConfigs, resultLabel); - if (!keep) { + Labels resultLabel = labels; + // bool keep = prometheus::Process(labels, mScrapeConfigPtr->mRelabelConfigs, resultLabel); + // if (!keep) { + // continue; + // } + if (!mScrapeConfigPtr->mRelabelConfigs.Process(resultLabel)) { continue; } resultLabel.RemoveMetaLabels(); @@ -220,6 +228,7 @@ TargetSubscriberScheduler::BuildScrapeSchedulerSet(std::vector& targetGr scrapeScheduler->GetId(), mScrapeConfigPtr->mScrapeIntervalSeconds, GetCurrentTimeInMilliSeconds()); auto firstExecTime = std::chrono::steady_clock::now() + std::chrono::milliseconds(randSleepMilliSec); scrapeScheduler->SetFirstExecTime(firstExecTime); + scrapeScheduler->InitSelfMonitor(mDefaultLabels); 
scrapeSchedulerMap[scrapeScheduler->GetId()] = scrapeScheduler; } @@ -235,11 +244,12 @@ string TargetSubscriberScheduler::GetId() const { } void TargetSubscriberScheduler::ScheduleNext() { - auto future = std::make_shared(); + auto future = std::make_shared>(); future->AddDoneCallback([this](const HttpResponse& response, uint64_t timestampMilliSec) { this->OnSubscription(response, timestampMilliSec); this->ExecDone(); this->ScheduleNext(); + return true; }); if (IsCancelled()) { mFuture->Cancel(); @@ -265,9 +275,10 @@ void TargetSubscriberScheduler::Cancel() { } void TargetSubscriberScheduler::SubscribeOnce(std::chrono::steady_clock::time_point execTime) { - auto future = std::make_shared(); + auto future = std::make_shared>(); future->AddDoneCallback([this](const HttpResponse& response, uint64_t timestampNanoSec) { this->OnSubscription(response, timestampNanoSec); + return true; }); mFuture = future; auto event = BuildSubscriberTimerEvent(execTime); @@ -308,5 +319,24 @@ void TargetSubscriberScheduler::CancelAllScrapeScheduler() { } } +void TargetSubscriberScheduler::InitSelfMonitor(const MetricLabels& defaultLabels) { + mDefaultLabels = defaultLabels; + mDefaultLabels.emplace_back(METRIC_LABEL_KEY_JOB, mJobName); + mDefaultLabels.emplace_back(METRIC_LABEL_KEY_POD_NAME, mPodName); + mDefaultLabels.emplace_back(METRIC_LABEL_KEY_SERVICE_HOST, mServiceHost); + mDefaultLabels.emplace_back(METRIC_LABEL_KEY_SERVICE_PORT, ToString(mServicePort)); + + static const std::unordered_map sSubscriberMetricKeys = { + {METRIC_PLUGIN_PROM_SUBSCRIBE_TOTAL, MetricType::METRIC_TYPE_COUNTER}, + {METRIC_PLUGIN_PROM_SUBSCRIBE_TIME_MS, MetricType::METRIC_TYPE_COUNTER}, + }; + + mSelfMonitor = std::make_shared(); + mSelfMonitor->InitMetricManager(sSubscriberMetricKeys, mDefaultLabels); + + WriteMetrics::GetInstance()->PrepareMetricsRecordRef(mMetricsRecordRef, std::move(mDefaultLabels)); + mPromSubscriberTargets = mMetricsRecordRef.CreateIntGauge(METRIC_PLUGIN_PROM_SUBSCRIBE_TARGETS); + 
mTotalDelayMs = mMetricsRecordRef.CreateCounter(METRIC_PLUGIN_TOTAL_DELAY_MS); +} } // namespace logtail diff --git a/core/prometheus/schedulers/TargetSubscriberScheduler.h b/core/prometheus/schedulers/TargetSubscriberScheduler.h index 114d0d47c9..db17322023 100644 --- a/core/prometheus/schedulers/TargetSubscriberScheduler.h +++ b/core/prometheus/schedulers/TargetSubscriberScheduler.h @@ -24,11 +24,11 @@ #include "common/http/HttpResponse.h" #include "common/timer/Timer.h" +#include "pipeline/queue/QueueKey.h" +#include "prometheus/PromSelfMonitor.h" #include "prometheus/schedulers/BaseScheduler.h" #include "prometheus/schedulers/ScrapeConfig.h" #include "prometheus/schedulers/ScrapeScheduler.h" -#include "pipeline/queue/QueueKey.h" - namespace logtail { @@ -48,6 +48,7 @@ class TargetSubscriberScheduler : public BaseScheduler { void ScheduleNext() override; void Cancel() override; + void InitSelfMonitor(const MetricLabels&); // from pipeline context QueueKey mQueueKey; @@ -82,6 +83,12 @@ class TargetSubscriberScheduler : public BaseScheduler { std::string mETag; + // self monitor + std::shared_ptr mSelfMonitor; + MetricsRecordRef mMetricsRecordRef; + IntGaugePtr mPromSubscriberTargets; + CounterPtr mTotalDelayMs; + MetricLabels mDefaultLabels; #ifdef APSARA_UNIT_TEST_MAIN friend class TargetSubscriberSchedulerUnittest; friend class InputPrometheusUnittest; diff --git a/core/protobuf/sls/logtail_buffer_meta.proto b/core/protobuf/sls/logtail_buffer_meta.proto index 458027f715..dc2639e997 100644 --- a/core/protobuf/sls/logtail_buffer_meta.proto +++ b/core/protobuf/sls/logtail_buffer_meta.proto @@ -27,4 +27,5 @@ message LogtailBufferMeta optional int32 rawsize = 6; optional string shardhashkey = 7; optional SlsCompressType compresstype = 8; + optional SlsTelemetryType telemetrytype = 9; } diff --git a/core/protobuf/sls/sls_logs.proto b/core/protobuf/sls/sls_logs.proto index 16edf75bb5..9f3aad5856 100644 --- a/core/protobuf/sls/sls_logs.proto +++ 
b/core/protobuf/sls/sls_logs.proto @@ -23,6 +23,12 @@ enum SlsCompressType SLS_CMP_ZSTD = 3; } +enum SlsTelemetryType +{ + SLS_TELEMETRY_TYPE_LOGS = 0; + SLS_TELEMETRY_TYPE_METRICS = 1; +} + message Log { required uint32 Time = 1;// UNIX Time Format diff --git a/core/runner/FlusherRunner.cpp b/core/runner/FlusherRunner.cpp index 16af0a51df..7e452ae5ce 100644 --- a/core/runner/FlusherRunner.cpp +++ b/core/runner/FlusherRunner.cpp @@ -41,6 +41,16 @@ namespace logtail { bool FlusherRunner::Init() { srand(time(nullptr)); + WriteMetrics::GetInstance()->PrepareMetricsRecordRef(mMetricsRecordRef, + {{METRIC_LABEL_KEY_RUNNER_NAME, METRIC_LABEL_VALUE_RUNNER_NAME_FLUSHER}}); + mInItemsTotal = mMetricsRecordRef.CreateCounter(METRIC_RUNNER_IN_ITEMS_TOTAL); + mInItemDataSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_RUNNER_IN_SIZE_BYTES); + mOutItemsTotal = mMetricsRecordRef.CreateCounter(METRIC_RUNNER_OUT_ITEMS_TOTAL); + mTotalDelayMs = mMetricsRecordRef.CreateCounter(METRIC_RUNNER_TOTAL_DELAY_MS); + mLastRunTime = mMetricsRecordRef.CreateIntGauge(METRIC_RUNNER_LAST_RUN_TIME); + mInItemRawDataSizeBytes = mMetricsRecordRef.CreateCounter(METRIC_RUNNER_FLUSHER_IN_SIZE_BYTES); + mWaitingItemsTotal = mMetricsRecordRef.CreateIntGauge(METRIC_RUNNER_FLUSHER_WAITING_ITEMS_TOTAL); + mThreadRes = async(launch::async, &FlusherRunner::Run, this); mLastCheckSendClientTime = time(nullptr); return true; @@ -99,12 +109,21 @@ void FlusherRunner::Run() { LOG_INFO(sLogger, ("flusher runner", "started")); while (true) { auto curTime = chrono::system_clock::now(); + mLastRunTime->Set(chrono::duration_cast(curTime.time_since_epoch()).count()); vector items; - SenderQueueManager::GetInstance()->GetAllAvailableItems(items, !Application::GetInstance()->IsExiting()); + int32_t limit = Application::GetInstance()->IsExiting() ? 
-1 : AppConfig::GetInstance()->GetSendRequestConcurrency(); + SenderQueueManager::GetInstance()->GetAvailableItems(items, limit); if (items.empty()) { SenderQueueManager::GetInstance()->Wait(1000); } else { + for (auto itr = items.begin(); itr != items.end(); ++itr) { + mInItemDataSizeBytes->Add((*itr)->mData.size()); + mInItemRawDataSizeBytes->Add((*itr)->mRawSize); + } + mInItemsTotal->Add(items.size()); + mWaitingItemsTotal->Add(items.size()); + // smoothing send tps, walk around webserver load burst uint32_t bufferPackageCount = items.size(); if (!Application::GetInstance()->IsExiting() && AppConfig::GetInstance()->IsSendRandomSleep()) { @@ -132,6 +151,10 @@ void FlusherRunner::Run() { } Dispatch(*itr); + mWaitingItemsTotal->Sub(1); + mOutItemsTotal->Add(1); + mTotalDelayMs->Add( + chrono::duration_cast(chrono::system_clock::now() - curTime).count()); } // TODO: move the following logic to scheduler diff --git a/core/runner/FlusherRunner.h b/core/runner/FlusherRunner.h index fb7347cddb..90fdf7e5a4 100644 --- a/core/runner/FlusherRunner.h +++ b/core/runner/FlusherRunner.h @@ -20,6 +20,7 @@ #include #include +#include "monitor/LogtailMetric.h" #include "pipeline/plugin/interface/Flusher.h" #include "pipeline/queue/SenderQueueItem.h" #include "runner/sink/SinkType.h" @@ -63,6 +64,15 @@ class FlusherRunner { int64_t mSendLastTime = 0; int32_t mSendLastByte = 0; + mutable MetricsRecordRef mMetricsRecordRef; + CounterPtr mInItemsTotal; + CounterPtr mInItemDataSizeBytes; + CounterPtr mInItemRawDataSizeBytes; + CounterPtr mOutItemsTotal; + CounterPtr mTotalDelayMs; + IntGaugePtr mWaitingItemsTotal; + IntGaugePtr mLastRunTime; + #ifdef APSARA_UNIT_TEST_MAIN friend class PluginRegistryUnittest; friend class FlusherRunnerUnittest; diff --git a/core/runner/LogProcess.h b/core/runner/LogProcess.h deleted file mode 100644 index c0bd77e9ad..0000000000 --- a/core/runner/LogProcess.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2022 iLogtail Authors - * - * Licensed under 
the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include - -#include "common/Lock.h" -#include "common/LogRunnable.h" -#include "common/Thread.h" -#include "models/PipelineEventGroup.h" -#include "monitor/Monitor.h" -#include "pipeline/queue/QueueKey.h" - -namespace logtail { - -class LogProcess : public LogRunnable { -public: - static LogProcess* GetInstance() { - static LogProcess* ptr = new LogProcess(); - return ptr; - } - - void Start(); - void HoldOn(); - void Resume(); - bool FlushOut(int32_t waitMs); - - void* ProcessLoop(int32_t threadNo); - // TODO: replace key with configName - bool PushBuffer(QueueKey key, size_t inputIndex, PipelineEventGroup&& group, uint32_t retryTimes = 1); - -private: - LogProcess(); - ~LogProcess(); - - bool Serialize(const PipelineEventGroup& group, - bool enableNanosecond, - const std::string& logstore, - std::string& res, - std::string& errorMsg); - - bool mInitialized = false; - ThreadPtr* mProcessThreads; - int32_t mThreadCount = 1; - std::atomic_bool* mThreadFlags; - ReadWriteLock mAccessProcessThreadRWL; - - IntGaugePtr mAgentProcessQueueFullTotal; - IntGaugePtr mAgentProcessQueueTotal; -}; - -} // namespace logtail \ No newline at end of file diff --git a/core/runner/LogProcess.cpp b/core/runner/ProcessorRunner.cpp similarity index 65% rename from core/runner/LogProcess.cpp rename to core/runner/ProcessorRunner.cpp index cd65998408..336428997a 100644 --- a/core/runner/LogProcess.cpp +++ 
b/core/runner/ProcessorRunner.cpp @@ -1,4 +1,4 @@ -// Copyright 2022 iLogtail Authors +// Copyright 2024 iLogtail Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,18 +12,21 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "runner/LogProcess.h" +#include "runner/ProcessorRunner.h" + +#include #include "app_config/AppConfig.h" -#include "pipeline/batch/TimeoutFlushManager.h" +#include "batch/TimeoutFlushManager.h" #include "common/Flags.h" #include "go_pipeline/LogtailPlugin.h" #include "monitor/LogFileProfiler.h" #include "monitor/LogtailAlarm.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/PipelineManager.h" -#include "pipeline/queue/ExactlyOnceQueueManager.h" -#include "pipeline/queue/ProcessQueueManager.h" -#include "pipeline/queue/QueueKeyManager.h" +#include "queue/ExactlyOnceQueueManager.h" +#include "queue/ProcessQueueManager.h" +#include "queue/QueueKeyManager.h" DECLARE_FLAG_INT32(max_send_log_group_size); @@ -39,38 +42,36 @@ DEFINE_FLAG_INT32(default_flush_merged_buffer_interval, "default flush merged bu namespace logtail { -LogProcess::LogProcess() : mAccessProcessThreadRWL(ReadWriteLock::PREFER_WRITER) { +thread_local MetricsRecordRef ProcessorRunner::sMetricsRecordRef; +thread_local CounterPtr ProcessorRunner::sInGroupsCnt; +thread_local CounterPtr ProcessorRunner::sInEventsCnt; +thread_local CounterPtr ProcessorRunner::sInGroupDataSizeBytes; +thread_local IntGaugePtr ProcessorRunner::sLastRunTime; + +ProcessorRunner::ProcessorRunner() + : mThreadCount(AppConfig::GetInstance()->GetProcessThreadCount()), mThreadRes(mThreadCount) { } -LogProcess::~LogProcess() { - for (int32_t threadNo = 0; threadNo < mThreadCount; ++threadNo) { - try { - mProcessThreads[threadNo]->GetValue(1000 * 100); - } catch (...) 
{ - } +void ProcessorRunner::Init() { + for (uint32_t threadNo = 0; threadNo < mThreadCount; ++threadNo) { + mThreadRes[threadNo] = async(launch::async, &ProcessorRunner::Run, this, threadNo); } - delete[] mThreadFlags; - delete[] mProcessThreads; } -void LogProcess::Start() { - if (mInitialized) - return; - mAgentProcessQueueFullTotal = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_PROCESS_QUEUE_FULL_TOTAL); - mAgentProcessQueueTotal = LoongCollectorMonitor::GetInstance()->GetIntGauge(METRIC_AGENT_PROCESS_QUEUE_TOTAL); - - mInitialized = true; - mThreadCount = AppConfig::GetInstance()->GetProcessThreadCount(); - mProcessThreads = new ThreadPtr[mThreadCount]; - mThreadFlags = new atomic_bool[mThreadCount]; - for (int32_t threadNo = 0; threadNo < mThreadCount; ++threadNo) { - mThreadFlags[threadNo] = false; - mProcessThreads[threadNo] = CreateThread([this, threadNo]() { ProcessLoop(threadNo); }); +void ProcessorRunner::Stop() { + mIsFlush = true; + ProcessQueueManager::GetInstance()->Trigger(); + for (uint32_t threadNo = 0; threadNo < mThreadCount; ++threadNo) { + future_status s = mThreadRes[threadNo].wait_for(chrono::seconds(1)); + if (s == future_status::ready) { + LOG_INFO(sLogger, ("processor runner", "stopped successfully")("threadNo", threadNo)); + } else { + LOG_WARNING(sLogger, ("processor runner", "forced to stopped")("threadNo", threadNo)); + } } - LOG_INFO(sLogger, ("process daemon", "started")); } -bool LogProcess::PushBuffer(QueueKey key, size_t inputIndex, PipelineEventGroup&& group, uint32_t retryTimes) { +bool ProcessorRunner::PushQueue(QueueKey key, size_t inputIndex, PipelineEventGroup&& group, uint32_t retryTimes) { unique_ptr item = make_unique(std::move(group), inputIndex); for (size_t i = 0; i < retryTimes; ++i) { if (ProcessQueueManager::GetInstance()->PushQueue(key, std::move(item)) == 0) { @@ -87,111 +88,45 @@ bool LogProcess::PushBuffer(QueueKey key, size_t inputIndex, PipelineEventGroup& return false; } -void 
LogProcess::HoldOn() { - LOG_INFO(sLogger, ("process daemon pause", "starts")); - mAccessProcessThreadRWL.lock(); - while (true) { - bool allThreadWait = true; - for (int32_t threadNo = 0; threadNo < mThreadCount; ++threadNo) { - if (mThreadFlags[threadNo]) { - allThreadWait = false; - break; - } - } - if (allThreadWait) { - LOG_INFO(sLogger, ("process daemon pause", "succeeded")); - return; - } - usleep(10 * 1000); - } -} +void ProcessorRunner::Run(uint32_t threadNo) { + LOG_INFO(sLogger, ("processor runner", "started")("threadNo", threadNo)); -void LogProcess::Resume() { - LOG_INFO(sLogger, ("process daemon resume", "starts")); - mAccessProcessThreadRWL.unlock(); - LOG_INFO(sLogger, ("process daemon resume", "succeeded")); -} + // thread local metrics should be initialized in each thread + WriteMetrics::GetInstance()->PrepareMetricsRecordRef( + sMetricsRecordRef, {{METRIC_LABEL_KEY_RUNNER_NAME, METRIC_LABEL_VALUE_RUNNER_NAME_PROCESSOR}, {"thread_no", ToString(threadNo)}}); + sInGroupsCnt = sMetricsRecordRef.CreateCounter(METRIC_RUNNER_IN_EVENT_GROUPS_TOTAL); + sInEventsCnt = sMetricsRecordRef.CreateCounter(METRIC_RUNNER_IN_EVENTS_TOTAL); + sInGroupDataSizeBytes = sMetricsRecordRef.CreateCounter(METRIC_RUNNER_IN_SIZE_BYTES); + sLastRunTime = sMetricsRecordRef.CreateIntGauge(METRIC_RUNNER_LAST_RUN_TIME); -bool LogProcess::FlushOut(int32_t waitMs) { - ProcessQueueManager::GetInstance()->Trigger(); - if (ProcessQueueManager::GetInstance()->IsAllQueueEmpty()) { - bool allThreadWait = true; - for (int32_t threadNo = 0; threadNo < mThreadCount; ++threadNo) { - if (mThreadFlags[threadNo]) { - allThreadWait = false; - break; - } else { - // sleep 1ms and double check - usleep(1000); - if (mThreadFlags[threadNo]) { - allThreadWait = false; - break; - } - } - } - if (allThreadWait) - return true; - } - usleep(waitMs * 1000); - return false; -} - -void* LogProcess::ProcessLoop(int32_t threadNo) { - LOG_DEBUG(sLogger, ("runner/LogProcess.hread", "Start")("threadNo", 
threadNo)); static int32_t lastMergeTime = 0; - static atomic_int s_processCount{0}; - static atomic_long s_processBytes{0}; - static atomic_int s_processLines{0}; - // only thread 0 update metric - int32_t lastUpdateMetricTime = time(NULL); while (true) { - mThreadFlags[threadNo] = false; - int32_t curTime = time(NULL); if (threadNo == 0 && curTime - lastMergeTime >= INT32_FLAG(default_flush_merged_buffer_interval)) { TimeoutFlushManager::GetInstance()->FlushTimeoutBatch(); lastMergeTime = curTime; } - if (threadNo == 0 && curTime - lastUpdateMetricTime >= 40) { - static auto sMonitor = LogtailMonitor::GetInstance(); - - // atomic counter will be negative if process speed is too fast. - sMonitor->UpdateMetric("process_tps", 1.0 * s_processCount / (curTime - lastUpdateMetricTime)); - sMonitor->UpdateMetric("process_bytes_ps", 1.0 * s_processBytes / (curTime - lastUpdateMetricTime)); - sMonitor->UpdateMetric("process_lines_ps", 1.0 * s_processLines / (curTime - lastUpdateMetricTime)); - lastUpdateMetricTime = curTime; - s_processCount = 0; - s_processBytes = 0; - s_processLines = 0; - - // update process queue status - uint32_t InvalidProcessQueueTotal = ProcessQueueManager::GetInstance()->GetInvalidCnt(); - sMonitor->UpdateMetric("process_queue_full", InvalidProcessQueueTotal); - mAgentProcessQueueFullTotal->Set(InvalidProcessQueueTotal); - uint32_t ProcessQueueTotal = ProcessQueueManager::GetInstance()->GetCnt(); - sMonitor->UpdateMetric("process_queue_total", ProcessQueueTotal); - mAgentProcessQueueTotal->Set(ProcessQueueTotal); - if (ExactlyOnceQueueManager::GetInstance()->GetProcessQueueCnt() > 0) { - sMonitor->UpdateMetric("eo_process_queue_full", - ExactlyOnceQueueManager::GetInstance()->GetInvalidProcessQueueCnt()); - sMonitor->UpdateMetric("eo_process_queue_total", - ExactlyOnceQueueManager::GetInstance()->GetProcessQueueCnt()); - } - } - { - ReadLock lock(mAccessProcessThreadRWL); - + sLastRunTime->Set(curTime); unique_ptr item; string configName; if 
(!ProcessQueueManager::GetInstance()->PopItem(threadNo, item, configName)) { + if (mIsFlush && ProcessQueueManager::GetInstance()->IsAllQueueEmpty()) { + break; + } ProcessQueueManager::GetInstance()->Wait(100); continue; } - mThreadFlags[threadNo] = true; - auto pipeline = PipelineManager::GetInstance()->FindConfigByName(configName); + sInEventsCnt->Add(item->mEventGroup.GetEvents().size()); + sInGroupsCnt->Add(1); + sInGroupDataSizeBytes->Add(item->mEventGroup.DataSize()); + + shared_ptr pipeline = item->mPipeline; + if (!pipeline) { + pipeline = PipelineManager::GetInstance()->FindConfigByName(configName); + } if (!pipeline) { LOG_INFO(sLogger, ("pipeline not found during processing, perhaps due to config deletion", @@ -227,15 +162,6 @@ void* LogProcess::ProcessLoop(int32_t threadNo) { pipeline->GetContext().GetRegion()); } - s_processCount++; - if (isLog) { - s_processBytes += profile.readBytes; - s_processLines += profile.splitLines; - } - - if (eventGroupList.empty()) { - continue; - } if (pipeline->IsFlushingThroughGoPipeline()) { if (isLog) { for (auto& group : eventGroupList) { @@ -294,13 +220,13 @@ void* LogProcess::ProcessLoop(int32_t threadNo) { } pipeline->Send(std::move(eventGroupList)); } + pipeline->SubInProcessCnt(); } } - LOG_WARNING(sLogger, ("runner/LogProcess.hread", "Exit")("threadNo", threadNo)); - return NULL; + LOG_WARNING(sLogger, ("ProcessorRunnerThread", "Exit")("threadNo", threadNo)); } -bool LogProcess::Serialize( +bool ProcessorRunner::Serialize( const PipelineEventGroup& group, bool enableNanosecond, const string& logstore, string& res, string& errorMsg) { sls_logs::LogGroup logGroup; for (const auto& e : group.GetEvents()) { diff --git a/core/runner/ProcessorRunner.h b/core/runner/ProcessorRunner.h new file mode 100644 index 0000000000..28cdf72fea --- /dev/null +++ b/core/runner/ProcessorRunner.h @@ -0,0 +1,69 @@ +/* + * Copyright 2024 iLogtail Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you 
may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include +#include +#include + +#include "common/Lock.h" +#include "models/PipelineEventGroup.h" +#include "monitor/Monitor.h" +#include "queue/QueueKey.h" + +namespace logtail { + +class ProcessorRunner { +public: + ProcessorRunner(const ProcessorRunner&) = delete; + ProcessorRunner& operator=(const ProcessorRunner&) = delete; + + static ProcessorRunner* GetInstance() { + static ProcessorRunner instance; + return &instance; + } + + void Init(); + void Stop(); + + bool PushQueue(QueueKey key, size_t inputIndex, PipelineEventGroup&& group, uint32_t retryTimes = 1); + +private: + ProcessorRunner(); + ~ProcessorRunner() = default; + + void Run(uint32_t threadNo); + + bool Serialize(const PipelineEventGroup& group, + bool enableNanosecond, + const std::string& logstore, + std::string& res, + std::string& errorMsg); + + uint32_t mThreadCount = 1; + std::vector> mThreadRes; + std::atomic_bool mIsFlush = false; + + thread_local static MetricsRecordRef sMetricsRecordRef; + thread_local static CounterPtr sInGroupsCnt; + thread_local static CounterPtr sInEventsCnt; + thread_local static CounterPtr sInGroupDataSizeBytes; + thread_local static IntGaugePtr sLastRunTime; +}; + +} // namespace logtail diff --git a/core/runner/sink/http/HttpSink.cpp b/core/runner/sink/http/HttpSink.cpp index 0a5e7de3d6..09c3e74736 100644 --- a/core/runner/sink/http/HttpSink.cpp +++ b/core/runner/sink/http/HttpSink.cpp @@ -18,6 +18,7 @@ #include "common/StringTools.h" #include 
"common/http/Curl.h" #include "logger/Logger.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/plugin/interface/HttpFlusher.h" #include "pipeline/queue/QueueKeyManager.h" #include "pipeline/queue/SenderQueueItem.h" @@ -33,6 +34,19 @@ bool HttpSink::Init() { LOG_ERROR(sLogger, ("failed to init http sink", "failed to init curl multi client")); return false; } + + WriteMetrics::GetInstance()->PrepareMetricsRecordRef(mMetricsRecordRef, + {{METRIC_LABEL_KEY_RUNNER_NAME, METRIC_LABEL_VALUE_RUNNER_NAME_HTTP_SINK}}); + mInItemsTotal = mMetricsRecordRef.CreateCounter(METRIC_RUNNER_IN_ITEMS_TOTAL); + mLastRunTime = mMetricsRecordRef.CreateIntGauge(METRIC_RUNNER_LAST_RUN_TIME); + mOutSuccessfulItemsTotal = mMetricsRecordRef.CreateCounter(METRIC_RUNNER_SINK_OUT_SUCCESSFUL_ITEMS_TOTAL); + mOutFailedItemsTotal = mMetricsRecordRef.CreateCounter(METRIC_RUNNER_SINK_OUT_FAILED_ITEMS_TOTAL); + mSendingItemsTotal = mMetricsRecordRef.CreateIntGauge(METRIC_RUNNER_SINK_SENDING_ITEMS_TOTAL); + mSendConcurrency = mMetricsRecordRef.CreateIntGauge(METRIC_RUNNER_SINK_SEND_CONCURRENCY); + + // TODO: should be dynamic + mSendConcurrency->Set(AppConfig::GetInstance()->GetSendRequestConcurrency()); + mThreadRes = async(launch::async, &HttpSink::Run, this); return true; } @@ -48,9 +62,13 @@ void HttpSink::Stop() { } void HttpSink::Run() { + LOG_INFO(sLogger, ("http sink", "started")); while (true) { + mLastRunTime->Set( + chrono::duration_cast(chrono::system_clock::now().time_since_epoch()).count()); unique_ptr request; if (mQueue.WaitAndPop(request, 500)) { + mInItemsTotal->Add(1); LOG_DEBUG( sLogger, ("got item from flusher runner, item address", request->mItem)( @@ -59,6 +77,7 @@ void HttpSink::Run() { if (!AddRequestToClient(std::move(request))) { continue; } + mSendingItemsTotal->Add(1); } else if (mIsFlush && mQueue.Empty()) { break; } else { @@ -72,7 +91,7 @@ void HttpSink::Run() { } } -bool HttpSink::AddRequestToClient(std::unique_ptr&& request) { +bool 
HttpSink::AddRequestToClient(unique_ptr&& request) { curl_slist* headers = nullptr; CURL* curl = CreateCurlHandler(request->mMethod, request->mHTTPSFlag, @@ -88,8 +107,9 @@ bool HttpSink::AddRequestToClient(std::unique_ptr&& request) { AppConfig::GetInstance()->IsHostIPReplacePolicyEnabled(), AppConfig::GetInstance()->GetBindInterface()); if (curl == nullptr) { - request->mItem->mStatus = SendingStatus::IDLE; + request->mItem->mStatus.Set(SendingStatus::IDLE); FlusherRunner::GetInstance()->DecreaseHttpSendingCnt(); + mOutFailedItemsTotal->Add(1); LOG_ERROR(sLogger, ("failed to send request", "failed to init curl handler")( "action", "put sender queue item back to sender queue")("item address", request->mItem)( @@ -99,13 +119,14 @@ bool HttpSink::AddRequestToClient(std::unique_ptr&& request) { request->mPrivateData = headers; curl_easy_setopt(curl, CURLOPT_PRIVATE, request.get()); - request->mLastSendTime = std::chrono::system_clock::now(); + request->mLastSendTime = chrono::system_clock::now(); auto res = curl_multi_add_handle(mClient, curl); if (res != CURLM_OK) { - request->mItem->mStatus = SendingStatus::IDLE; + request->mItem->mStatus.Set(SendingStatus::IDLE); FlusherRunner::GetInstance()->DecreaseHttpSendingCnt(); curl_easy_cleanup(curl); + mOutFailedItemsTotal->Add(1); LOG_ERROR(sLogger, ("failed to send request", "failed to add the easy curl handle to multi_handle")("errMsg", curl_multi_strerror(res))( @@ -122,6 +143,8 @@ void HttpSink::DoRun() { CURLMcode mc; int runningHandlers = 1; while (runningHandlers) { + auto curTime = chrono::system_clock::now(); + mLastRunTime->Set(chrono::duration_cast(curTime.time_since_epoch()).count()); if ((mc = curl_multi_perform(mClient, &runningHandlers)) != CURLM_OK) { LOG_ERROR( sLogger, @@ -133,6 +156,7 @@ void HttpSink::DoRun() { unique_ptr request; if (mQueue.TryPop(request)) { + mInItemsTotal->Add(1); LOG_DEBUG( sLogger, ("got item from flusher runner, item address", request->mItem)( @@ -140,6 +164,7 @@ void 
HttpSink::DoRun() { "wait time", ToString(time(nullptr) - request->mEnqueTime))("try cnt", ToString(request->mTryCnt))); if (AddRequestToClient(std::move(request))) { ++runningHandlers; + mSendingItemsTotal->Add(1); } } @@ -191,10 +216,13 @@ void HttpSink::HandleCompletedRequests() { HttpSinkRequest* request = nullptr; curl_easy_getinfo(handler, CURLINFO_PRIVATE, &request); LOG_DEBUG(sLogger, - ("send http request completed, item address", request->mItem) - ("config-flusher-dst", QueueKeyManager::GetInstance()->GetName(request->mItem->mQueueKey)) - ("response time",ToString(chrono::duration_cast(chrono::system_clock::now()- request->mLastSendTime).count()) + "ms") - ("try cnt", ToString(request->mTryCnt))); + ("send http request completed, item address", request->mItem)( + "config-flusher-dst", QueueKeyManager::GetInstance()->GetName(request->mItem->mQueueKey))( + "response time", + ToString(chrono::duration_cast(chrono::system_clock::now() + - request->mLastSendTime) + .count()) + + "ms")("try cnt", ToString(request->mTryCnt))); switch (msg->data.result) { case CURLE_OK: { long statusCode = 0; @@ -202,6 +230,8 @@ void HttpSink::HandleCompletedRequests() { request->mResponse.mStatusCode = (int32_t)statusCode; static_cast(request->mItem->mFlusher)->OnSendDone(request->mResponse, request->mItem); FlusherRunner::GetInstance()->DecreaseHttpSendingCnt(); + mOutSuccessfulItemsTotal->Add(1); + mSendingItemsTotal->Sub(1); break; } default: @@ -225,6 +255,8 @@ void HttpSink::HandleCompletedRequests() { ->OnSendDone(request->mResponse, request->mItem); FlusherRunner::GetInstance()->DecreaseHttpSendingCnt(); } + mOutFailedItemsTotal->Add(1); + mSendingItemsTotal->Sub(1); break; } curl_multi_remove_handle(mClient, handler); diff --git a/core/runner/sink/http/HttpSink.h b/core/runner/sink/http/HttpSink.h index 6e11b84855..f230655fa2 100644 --- a/core/runner/sink/http/HttpSink.h +++ b/core/runner/sink/http/HttpSink.h @@ -25,6 +25,7 @@ #include "runner/sink/Sink.h" #include 
"runner/sink/http/HttpSinkRequest.h" +#include "monitor/LogtailMetric.h" namespace logtail { @@ -55,6 +56,15 @@ class HttpSink : public Sink { std::future mThreadRes; std::atomic_bool mIsFlush = false; + mutable MetricsRecordRef mMetricsRecordRef; + CounterPtr mInItemsTotal; + CounterPtr mOutSuccessfulItemsTotal; + CounterPtr mOutFailedItemsTotal; + // CounterPtr mTotalDelayMs; // TODO: should record distribution instead of average + IntGaugePtr mSendingItemsTotal; + IntGaugePtr mSendConcurrency; + IntGaugePtr mLastRunTime; + #ifdef APSARA_UNIT_TEST_MAIN friend class FlusherRunnerUnittest; #endif diff --git a/core/sdk/Client.cpp b/core/sdk/Client.cpp index 887f207291..bc1cc16465 100644 --- a/core/sdk/Client.cpp +++ b/core/sdk/Client.cpp @@ -14,6 +14,7 @@ #include "Client.h" +#include "Common.h" #include "CurlImp.h" #include "Exception.h" #include "Result.h" @@ -219,7 +220,8 @@ namespace sdk { sls_logs::SlsCompressType compressType, const std::string& compressedLogGroup, uint32_t rawSize, - const std::string& hashKey) { + const std::string& hashKey, + bool isTimeSeries) { map httpHeader; httpHeader[CONTENT_TYPE] = TYPE_LOG_PROTOBUF; if (!mKeyProvider.empty()) { @@ -227,7 +229,11 @@ namespace sdk { } httpHeader[X_LOG_BODYRAWSIZE] = std::to_string(rawSize); httpHeader[X_LOG_COMPRESSTYPE] = Client::GetCompressTypeString(compressType); - return SynPostLogStoreLogs(project, logstore, compressedLogGroup, httpHeader, hashKey); + if (isTimeSeries) { + return SynPostMetricStoreLogs(project, logstore, compressedLogGroup, httpHeader); + } else { + return SynPostLogStoreLogs(project, logstore, compressedLogGroup, httpHeader, hashKey); + } } PostLogStoreLogsResponse Client::PostLogStoreLogPackageList(const std::string& project, @@ -253,7 +259,8 @@ namespace sdk { uint32_t rawSize, SenderQueueItem* item, const std::string& hashKey, - int64_t hashKeySeqID) { + int64_t hashKeySeqID, + bool isTimeSeries) { map httpHeader; httpHeader[CONTENT_TYPE] = TYPE_LOG_PROTOBUF; if 
(!mKeyProvider.empty()) { @@ -261,10 +268,16 @@ namespace sdk { } httpHeader[X_LOG_BODYRAWSIZE] = std::to_string(rawSize); httpHeader[X_LOG_COMPRESSTYPE] = Client::GetCompressTypeString(compressType); - return CreateAsynPostLogStoreLogsRequest( - project, logstore, compressedLogGroup, httpHeader, hashKey, hashKeySeqID, item); + if (isTimeSeries) { + return CreateAsynPostMetricStoreLogsRequest( + project, logstore, compressedLogGroup, httpHeader,item); + } else { + return CreateAsynPostLogStoreLogsRequest( + project, logstore, compressedLogGroup, httpHeader, hashKey, hashKeySeqID, item); + } } + unique_ptr Client::CreatePostLogStoreLogPackageListRequest(const std::string& project, const std::string& logstore, sls_logs::SlsCompressType compressType, @@ -314,6 +327,22 @@ namespace sdk { } } + std::unique_ptr + Client::CreateAsynPostMetricStoreLogsRequest(const std::string& project, + const std::string& logstore, + const std::string& body, + std::map& httpHeader, + SenderQueueItem* item) { + string operation = METRICSTORES; + operation.append("/").append(project).append("/").append(logstore).append("/api/v1/write"); + httpHeader[CONTENT_MD5] = CalcMD5(body); + map parameterList; + string host = GetSlsHost(); + SetCommonHeader(httpHeader, (int32_t)(body.length()), ""); + string signature = GetUrlSignature(HTTP_POST, operation, httpHeader, parameterList, body, GetAccessKey()); + httpHeader[AUTHORIZATION] = LOG_HEADSIGNATURE_PREFIX + GetAccessKeyId() + ':' + signature; + return make_unique(HTTP_POST, mUsingHTTPS, host, mPort, operation, "", httpHeader, body, item); + } unique_ptr Client::CreateAsynPostLogStoreLogsRequest(const std::string& project, const std::string& logstore, @@ -395,6 +424,24 @@ namespace sdk { return ret; } + PostLogStoreLogsResponse Client::SynPostMetricStoreLogs(const std::string& project, + const std::string& logstore, + const std::string& body, + std::map& httpHeader, + std::string* realIpPtr) { + string operation = METRICSTORES; + 
operation.append("/").append(project).append("/").append(logstore).append("/api/v1/write"); + httpHeader[CONTENT_MD5] = CalcMD5(body); + map parameterList; + HttpMessage httpResponse; + SendRequest(project, HTTP_POST, operation, body, parameterList, httpHeader, httpResponse, realIpPtr); + PostLogStoreLogsResponse ret; + ret.bodyBytes = (int32_t)body.size(); + ret.statusCode = httpResponse.statusCode; + ret.requestId = httpResponse.header[X_LOG_REQUEST_ID]; + return ret; + } + PostLogStoreLogsResponse Client::PostLogUsingWebTracking(const std::string& project, const std::string& logstore, sls_logs::SlsCompressType compressType, diff --git a/core/sdk/Client.h b/core/sdk/Client.h index 6203b8350d..54a6136c07 100644 --- a/core/sdk/Client.h +++ b/core/sdk/Client.h @@ -100,7 +100,18 @@ namespace sdk { sls_logs::SlsCompressType compressType, const std::string& compressedLogGroup, uint32_t rawSize, - const std::string& hashKey = ""); + const std::string& hashKey = "", + bool isTimeSeries = false); + + PostLogStoreLogsResponse PostMetricStoreLogs(const std::string& project, + const std::string& logstore, + sls_logs::SlsCompressType compressType, + const std::string& compressedLogGroup, + uint32_t rawSize) { + return PostLogStoreLogs(project, logstore, compressType, compressedLogGroup, rawSize, "", true); + } + + /** Sync Put data to LOG service. Unsuccessful opertaion will cause an LOGException. * @param project The project name * @param logstore The logstore name @@ -127,7 +138,27 @@ namespace sdk { uint32_t rawSize, SenderQueueItem* item, const std::string& hashKey = "", - int64_t hashKeySeqID = kInvalidHashKeySeqID); + int64_t hashKeySeqID = kInvalidHashKeySeqID, + bool isTimeSeries = false); + /** Async Put metrics data to SLS metricstore. Unsuccessful opertaion will cause an LOGException. 
+ * @param project The project name + * @param logstore The logstore name + * @param compressedLogGroup data of logGroup, LZ4 comressed + * @param rawSize before compress + * @param compressType compression type + * @return request_id. + */ + std::unique_ptr CreatePostMetricStoreLogsRequest(const std::string& project, + const std::string& logstore, + sls_logs::SlsCompressType compressType, + const std::string& compressedLogGroup, + uint32_t rawSize, + SenderQueueItem* item) { + return CreatePostLogStoreLogsRequest( + project, logstore, compressType, compressedLogGroup, rawSize, item, "", kInvalidHashKeySeqID, true); + } + + /** Async Put data to LOG service. Unsuccessful opertaion will cause an LOGException. * @param project The project name * @param logstore The logstore name @@ -171,6 +202,13 @@ namespace sdk { int64_t hashKeySeqID, SenderQueueItem* item); + std::unique_ptr + CreateAsynPostMetricStoreLogsRequest(const std::string& project, + const std::string& logstore, + const std::string& body, + std::map& httpHeader, + SenderQueueItem* item); + // PingSLSServer sends a trivial data packet to SLS for some inner purposes. 
PostLogStoreLogsResponse PingSLSServer(const std::string& project, const std::string& logstore, std::string* realIpPtr = NULL); @@ -182,6 +220,12 @@ namespace sdk { const std::string& hashKey, std::string* realIpPtr = NULL); + PostLogStoreLogsResponse SynPostMetricStoreLogs(const std::string& project, + const std::string& logstore, + const std::string& body, + std::map& httpHeader, + std::string* realIpPtr = NULL); + void SetCommonHeader(std::map& httpHeader, int32_t contentLength, const std::string& project = ""); diff --git a/core/sdk/Common.cpp b/core/sdk/Common.cpp index 431483995f..8f3a1a7890 100644 --- a/core/sdk/Common.cpp +++ b/core/sdk/Common.cpp @@ -70,6 +70,7 @@ namespace sdk { const char* const LOGE_INVALID_SEQUENCE_ID = "InvalidSequenceId"; const char* const LOGSTORES = "/logstores"; + const char* const METRICSTORES = "/prometheus"; const char* const SHARDS = "/shards"; const char* const INDEX = "/index"; const char* const CONFIGS = "/configs"; diff --git a/core/sdk/Common.h b/core/sdk/Common.h index 8e5486a38b..4dbef90123 100644 --- a/core/sdk/Common.h +++ b/core/sdk/Common.h @@ -85,6 +85,7 @@ namespace sdk { extern const char* const LOGE_INVALID_SEQUENCE_ID; //="InvalidSequenceId"; extern const char* const LOGSTORES; //= "/logstores" + extern const char* const METRICSTORES; //= "/prometheus" extern const char* const SHARDS; //= "/shards" extern const char* const INDEX; //= "/index" extern const char* const CONFIGS; //= "/configs" diff --git a/core/sdk/CurlImp.cpp b/core/sdk/CurlImp.cpp index 05d875f6e2..9cea4bb9d6 100644 --- a/core/sdk/CurlImp.cpp +++ b/core/sdk/CurlImp.cpp @@ -13,10 +13,12 @@ // limitations under the License. 
#include "CurlImp.h" -#include "Exception.h" + +#include + #include "DNSCache.h" +#include "Exception.h" #include "app_config/AppConfig.h" -#include #include "common/http/Curl.h" using namespace std; diff --git a/core/unittest/app_config/AppConfigUnittestLegal.cpp b/core/unittest/app_config/AppConfigUnittestLegal.cpp index e95028e82b..2dd1b3890f 100644 --- a/core/unittest/app_config/AppConfigUnittestLegal.cpp +++ b/core/unittest/app_config/AppConfigUnittestLegal.cpp @@ -81,7 +81,7 @@ class AppConfigUnittest : public ::testing::Test { v[key] = value; } - void testParameters(const std::string& sysConfDir); + void testParameters(const std::string& confDir); }; APSARA_UNIT_TEST_CASE(AppConfigUnittest, TestLoadEnvParameters, 0); @@ -126,11 +126,11 @@ const int32_t kPollingFileFirstWatchTimeout = 100; const int32_t kPollingModifyCheckInterval = 10000; const int32_t kPollingIgnoreFileModifyTimeout = 100; -void AppConfigUnittest::testParameters(const std::string& sysConfDir) { +void AppConfigUnittest::testParameters(const std::string& confDir) { AppConfig* appConfig = AppConfig::GetInstance(); appConfig->LoadAppConfig(STRING_FLAG(ilogtail_config)); - APSARA_TEST_EQUAL(appConfig->GetLogtailSysConfDir(), sysConfDir); + APSARA_TEST_EQUAL(appConfig->GetLoongcollectorConfDir(), confDir); APSARA_TEST_EQUAL(appConfig->IsAcceptMultiConfig(), kAccessMultiConfig); APSARA_TEST_EQUAL(appConfig->GetMaxMultiConfigSize(), kMaxMultiConfig); APSARA_TEST_EQUAL(INT32_FLAG(batch_send_interval), kBatchSendInterval); @@ -170,9 +170,9 @@ void AppConfigUnittest::testParameters(const std::string& sysConfDir) { // env > gflag void AppConfigUnittest::TestLoadEnvParameters() { - const std::string kLogtailSysConfDir = GetProcessExecutionDir(); + const std::string kConfDir = GetProcessExecutionDir(); - setEnv("logtail_sys_conf_dir", kLogtailSysConfDir); + setEnv("logtail_sys_conf_dir", kConfDir); setEnv("accept_multi_config", kAccessMultiConfig); setEnv("max_multi_config", kMaxMultiConfig); 
setEnv("batch_send_interval", kBatchSendInterval); @@ -210,17 +210,17 @@ void AppConfigUnittest::TestLoadEnvParameters() { setEnv("polling_ignore_file_modify_timeout", kPollingIgnoreFileModifyTimeout); writeLogtailConfigJSON(Json::Value(Json::objectValue)); - testParameters(kLogtailSysConfDir); + testParameters(kConfDir); unsetEnvKeys(); } -// ilogtail_config.json > gflag +// loongcollector_config.json > gflag void AppConfigUnittest::TestLoadFileParameters() { - const std::string kLogtailSysConfDir = GetProcessExecutionDir(); + const std::string kConfDir = GetProcessExecutionDir(); Json::Value value; - setJSON(value, "logtail_sys_conf_dir", kLogtailSysConfDir); + setJSON(value, "logtail_sys_conf_dir", kConfDir); setJSON(value, "accept_multi_config", kAccessMultiConfig); setJSON(value, "max_multi_config", kMaxMultiConfig); setJSON(value, "batch_send_interval", kBatchSendInterval); @@ -258,7 +258,7 @@ void AppConfigUnittest::TestLoadFileParameters() { setJSON(value, "polling_ignore_file_modify_timeout", kPollingIgnoreFileModifyTimeout); writeLogtailConfigJSON(value); - testParameters(kLogtailSysConfDir); + testParameters(kConfDir); } DEFINE_FLAG_BOOL(test_param_bool, "test_param_bool", true); @@ -274,7 +274,7 @@ DEFINE_FLAG_INT64(test_param_int64_error, "test_param_int64", 64); DEFINE_FLAG_DOUBLE(test_param_double_error, "test_param_double", 1.1); DEFINE_FLAG_STRING(test_param_string_error, "test_param_string", "str"); -// ilogtail_config.json > gflag +// loongcollector_config.json > gflag void AppConfigUnittest::TestLoadJsonAndEnvParameters() { Json::Value jsonconfig; setJSON(jsonconfig, "test_param_bool", false); diff --git a/core/unittest/batch/BatcherUnittest.cpp b/core/unittest/batch/BatcherUnittest.cpp index 799153692d..40b11b9d96 100644 --- a/core/unittest/batch/BatcherUnittest.cpp +++ b/core/unittest/batch/BatcherUnittest.cpp @@ -41,8 +41,8 @@ class BatcherUnittest : public ::testing::Test { void SetUp() override { mCtx.SetConfigName("test_config"); 
sFlusher->SetContext(mCtx); - sFlusher->SetMetricsRecordRef(FlusherMock::sName, "1", "1", "1"); - sFlusher->SetNodeID("1"); + sFlusher->SetMetricsRecordRef(FlusherMock::sName, "1"); + sFlusher->SetPluginID("1"); } void TearDown() override { TimeoutFlushManager::GetInstance()->mTimeoutRecords.clear(); } @@ -577,17 +577,17 @@ void BatcherUnittest::TestMetric() { vector res; batch.Add(std::move(g), res); APSARA_TEST_EQUAL(5U, batch.mMetricsRecordRef->GetLabels()->size()); - APSARA_TEST_TRUE(batch.mMetricsRecordRef.HasLabel(METRIC_LABEL_PROJECT, "")); - APSARA_TEST_TRUE(batch.mMetricsRecordRef.HasLabel(METRIC_LABEL_CONFIG_NAME, "test_config")); - APSARA_TEST_TRUE(batch.mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_COMPONENT_NAME, "batcher")); - APSARA_TEST_TRUE(batch.mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_FLUSHER_NODE_ID, "1")); + APSARA_TEST_TRUE(batch.mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PROJECT, "")); + APSARA_TEST_TRUE(batch.mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PIPELINE_NAME, "test_config")); + APSARA_TEST_TRUE(batch.mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_COMPONENT_NAME, METRIC_LABEL_VALUE_COMPONENT_NAME_BATCHER)); + APSARA_TEST_TRUE(batch.mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_FLUSHER_PLUGIN_ID, "1")); APSARA_TEST_TRUE(batch.mMetricsRecordRef.HasLabel("enable_group_batch", "false")); - APSARA_TEST_EQUAL(3U, batch.mInEventsCnt->GetValue()); + APSARA_TEST_EQUAL(3U, batch.mInEventsTotal->GetValue()); APSARA_TEST_EQUAL(groupSize, batch.mInGroupDataSizeBytes->GetValue()); - APSARA_TEST_EQUAL(2U, batch.mOutEventsCnt->GetValue()); - APSARA_TEST_EQUAL(1U, batch.mEventBatchItemsCnt->GetValue()); - APSARA_TEST_EQUAL(1U, batch.mBufferedGroupsCnt->GetValue()); - APSARA_TEST_EQUAL(1U, batch.mBufferedEventsCnt->GetValue()); + APSARA_TEST_EQUAL(2U, batch.mOutEventsTotal->GetValue()); + APSARA_TEST_EQUAL(1U, batch.mEventBatchItemsTotal->GetValue()); + APSARA_TEST_EQUAL(1U, batch.mBufferedGroupsTotal->GetValue()); + APSARA_TEST_EQUAL(1U, 
batch.mBufferedEventsTotal->GetValue()); APSARA_TEST_EQUAL(batchSize, batch.mBufferedDataSizeByte->GetValue()); } { @@ -607,12 +607,12 @@ void BatcherUnittest::TestMetric() { batch.Add(std::move(g), res); batch.FlushQueue(0, res[0]); APSARA_TEST_TRUE(batch.mMetricsRecordRef.HasLabel("enable_group_batch", "true")); - APSARA_TEST_EQUAL(3U, batch.mInEventsCnt->GetValue()); + APSARA_TEST_EQUAL(3U, batch.mInEventsTotal->GetValue()); APSARA_TEST_EQUAL(groupSize, batch.mInGroupDataSizeBytes->GetValue()); - APSARA_TEST_EQUAL(2U, batch.mOutEventsCnt->GetValue()); - APSARA_TEST_EQUAL(1U, batch.mEventBatchItemsCnt->GetValue()); - APSARA_TEST_EQUAL(1U, batch.mBufferedGroupsCnt->GetValue()); - APSARA_TEST_EQUAL(1U, batch.mBufferedEventsCnt->GetValue()); + APSARA_TEST_EQUAL(2U, batch.mOutEventsTotal->GetValue()); + APSARA_TEST_EQUAL(1U, batch.mEventBatchItemsTotal->GetValue()); + APSARA_TEST_EQUAL(1U, batch.mBufferedGroupsTotal->GetValue()); + APSARA_TEST_EQUAL(1U, batch.mBufferedEventsTotal->GetValue()); APSARA_TEST_EQUAL(batchSize, batch.mBufferedDataSizeByte->GetValue()); } } diff --git a/core/unittest/batch/TimeoutFlushManagerUnittest.cpp b/core/unittest/batch/TimeoutFlushManagerUnittest.cpp index 0a437d0271..da419ae7ea 100644 --- a/core/unittest/batch/TimeoutFlushManagerUnittest.cpp +++ b/core/unittest/batch/TimeoutFlushManagerUnittest.cpp @@ -31,7 +31,7 @@ class TimeoutFlushManagerUnittest : public ::testing::Test { sFlusher = make_unique(); sCtx.SetConfigName("test_config"); sFlusher->SetContext(sCtx); - sFlusher->SetMetricsRecordRef(FlusherMock::sName, "1", "1", "1"); + sFlusher->SetMetricsRecordRef(FlusherMock::sName, "1"); } void TearDown() override { TimeoutFlushManager::GetInstance()->mTimeoutRecords.clear(); } diff --git a/core/unittest/checkpoint/AdhocCheckpointManagerUnittest.cpp b/core/unittest/checkpoint/AdhocCheckpointManagerUnittest.cpp index 6cfff49e9d..d23d4a5bbf 100644 --- a/core/unittest/checkpoint/AdhocCheckpointManagerUnittest.cpp +++ 
b/core/unittest/checkpoint/AdhocCheckpointManagerUnittest.cpp @@ -32,7 +32,7 @@ class AdhocCheckpointManagerUnittest : public ::testing::Test { kTestRootDir = (bfs::path(GetProcessExecutionDir()) / "AdhocCheckpointManagerUnittest").string(); bfs::remove_all(kTestRootDir); bfs::create_directories(kTestRootDir); - AppConfig::GetInstance()->SetLogtailSysConfDir(kTestRootDir); + AppConfig::GetInstance()->SetLoongcollectorConfDir(kTestRootDir); mAdhocCheckpointManager = AdhocCheckpointManager::GetInstance(); } diff --git a/core/unittest/checkpoint/CheckpointManagerUnittest.cpp b/core/unittest/checkpoint/CheckpointManagerUnittest.cpp index 38b97631dd..864b9de15a 100644 --- a/core/unittest/checkpoint/CheckpointManagerUnittest.cpp +++ b/core/unittest/checkpoint/CheckpointManagerUnittest.cpp @@ -29,7 +29,7 @@ class CheckpointManagerUnittest : public ::testing::Test { kTestRootDir = (bfs::path(GetProcessExecutionDir()) / "CheckpointManagerUnittest").string(); bfs::remove_all(kTestRootDir); bfs::create_directories(kTestRootDir); - AppConfig::GetInstance()->SetLogtailSysConfDir(kTestRootDir); + AppConfig::GetInstance()->SetLoongcollectorConfDir(kTestRootDir); } static void TearDownTestCase() { bfs::remove_all(kTestRootDir); } diff --git a/core/unittest/checkpoint/CheckpointManagerV2Unittest.cpp b/core/unittest/checkpoint/CheckpointManagerV2Unittest.cpp index e26e676d5e..3c9f6c3fe0 100644 --- a/core/unittest/checkpoint/CheckpointManagerV2Unittest.cpp +++ b/core/unittest/checkpoint/CheckpointManagerV2Unittest.cpp @@ -39,7 +39,7 @@ class CheckpointManagerV2Unittest : public ::testing::Test { bfs::remove_all(kTestRootDir); } bfs::create_directories(kTestRootDir); - AppConfig::GetInstance()->SetLogtailSysConfDir(kTestRootDir); + AppConfig::GetInstance()->SetLoongcollectorConfDir(kTestRootDir); INT32_FLAG(logtail_checkpoint_check_gc_interval_sec) = 1; } diff --git a/core/unittest/compression/CompressorFactoryUnittest.cpp b/core/unittest/compression/CompressorFactoryUnittest.cpp 
index 84e159ee34..e9244c3904 100644 --- a/core/unittest/compression/CompressorFactoryUnittest.cpp +++ b/core/unittest/compression/CompressorFactoryUnittest.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "common/compression/CompressorFactory.h" #include "unittest/Unittest.h" @@ -96,10 +96,10 @@ void CompressorFactoryUnittest::TestMetric() { auto compressor = CompressorFactory::GetInstance()->Create(Json::Value(), mCtx, "test_plugin", mFlusherId, CompressType::LZ4); APSARA_TEST_EQUAL(4U, compressor->mMetricsRecordRef->GetLabels()->size()); - APSARA_TEST_TRUE(compressor->mMetricsRecordRef.HasLabel(METRIC_LABEL_PROJECT, "")); - APSARA_TEST_TRUE(compressor->mMetricsRecordRef.HasLabel(METRIC_LABEL_CONFIG_NAME, "test_config")); - APSARA_TEST_TRUE(compressor->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_COMPONENT_NAME, "compressor")); - APSARA_TEST_TRUE(compressor->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_FLUSHER_NODE_ID, mFlusherId)); + APSARA_TEST_TRUE(compressor->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PROJECT, "")); + APSARA_TEST_TRUE(compressor->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PIPELINE_NAME, "test_config")); + APSARA_TEST_TRUE(compressor->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_COMPONENT_NAME, METRIC_LABEL_VALUE_COMPONENT_NAME_COMPRESSOR)); + APSARA_TEST_TRUE(compressor->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_FLUSHER_PLUGIN_ID, mFlusherId)); } UNIT_TEST_CASE(CompressorFactoryUnittest, TestCreate) diff --git a/core/unittest/compression/CompressorUnittest.cpp b/core/unittest/compression/CompressorUnittest.cpp index 54576c63a4..a678d692e7 100644 --- a/core/unittest/compression/CompressorUnittest.cpp +++ b/core/unittest/compression/CompressorUnittest.cpp @@ -13,7 +13,7 @@ // limitations under the License. 
#include "common/compression/LZ4Compressor.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "unittest/Unittest.h" using namespace std; @@ -49,11 +49,11 @@ void CompressorUnittest::TestMetric() { string output; string errorMsg; compressor.DoCompress(input, output, errorMsg); - APSARA_TEST_EQUAL(1U, compressor.mInItemsCnt->GetValue()); + APSARA_TEST_EQUAL(1U, compressor.mInItemsTotal->GetValue()); APSARA_TEST_EQUAL(input.size(), compressor.mInItemSizeBytes->GetValue()); - APSARA_TEST_EQUAL(1U, compressor.mOutItemsCnt->GetValue()); + APSARA_TEST_EQUAL(1U, compressor.mOutItemsTotal->GetValue()); APSARA_TEST_EQUAL(output.size(), compressor.mOutItemSizeBytes->GetValue()); - APSARA_TEST_EQUAL(0U, compressor.mDiscardedItemsCnt->GetValue()); + APSARA_TEST_EQUAL(0U, compressor.mDiscardedItemsTotal->GetValue()); APSARA_TEST_EQUAL(0U, compressor.mDiscardedItemSizeBytes->GetValue()); } { @@ -63,11 +63,11 @@ void CompressorUnittest::TestMetric() { string output; string errorMsg; compressor.DoCompress(input, output, errorMsg); - APSARA_TEST_EQUAL(1U, compressor.mInItemsCnt->GetValue()); + APSARA_TEST_EQUAL(1U, compressor.mInItemsTotal->GetValue()); APSARA_TEST_EQUAL(input.size(), compressor.mInItemSizeBytes->GetValue()); - APSARA_TEST_EQUAL(0U, compressor.mOutItemsCnt->GetValue()); + APSARA_TEST_EQUAL(0U, compressor.mOutItemsTotal->GetValue()); APSARA_TEST_EQUAL(0U, compressor.mOutItemSizeBytes->GetValue()); - APSARA_TEST_EQUAL(1U, compressor.mDiscardedItemsCnt->GetValue()); + APSARA_TEST_EQUAL(1U, compressor.mDiscardedItemsTotal->GetValue()); APSARA_TEST_EQUAL(input.size(), compressor.mDiscardedItemSizeBytes->GetValue()); } } diff --git a/core/unittest/config/ConfigMatchUnittest.cpp b/core/unittest/config/ConfigMatchUnittest.cpp index 8402e4d215..ba5f72587c 100644 --- a/core/unittest/config/ConfigMatchUnittest.cpp +++ b/core/unittest/config/ConfigMatchUnittest.cpp @@ -90,7 +90,7 @@ class ConfigMatchUnittest : public 
::testing::Test { gDispatchThreadId = nullptr; ConfigManager::GetInstance()->CleanEnviroments(); bfs::remove("user_log_config.json"); - bfs::remove("ilogtail_config.json"); + bfs::remove("loongcollector_config.json"); bfs::remove_all(gRootDir); } @@ -569,7 +569,7 @@ class ConfigMatchUnittest : public ::testing::Test { void TestBlacklistControlCommon(const std::string& pathRoot, const std::string& logPath = "") { GenerateUserLogConfigForTestingBlacklistControl(pathRoot); - std::ofstream out("ilogtail_config.json"); + std::ofstream out("loongcollector_config.json"); out << std::string("{") << std::string("\"config_server_address\" : \"file\",") << std::string("\"data_server_address\" : \"file\",") << std::string("\"domain\" : \"\"") << std::string("}"); diff --git a/core/unittest/config/ConfigUpdateUnittest.cpp b/core/unittest/config/ConfigUpdateUnittest.cpp index 9f253ba11c..ad75101781 100644 --- a/core/unittest/config/ConfigUpdateUnittest.cpp +++ b/core/unittest/config/ConfigUpdateUnittest.cpp @@ -207,7 +207,6 @@ void ConfigUpdateUnittest::OnStartUp() const { APSARA_TEST_EQUAL(2U, diff.mAdded.size()); APSARA_TEST_TRUE(diff.mModified.empty()); APSARA_TEST_TRUE(diff.mRemoved.empty()); - APSARA_TEST_TRUE(diff.mUnchanged.empty()); PipelineManagerMock::GetInstance()->UpdatePipelines(diff); APSARA_TEST_EQUAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); @@ -223,7 +222,6 @@ void ConfigUpdateUnittest::OnConfigDelete() const { APSARA_TEST_TRUE(diff.mAdded.empty()); APSARA_TEST_TRUE(diff.mModified.empty()); APSARA_TEST_EQUAL(1U, diff.mRemoved.size()); - APSARA_TEST_TRUE(diff.mUnchanged.empty()); PipelineManagerMock::GetInstance()->UpdatePipelines(diff); APSARA_TEST_TRUE(PipelineManagerMock::GetInstance()->GetAllConfigNames().empty()); @@ -254,7 +252,6 @@ void ConfigUpdateUnittest::OnConfigToInvalidDetail() const { APSARA_TEST_EQUAL(3U, diff.mAdded.size()); APSARA_TEST_EQUAL(1U, diff.mModified.size()); APSARA_TEST_TRUE(diff.mRemoved.empty()); - 
APSARA_TEST_TRUE(diff.mUnchanged.empty()); PipelineManagerMock::GetInstance()->UpdatePipelines(diff); APSARA_TEST_EQUAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); @@ -273,7 +270,6 @@ void ConfigUpdateUnittest::OnConfigToEnabledValid() const { APSARA_TEST_EQUAL(3U, diff.mAdded.size()); APSARA_TEST_EQUAL(1U, diff.mModified.size()); APSARA_TEST_TRUE(diff.mRemoved.empty()); - APSARA_TEST_TRUE(diff.mUnchanged.empty()); PipelineManagerMock::GetInstance()->UpdatePipelines(diff); APSARA_TEST_EQUAL(4U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); @@ -292,7 +288,6 @@ void ConfigUpdateUnittest::OnConfigToDisabledValid() const { APSARA_TEST_TRUE(diff.mAdded.empty()); APSARA_TEST_TRUE(diff.mModified.empty()); APSARA_TEST_EQUAL(1U, diff.mRemoved.size()); - APSARA_TEST_TRUE(diff.mUnchanged.empty()); PipelineManagerMock::GetInstance()->UpdatePipelines(diff); APSARA_TEST_TRUE(PipelineManagerMock::GetInstance()->GetAllConfigNames().empty()); @@ -316,7 +311,6 @@ void ConfigUpdateUnittest::OnConfigUnchanged() const { APSARA_TEST_EQUAL(1U, diff.mAdded.size()); APSARA_TEST_TRUE(diff.mModified.empty()); APSARA_TEST_TRUE(diff.mRemoved.empty()); - APSARA_TEST_EQUAL(1U, diff.mUnchanged.size()); PipelineManagerMock::GetInstance()->UpdatePipelines(diff); APSARA_TEST_EQUAL(1U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); @@ -347,7 +341,6 @@ void ConfigUpdateUnittest::OnConfigAdded() const { APSARA_TEST_EQUAL(2U, diff.mAdded.size()); APSARA_TEST_TRUE(diff.mModified.empty()); APSARA_TEST_TRUE(diff.mRemoved.empty()); - APSARA_TEST_EQUAL(1U, diff.mUnchanged.size()); PipelineManagerMock::GetInstance()->UpdatePipelines(diff); APSARA_TEST_EQUAL(2U, PipelineManagerMock::GetInstance()->GetAllConfigNames().size()); diff --git a/core/unittest/config/ConfigUpdatorUnittest.cpp b/core/unittest/config/ConfigUpdatorUnittest.cpp index c8c9b8fbdb..9d54bb2b83 100644 --- a/core/unittest/config/ConfigUpdatorUnittest.cpp +++ 
b/core/unittest/config/ConfigUpdatorUnittest.cpp @@ -210,7 +210,7 @@ class ConfigUpdatorUnittest : public ::testing::Test { void TearDown() { AppConfig::GetInstance()->ReadFlagsFromMap(flagMap); bfs::remove_all(mRootDir); - bfs::remove(mRootDir + PS + "ilogtail_config.json"); + bfs::remove(mRootDir + PS + "loongcollector_config.json"); bfs::remove(STRING_FLAG(check_point_filename)); } @@ -289,7 +289,7 @@ void ConfigUpdatorUnittest::SetUpTestCase() { INT32_FLAG(dirfile_check_interval_ms) = 1000; #endif Sender::Instance()->MockAsyncSend = MockAsyncSend; - bfs::remove("ilogtail_config.json"); + bfs::remove("loongcollector_config.json"); mRootDir = GetProcessExecutionDir(); if (PATH_SEPARATOR[0] == mRootDir.at(mRootDir.size() - 1)) mRootDir.resize(mRootDir.size() - 1); @@ -827,7 +827,7 @@ void ConfigUpdatorUnittest::CaseCleanup() { ConfigManager::GetInstance()->CleanEnviroments(); Sender::Instance()->RemoveSender(); bfs::remove_all(mRootDir); - bfs::remove("ilogtail_config.json"); + bfs::remove("loongcollector_config.json"); gDispatchThreadId->join(); gDispatchThreadId = nullptr; diff --git a/core/unittest/config/ConfigWatcherUnittest.cpp b/core/unittest/config/ConfigWatcherUnittest.cpp index 06592fd275..19cee7d545 100644 --- a/core/unittest/config/ConfigWatcherUnittest.cpp +++ b/core/unittest/config/ConfigWatcherUnittest.cpp @@ -43,8 +43,8 @@ class ConfigWatcherUnittest : public testing::Test { static const filesystem::path instanceConfigDir; }; -const filesystem::path ConfigWatcherUnittest::configDir = "./config"; -const filesystem::path ConfigWatcherUnittest::instanceConfigDir = "./instanceconfig"; +const filesystem::path ConfigWatcherUnittest::configDir = "./pipeline_config"; +const filesystem::path ConfigWatcherUnittest::instanceConfigDir = "./instance_config"; void ConfigWatcherUnittest::InvalidConfigDirFound() const { { @@ -60,10 +60,10 @@ void ConfigWatcherUnittest::InvalidConfigDirFound() const { InstanceConfigDiff diff = 
ConfigWatcher::GetInstance()->CheckInstanceConfigDiff(); APSARA_TEST_TRUE(diff.IsEmpty()); - { ofstream fout("instanceconfig"); } + { ofstream fout("instance_config"); } diff = ConfigWatcher::GetInstance()->CheckInstanceConfigDiff(); APSARA_TEST_TRUE(diff.IsEmpty()); - filesystem::remove("instanceconfig"); + filesystem::remove("instance_config"); } } @@ -140,7 +140,7 @@ void ConfigWatcherUnittest::DuplicateConfigs() const { ConfigWatcher::GetInstance()->AddInstanceSource("dir1"); ConfigWatcher::GetInstance()->AddInstanceSource("dir2"); - filesystem::create_directories("instanceconfig"); + filesystem::create_directories("instance_config"); filesystem::create_directories("dir1"); filesystem::create_directories("dir2"); @@ -162,7 +162,7 @@ void ConfigWatcherUnittest::DuplicateConfigs() const { filesystem::remove_all("dir1"); filesystem::remove_all("dir2"); - filesystem::remove_all("instanceconfig"); + filesystem::remove_all("instance_config"); PluginRegistry::GetInstance()->UnloadPlugins(); } } diff --git a/core/unittest/flusher/FlusherSLSUnittest.cpp b/core/unittest/flusher/FlusherSLSUnittest.cpp index 71e13cbcc6..a760f5b8cd 100644 --- a/core/unittest/flusher/FlusherSLSUnittest.cpp +++ b/core/unittest/flusher/FlusherSLSUnittest.cpp @@ -92,7 +92,7 @@ void FlusherSLSUnittest::OnSuccessfulInit() { #endif flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); APSARA_TEST_TRUE(optionalGoPipeline.isNull()); APSARA_TEST_EQUAL("test_project", flusher->mProject); @@ -104,7 +104,7 @@ void FlusherSLSUnittest::OnSuccessfulInit() { APSARA_TEST_EQUAL("", flusher->mEndpoint); #endif APSARA_TEST_EQUAL("", flusher->mAliuid); - APSARA_TEST_EQUAL(FlusherSLS::TelemetryType::LOG, flusher->mTelemetryType); + APSARA_TEST_EQUAL(sls_logs::SlsTelemetryType::SLS_TELEMETRY_TYPE_LOGS, 
flusher->mTelemetryType); APSARA_TEST_TRUE(flusher->mShardHashKeys.empty()); APSARA_TEST_EQUAL(static_cast(INT32_FLAG(merge_log_count_limit)), flusher->mBatcher.GetEventFlushStrategy().GetMaxCnt()); @@ -125,7 +125,7 @@ void FlusherSLSUnittest::OnSuccessfulInit() { auto que = SenderQueueManager::GetInstance()->GetQueue(flusher->GetQueueKey()); APSARA_TEST_NOT_EQUAL(nullptr, que); APSARA_TEST_FALSE(que->GetRateLimiter().has_value()); - APSARA_TEST_EQUAL(2U, que->GetConcurrencyLimiters().size()); + APSARA_TEST_EQUAL(3U, que->GetConcurrencyLimiters().size()); // valid optional param configStr = R"( @@ -145,7 +145,7 @@ void FlusherSLSUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL("cn-hangzhou", flusher->mRegion); #ifdef __ENTERPRISE__ @@ -154,7 +154,7 @@ void FlusherSLSUnittest::OnSuccessfulInit() { APSARA_TEST_EQUAL("cn-hangzhou.log.aliyuncs.com", flusher->mEndpoint); APSARA_TEST_EQUAL("", flusher->mAliuid); #endif - APSARA_TEST_EQUAL(FlusherSLS::TelemetryType::METRIC, flusher->mTelemetryType); + APSARA_TEST_EQUAL(sls_logs::SlsTelemetryType::SLS_TELEMETRY_TYPE_METRICS, flusher->mTelemetryType); APSARA_TEST_EQUAL(1U, flusher->mShardHashKeys.size()); SenderQueueManager::GetInstance()->Clear(); @@ -178,14 +178,14 @@ void FlusherSLSUnittest::OnSuccessfulInit() { #endif flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(STRING_FLAG(default_region_name), flusher->mRegion); #ifdef __ENTERPRISE__ APSARA_TEST_EQUAL("", flusher->mEndpoint); #endif 
APSARA_TEST_EQUAL("", flusher->mAliuid); - APSARA_TEST_EQUAL(FlusherSLS::TelemetryType::LOG, flusher->mTelemetryType); + APSARA_TEST_EQUAL(sls_logs::SlsTelemetryType::SLS_TELEMETRY_TYPE_LOGS, flusher->mTelemetryType); APSARA_TEST_TRUE(flusher->mShardHashKeys.empty()); SenderQueueManager::GetInstance()->Clear(); @@ -204,7 +204,7 @@ void FlusherSLSUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(STRING_FLAG(default_region_name), flusher->mRegion); EnterpriseConfigProvider::GetInstance()->mIsPrivateCloud = false; @@ -224,7 +224,7 @@ void FlusherSLSUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL("cn-hangzhou.log.aliyuncs.com", flusher->mEndpoint); auto iter = SLSClientManager::GetInstance()->mRegionEndpointEntryMap.find("cn-hangzhou"); @@ -247,9 +247,9 @@ void FlusherSLSUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(FlusherSLS::TelemetryType::LOG, flusher->mTelemetryType); + APSARA_TEST_EQUAL(sls_logs::SlsTelemetryType::SLS_TELEMETRY_TYPE_LOGS, flusher->mTelemetryType); SenderQueueManager::GetInstance()->Clear(); configStr = R"( @@ -265,9 
+265,9 @@ void FlusherSLSUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); - APSARA_TEST_EQUAL(FlusherSLS::TelemetryType::LOG, flusher->mTelemetryType); + APSARA_TEST_EQUAL(sls_logs::SlsTelemetryType::SLS_TELEMETRY_TYPE_LOGS, flusher->mTelemetryType); SenderQueueManager::GetInstance()->Clear(); // ShardHashKeys @@ -287,7 +287,7 @@ void FlusherSLSUnittest::OnSuccessfulInit() { flusher.reset(new FlusherSLS()); ctx.SetExactlyOnceFlag(true); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); APSARA_TEST_TRUE(flusher->mShardHashKeys.empty()); ctx.SetExactlyOnceFlag(false); @@ -309,7 +309,7 @@ void FlusherSLSUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); APSARA_TEST_FALSE(flusher->mBatcher.GetGroupFlushStrategy().has_value()); SenderQueueManager::GetInstance()->Clear(); @@ -327,7 +327,7 @@ void FlusherSLSUnittest::OnSuccessfulInit() { flusher.reset(new FlusherSLS()); ctx.SetExactlyOnceFlag(true); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); APSARA_TEST_FALSE(flusher->mBatcher.GetGroupFlushStrategy().has_value()); APSARA_TEST_EQUAL(nullptr, 
SenderQueueManager::GetInstance()->GetQueue(flusher->GetQueueKey())); @@ -362,7 +362,7 @@ void FlusherSLSUnittest::OnSuccessfulInit() { pipeline.mPluginID.store(4); flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_TRUE(flusher->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(optionalGoPipelineJson.toStyledString(), optionalGoPipeline.toStyledString()); SenderQueueManager::GetInstance()->Clear(); @@ -384,7 +384,7 @@ void FlusherSLSUnittest::OnFailedInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_FALSE(flusher->Init(configJson, optionalGoPipeline)); configStr = R"( @@ -398,7 +398,7 @@ void FlusherSLSUnittest::OnFailedInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_FALSE(flusher->Init(configJson, optionalGoPipeline)); // invalid Logstore @@ -412,7 +412,7 @@ void FlusherSLSUnittest::OnFailedInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_FALSE(flusher->Init(configJson, optionalGoPipeline)); configStr = R"( @@ -426,7 +426,7 @@ void FlusherSLSUnittest::OnFailedInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + 
flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_FALSE(flusher->Init(configJson, optionalGoPipeline)); #ifndef __ENTERPRISE__ @@ -441,7 +441,7 @@ void FlusherSLSUnittest::OnFailedInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_FALSE(flusher->Init(configJson, optionalGoPipeline)); configStr = R"( @@ -455,7 +455,7 @@ void FlusherSLSUnittest::OnFailedInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); flusher.reset(new FlusherSLS()); flusher->SetContext(ctx); - flusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + flusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); APSARA_TEST_FALSE(flusher->Init(configJson, optionalGoPipeline)); #endif } @@ -467,6 +467,7 @@ void FlusherSLSUnittest::OnPipelineUpdate() { Json::Value configJson, optionalGoPipeline; FlusherSLS flusher1; flusher1.SetContext(ctx1); + flusher1.SetMetricsRecordRef(FlusherSLS::sName, "1"); string configStr, errorMsg; configStr = R"( @@ -490,6 +491,7 @@ void FlusherSLSUnittest::OnPipelineUpdate() { ctx2.SetConfigName("test_config_2"); FlusherSLS flusher2; flusher2.SetContext(ctx2); + flusher2.SetMetricsRecordRef(FlusherSLS::sName, "1"); configStr = R"( { "Type": "flusher_sls", @@ -523,6 +525,7 @@ void FlusherSLSUnittest::OnPipelineUpdate() { ctx2.SetConfigName("test_config_1"); FlusherSLS flusher2; flusher2.SetContext(ctx2); + flusher2.SetMetricsRecordRef(FlusherSLS::sName, "1"); configStr = R"( { "Type": "flusher_sls", @@ -568,6 +571,7 @@ void FlusherSLSUnittest::TestSend() { ctx.SetConfigName("test_config"); ctx.SetExactlyOnceFlag(true); flusher.SetContext(ctx); + flusher.SetMetricsRecordRef(FlusherSLS::sName, "1"); flusher.Init(configJson, optionalGoPipeline); // create exactly once queue @@ -605,7 +609,7 @@ void 
FlusherSLSUnittest::TestSend() { APSARA_TEST_TRUE(flusher.Send(std::move(group))); vector res; - ExactlyOnceQueueManager::GetInstance()->GetAllAvailableSenderQueueItems(res); + ExactlyOnceQueueManager::GetInstance()->GetAvailableSenderQueueItems(res, 80); APSARA_TEST_EQUAL(1U, res.size()); auto item = static_cast(res[0]); APSARA_TEST_EQUAL(RawDataType::EVENT_GROUP, item->mType); @@ -659,7 +663,7 @@ void FlusherSLSUnittest::TestSend() { APSARA_TEST_TRUE(flusher.Send(std::move(group))); vector res; - ExactlyOnceQueueManager::GetInstance()->GetAllAvailableSenderQueueItems(res); + ExactlyOnceQueueManager::GetInstance()->GetAvailableSenderQueueItems(res, 80); APSARA_TEST_EQUAL(1U, res.size()); auto item = static_cast(res[0]); APSARA_TEST_EQUAL(RawDataType::EVENT_GROUP, item->mType); @@ -714,13 +718,14 @@ void FlusherSLSUnittest::TestSend() { ParseJsonTable(configStr, configJson, errorMsg); FlusherSLS flusher; flusher.SetContext(ctx); + flusher.SetMetricsRecordRef(FlusherSLS::sName, "1"); flusher.Init(configJson, optionalGoPipeline); { // empty group PipelineEventGroup group(make_shared()); APSARA_TEST_TRUE(flusher.Send(std::move(group))); vector res; - SenderQueueManager::GetInstance()->GetAllAvailableItems(res); + SenderQueueManager::GetInstance()->GetAvailableItems(res, 80); APSARA_TEST_TRUE(res.empty()); } { @@ -739,7 +744,7 @@ void FlusherSLSUnittest::TestSend() { APSARA_TEST_TRUE(flusher.Send(std::move(group))); vector res; - SenderQueueManager::GetInstance()->GetAllAvailableItems(res); + SenderQueueManager::GetInstance()->GetAvailableItems(res, 80); APSARA_TEST_EQUAL(1U, res.size()); auto item = static_cast(res[0]); APSARA_TEST_EQUAL(RawDataType::EVENT_GROUP, item->mType); @@ -805,6 +810,7 @@ void FlusherSLSUnittest::TestSend() { ParseJsonTable(configStr, configJson, errorMsg); FlusherSLS flusher; flusher.SetContext(ctx); + flusher.SetMetricsRecordRef(FlusherSLS::sName, "1"); flusher.Init(configJson, optionalGoPipeline); PipelineEventGroup group(make_shared()); @@ 
-833,7 +839,7 @@ void FlusherSLSUnittest::TestSend() { APSARA_TEST_TRUE(flusher.Send(std::move(group))); vector res; - SenderQueueManager::GetInstance()->GetAllAvailableItems(res); + SenderQueueManager::GetInstance()->GetAvailableItems(res, 80); APSARA_TEST_EQUAL(1U, res.size()); auto item = static_cast(res[0]); APSARA_TEST_EQUAL(RawDataType::EVENT_GROUP_LIST, item->mType); @@ -882,7 +888,7 @@ void FlusherSLSUnittest::TestSend() { SenderQueueManager::GetInstance()->RemoveItem(item->mQueueKey, item); flusher.FlushAll(); res.clear(); - SenderQueueManager::GetInstance()->GetAllAvailableItems(res); + SenderQueueManager::GetInstance()->GetAvailableItems(res, 80); for (auto& tmp : res) { SenderQueueManager::GetInstance()->RemoveItem(tmp->mQueueKey, tmp); } @@ -906,6 +912,7 @@ void FlusherSLSUnittest::TestFlush() { ParseJsonTable(configStr, configJson, errorMsg); FlusherSLS flusher; flusher.SetContext(ctx); + flusher.SetMetricsRecordRef(FlusherSLS::sName, "1"); flusher.Init(configJson, optionalGoPipeline); PipelineEventGroup group(make_shared()); @@ -925,11 +932,11 @@ void FlusherSLSUnittest::TestFlush() { flusher.Flush(batchKey); vector res; - SenderQueueManager::GetInstance()->GetAllAvailableItems(res); + SenderQueueManager::GetInstance()->GetAvailableItems(res, 80); APSARA_TEST_EQUAL(0U, res.size()); flusher.Flush(0); - SenderQueueManager::GetInstance()->GetAllAvailableItems(res); + SenderQueueManager::GetInstance()->GetAvailableItems(res, 80); APSARA_TEST_EQUAL(1U, res.size()); } @@ -949,6 +956,7 @@ void FlusherSLSUnittest::TestFlushAll() { ParseJsonTable(configStr, configJson, errorMsg); FlusherSLS flusher; flusher.SetContext(ctx); + flusher.SetMetricsRecordRef(FlusherSLS::sName, "1"); flusher.Init(configJson, optionalGoPipeline); PipelineEventGroup group(make_shared()); @@ -967,7 +975,7 @@ void FlusherSLSUnittest::TestFlushAll() { flusher.FlushAll(); vector res; - SenderQueueManager::GetInstance()->GetAllAvailableItems(res); + 
SenderQueueManager::GetInstance()->GetAvailableItems(res, 80); APSARA_TEST_EQUAL(1U, res.size()); } @@ -1000,12 +1008,13 @@ void FlusherSLSUnittest::OnGoPipelineSend() { ParseJsonTable(configStr, configJson, errorMsg); FlusherSLS flusher; flusher.SetContext(ctx); + flusher.SetMetricsRecordRef(FlusherSLS::sName, "1"); flusher.Init(configJson, optionalGoPipeline); { APSARA_TEST_TRUE(flusher.Send("content", "shardhash_key", "other_logstore")); vector res; - SenderQueueManager::GetInstance()->GetAllAvailableItems(res); + SenderQueueManager::GetInstance()->GetAvailableItems(res, 80); APSARA_TEST_EQUAL(1U, res.size()); auto item = static_cast(res[0]); @@ -1027,7 +1036,7 @@ void FlusherSLSUnittest::OnGoPipelineSend() { APSARA_TEST_TRUE(flusher.Send("content", "shardhash_key", "")); vector res; - SenderQueueManager::GetInstance()->GetAllAvailableItems(res); + SenderQueueManager::GetInstance()->GetAvailableItems(res, 80); APSARA_TEST_EQUAL(1U, res.size()); auto item = static_cast(res[0]); @@ -1049,7 +1058,7 @@ void FlusherSLSUnittest::OnGoPipelineSend() { APSARA_TEST_NOT_EQUAL(nullptr, SenderQueueManager::GetInstance()->GetQueue(key)); vector res; - SenderQueueManager::GetInstance()->GetAllAvailableItems(res); + SenderQueueManager::GetInstance()->GetAvailableItems(res, 80); APSARA_TEST_EQUAL(1U, res.size()); auto item = static_cast(res[0]); @@ -1078,6 +1087,7 @@ UNIT_TEST_CASE(FlusherSLSUnittest, TestFlushAll) UNIT_TEST_CASE(FlusherSLSUnittest, TestAddPackId) UNIT_TEST_CASE(FlusherSLSUnittest, OnGoPipelineSend) + } // namespace logtail UNIT_TEST_MAIN diff --git a/core/unittest/input/InputContainerStdioUnittest.cpp b/core/unittest/input/InputContainerStdioUnittest.cpp index 94e2a89819..60d353b86c 100644 --- a/core/unittest/input/InputContainerStdioUnittest.cpp +++ b/core/unittest/input/InputContainerStdioUnittest.cpp @@ -121,7 +121,7 @@ void InputContainerStdioUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); input.reset(new 
InputContainerStdio()); input->SetContext(ctx); - input->SetMetricsRecordRef(InputContainerStdio::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputContainerStdio::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); // valid optional param @@ -136,7 +136,7 @@ void InputContainerStdioUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); input.reset(new InputContainerStdio()); input->SetContext(ctx); - input->SetMetricsRecordRef(InputContainerStdio::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputContainerStdio::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); // invalid optional param @@ -149,7 +149,7 @@ void InputContainerStdioUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); input.reset(new InputContainerStdio()); input->SetContext(ctx); - input->SetMetricsRecordRef(InputContainerStdio::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputContainerStdio::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); // TailingAllMatchedFiles @@ -162,7 +162,7 @@ void InputContainerStdioUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); input.reset(new InputContainerStdio()); input->SetContext(ctx); - input->SetMetricsRecordRef(InputContainerStdio::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputContainerStdio::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_TRUE(input->mFileReader.mTailingAllMatchedFiles); @@ -174,7 +174,7 @@ void InputContainerStdioUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); input.reset(new InputContainerStdio()); input->SetContext(ctx); - input->SetMetricsRecordRef(InputContainerStdio::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputContainerStdio::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, 
optionalGoPipeline)); } @@ -217,7 +217,7 @@ void InputContainerStdioUnittest::OnEnableContainerDiscovery() { PluginInstance::PluginMeta meta = ctx.GetPipeline().GenNextPluginMeta(false); input.reset(new InputContainerStdio()); input->SetContext(ctx); - input->SetMetricsRecordRef(InputContainerStdio::sName, meta.mPluginID, meta.mNodeID, meta.mChildNodeID); + input->SetMetricsRecordRef(InputContainerStdio::sName, meta.mPluginID); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(optionalGoPipelineJson.toStyledString(), optionalGoPipeline.toStyledString()); } @@ -234,7 +234,7 @@ void InputContainerStdioUnittest::OnPipelineUpdate() { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); input.SetContext(ctx); - input.SetMetricsRecordRef(InputContainerStdio::sName, "1", "1", "1"); + input.SetMetricsRecordRef(InputContainerStdio::sName, "1"); APSARA_TEST_TRUE(input.Init(configJson, optionalGoPipeline)); APSARA_TEST_TRUE(input.Start()); diff --git a/core/unittest/input/InputFileUnittest.cpp b/core/unittest/input/InputFileUnittest.cpp index 1aa6f2b5d7..17b334831b 100644 --- a/core/unittest/input/InputFileUnittest.cpp +++ b/core/unittest/input/InputFileUnittest.cpp @@ -83,7 +83,7 @@ void InputFileUnittest::OnSuccessfulInit() { input.reset(new InputFile()); ctx.SetExactlyOnceFlag(false); input->SetContext(ctx); - input->SetMetricsRecordRef(InputFile::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputFile::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_FALSE(input->mEnableContainerDiscovery); APSARA_TEST_EQUAL(0U, input->mMaxCheckpointDirSearchDepth); @@ -105,7 +105,7 @@ void InputFileUnittest::OnSuccessfulInit() { input.reset(new InputFile()); ctx.SetExactlyOnceFlag(false); input->SetContext(ctx); - input->SetMetricsRecordRef(InputFile::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputFile::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); 
APSARA_TEST_TRUE(input->mEnableContainerDiscovery); APSARA_TEST_EQUAL(1U, input->mMaxCheckpointDirSearchDepth); @@ -127,7 +127,7 @@ void InputFileUnittest::OnSuccessfulInit() { input.reset(new InputFile()); ctx.SetExactlyOnceFlag(false); input->SetContext(ctx); - input->SetMetricsRecordRef(InputFile::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputFile::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_FALSE(input->mEnableContainerDiscovery); APSARA_TEST_EQUAL(0U, input->mMaxCheckpointDirSearchDepth); @@ -147,7 +147,7 @@ void InputFileUnittest::OnSuccessfulInit() { input.reset(new InputFile()); ctx.SetExactlyOnceFlag(false); input->SetContext(ctx); - input->SetMetricsRecordRef(InputFile::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputFile::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_TRUE(input->mFileReader.mTailingAllMatchedFiles); APSARA_TEST_TRUE(input->mFileDiscovery.IsTailingAllMatchedFiles()); @@ -165,7 +165,7 @@ void InputFileUnittest::OnSuccessfulInit() { input.reset(new InputFile()); ctx.SetExactlyOnceFlag(false); input->SetContext(ctx); - input->SetMetricsRecordRef(InputFile::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputFile::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(0U, input->mExactlyOnceConcurrency); APSARA_TEST_FALSE(ctx.IsExactlyOnceEnabled()); @@ -177,7 +177,7 @@ void InputFileUnittest::OnFailedInit() { input.reset(new InputFile()); input->SetContext(ctx); - input->SetMetricsRecordRef(InputFile::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputFile::sName, "1"); APSARA_TEST_FALSE(input->Init(configJson, optionalGoPipeline)); } @@ -225,7 +225,7 @@ void InputFileUnittest::OnEnableContainerDiscovery() { PluginInstance::PluginMeta meta = ctx.GetPipeline().GenNextPluginMeta(false); input.reset(new InputFile()); input->SetContext(ctx); - input->SetMetricsRecordRef(InputFile::sName, 
meta.mPluginID, meta.mNodeID, meta.mChildNodeID); + input->SetMetricsRecordRef(InputFile::sName, meta.mPluginID); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_TRUE(input->mEnableContainerDiscovery); APSARA_TEST_TRUE(input->mFileDiscovery.IsContainerDiscoveryEnabled()); @@ -250,7 +250,7 @@ void InputFileUnittest::TestCreateInnerProcessors() { configJson["FilePaths"].append(Json::Value(filePath.string())); input.reset(new InputFile()); input->SetContext(ctx); - input->SetMetricsRecordRef(InputFile::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputFile::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(2U, input->mInnerProcessors.size()); APSARA_TEST_EQUAL(ProcessorSplitLogStringNative::sName, input->mInnerProcessors[0]->Name()); @@ -279,7 +279,7 @@ void InputFileUnittest::TestCreateInnerProcessors() { configJson["FilePaths"].append(Json::Value(filePath.string())); input.reset(new InputFile()); input->SetContext(ctx); - input->SetMetricsRecordRef(InputFile::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputFile::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(2U, input->mInnerProcessors.size()); APSARA_TEST_EQUAL(ProcessorSplitMultilineLogStringNative::sName, input->mInnerProcessors[0]->Name()); @@ -309,7 +309,7 @@ void InputFileUnittest::TestCreateInnerProcessors() { configJson["FilePaths"].append(Json::Value(filePath.string())); input.reset(new InputFile()); input->SetContext(ctx); - input->SetMetricsRecordRef(InputFile::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputFile::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(2U, input->mInnerProcessors.size()); APSARA_TEST_EQUAL(ProcessorSplitLogStringNative::sName, input->mInnerProcessors[0]->Name()); @@ -337,7 +337,7 @@ void InputFileUnittest::TestCreateInnerProcessors() { 
configJson["FilePaths"].append(Json::Value(filePath.string())); input.reset(new InputFile()); input->SetContext(ctx); - input->SetMetricsRecordRef(InputFile::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputFile::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL(2U, input->mInnerProcessors.size()); APSARA_TEST_EQUAL(ProcessorSplitLogStringNative::sName, input->mInnerProcessors[0]->Name()); @@ -367,7 +367,7 @@ void InputFileUnittest::OnPipelineUpdate() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); configJson["FilePaths"].append(Json::Value(filePath.string())); input.SetContext(ctx); - input.SetMetricsRecordRef(InputFile::sName, "1", "1", "1"); + input.SetMetricsRecordRef(InputFile::sName, "1"); APSARA_TEST_TRUE(input.Init(configJson, optionalGoPipeline)); APSARA_TEST_TRUE(input.Start()); diff --git a/core/unittest/input/InputPrometheusUnittest.cpp b/core/unittest/input/InputPrometheusUnittest.cpp index 71f87bebd2..2b11218651 100644 --- a/core/unittest/input/InputPrometheusUnittest.cpp +++ b/core/unittest/input/InputPrometheusUnittest.cpp @@ -14,18 +14,17 @@ #include -#include #include #include #include "PluginRegistry.h" #include "app_config/AppConfig.h" #include "common/JsonUtil.h" -#include "plugin/processor/inner/ProcessorPromParseMetricNative.h" -#include "plugin/processor/inner/ProcessorPromRelabelMetricNative.h" -#include "plugin/input/InputPrometheus.h" #include "pipeline/Pipeline.h" #include "pipeline/PipelineContext.h" +#include "plugin/input/InputPrometheus.h" +#include "plugin/processor/inner/ProcessorPromParseMetricNative.h" +#include "plugin/processor/inner/ProcessorPromRelabelMetricNative.h" #include "prometheus/PrometheusInputRunner.h" #include "prometheus/labels/Relabel.h" #include "unittest/Unittest.h" @@ -43,10 +42,6 @@ class InputPrometheusUnittest : public testing::Test { protected: static void SetUpTestCase() { - setenv("POD_NAME", "prometheus-test", 1); - 
setenv("OPERATOR_HOST", "127.0.0.1", 1); - setenv("OPERATOR_PORT", "12345", 1); - AppConfig::GetInstance()->mPurageContainerMode = true; PluginRegistry::GetInstance()->LoadPlugins(); } @@ -56,9 +51,6 @@ class InputPrometheusUnittest : public testing::Test { ctx.SetPipeline(p); } static void TearDownTestCase() { - unsetenv("POD_NAME"); - unsetenv("OPERATOR_HOST"); - unsetenv("OPERATOR_PORT"); PluginRegistry::GetInstance()->UnloadPlugins(); } @@ -89,16 +81,16 @@ void InputPrometheusUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); input = make_unique(); input->SetContext(ctx); - input->SetMetricsRecordRef(input->Name(), "1", "1", "1"); + input->SetMetricsRecordRef(input->Name(), "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL("_arms-prom/node-exporter/0", input->mTargetSubscirber->mJobName); APSARA_TEST_EQUAL("/metrics", input->mTargetSubscirber->mScrapeConfigPtr->mMetricsPath); APSARA_TEST_EQUAL(15LL, input->mTargetSubscirber->mScrapeConfigPtr->mScrapeIntervalSeconds); APSARA_TEST_EQUAL(15LL, input->mTargetSubscirber->mScrapeConfigPtr->mScrapeTimeoutSeconds); - APSARA_TEST_EQUAL(-1, input->mTargetSubscirber->mScrapeConfigPtr->mMaxScrapeSizeBytes); - APSARA_TEST_EQUAL(-1, input->mTargetSubscirber->mScrapeConfigPtr->mSampleLimit); - APSARA_TEST_EQUAL(-1, input->mTargetSubscirber->mScrapeConfigPtr->mSeriesLimit); + APSARA_TEST_EQUAL(0ULL, input->mTargetSubscirber->mScrapeConfigPtr->mMaxScrapeSizeBytes); + APSARA_TEST_EQUAL(0ULL, input->mTargetSubscirber->mScrapeConfigPtr->mSampleLimit); + APSARA_TEST_EQUAL(0ULL, input->mTargetSubscirber->mScrapeConfigPtr->mSeriesLimit); // all useful config configStr = R"( @@ -124,16 +116,16 @@ void InputPrometheusUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); input = make_unique(); input->SetContext(ctx); - input->SetMetricsRecordRef(InputPrometheus::sName, "1", "1", "1"); + 
input->SetMetricsRecordRef(InputPrometheus::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_EQUAL("_arms-prom/node-exporter/0", input->mTargetSubscirber->mJobName); APSARA_TEST_EQUAL("/metrics", input->mTargetSubscirber->mScrapeConfigPtr->mMetricsPath); APSARA_TEST_EQUAL(15, input->mTargetSubscirber->mScrapeConfigPtr->mScrapeIntervalSeconds); APSARA_TEST_EQUAL(15, input->mTargetSubscirber->mScrapeConfigPtr->mScrapeTimeoutSeconds); - APSARA_TEST_EQUAL(10 * 1024 * 1024, input->mTargetSubscirber->mScrapeConfigPtr->mMaxScrapeSizeBytes); - APSARA_TEST_EQUAL(1000000, input->mTargetSubscirber->mScrapeConfigPtr->mSampleLimit); - APSARA_TEST_EQUAL(1000000, input->mTargetSubscirber->mScrapeConfigPtr->mSeriesLimit); + APSARA_TEST_EQUAL(10 * 1024 * 1024ULL, input->mTargetSubscirber->mScrapeConfigPtr->mMaxScrapeSizeBytes); + APSARA_TEST_EQUAL(1000000ULL, input->mTargetSubscirber->mScrapeConfigPtr->mSampleLimit); + APSARA_TEST_EQUAL(1000000ULL, input->mTargetSubscirber->mScrapeConfigPtr->mSeriesLimit); PrometheusInputRunner::GetInstance()->Stop(); } @@ -152,7 +144,7 @@ void InputPrometheusUnittest::OnFailedInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); input = make_unique(); input->SetContext(ctx); - input->SetMetricsRecordRef(InputPrometheus::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputPrometheus::sName, "1"); APSARA_TEST_FALSE(input->Init(configJson, optionalGoPipeline)); // with invalid ScrapeConfig @@ -176,7 +168,7 @@ void InputPrometheusUnittest::OnFailedInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); input = make_unique(); input->SetContext(ctx); - input->SetMetricsRecordRef(InputPrometheus::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputPrometheus::sName, "1"); APSARA_TEST_FALSE(input->Init(configJson, optionalGoPipeline)); PrometheusInputRunner::GetInstance()->Stop(); } @@ -207,16 +199,18 @@ void InputPrometheusUnittest::OnPipelineUpdate() { 
APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); input = make_unique(); input->SetContext(ctx); - input->SetMetricsRecordRef(InputPrometheus::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputPrometheus::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); APSARA_TEST_TRUE(input->Start()); - APSARA_TEST_TRUE(PrometheusInputRunner::GetInstance()->mTargetSubscriberSchedulerMap.find("_arms-prom/node-exporter/0") - != PrometheusInputRunner::GetInstance()->mTargetSubscriberSchedulerMap.end()); + APSARA_TEST_TRUE( + PrometheusInputRunner::GetInstance()->mTargetSubscriberSchedulerMap.find("_arms-prom/node-exporter/0") + != PrometheusInputRunner::GetInstance()->mTargetSubscriberSchedulerMap.end()); APSARA_TEST_TRUE(input->Stop(true)); - APSARA_TEST_TRUE(PrometheusInputRunner::GetInstance()->mTargetSubscriberSchedulerMap.find("_arms-prom/node-exporter/0") - == PrometheusInputRunner::GetInstance()->mTargetSubscriberSchedulerMap.end()); + APSARA_TEST_TRUE( + PrometheusInputRunner::GetInstance()->mTargetSubscriberSchedulerMap.find("_arms-prom/node-exporter/0") + == PrometheusInputRunner::GetInstance()->mTargetSubscriberSchedulerMap.end()); PrometheusInputRunner::GetInstance()->Stop(); } @@ -249,7 +243,7 @@ void InputPrometheusUnittest::TestCreateInnerProcessor() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); input = make_unique(); input->SetContext(ctx); - input->SetMetricsRecordRef(InputPrometheus::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputPrometheus::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); @@ -258,7 +252,7 @@ void InputPrometheusUnittest::TestCreateInnerProcessor() { APSARA_TEST_EQUAL(ProcessorPromRelabelMetricNative::sName, input->mInnerProcessors[1]->Name()); APSARA_TEST_EQUAL(0U, dynamic_cast(input->mInnerProcessors[1]->mPlugin.get()) - ->mRelabelConfigs.size()); + ->mScrapeConfigPtr->mMetricRelabelConfigs.mRelabelConfigs.size()); } { // with metric 
relabel config @@ -356,7 +350,7 @@ void InputPrometheusUnittest::TestCreateInnerProcessor() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); input = make_unique(); input->SetContext(ctx); - input->SetMetricsRecordRef(InputPrometheus::sName, "1", "1", "1"); + input->SetMetricsRecordRef(InputPrometheus::sName, "1"); APSARA_TEST_TRUE(input->Init(configJson, optionalGoPipeline)); @@ -366,18 +360,18 @@ void InputPrometheusUnittest::TestCreateInnerProcessor() { APSARA_TEST_EQUAL(ProcessorPromRelabelMetricNative::sName, input->mInnerProcessors[1]->mPlugin->Name()); APSARA_TEST_EQUAL(3U, dynamic_cast(input->mInnerProcessors[1]->mPlugin.get()) - ->mRelabelConfigs.size()); + ->mScrapeConfigPtr->mMetricRelabelConfigs.mRelabelConfigs.size()); APSARA_TEST_EQUAL(Action::KEEP, dynamic_cast(input->mInnerProcessors[1]->mPlugin.get()) - ->mRelabelConfigs[0] + ->mScrapeConfigPtr->mMetricRelabelConfigs.mRelabelConfigs[0] .mAction); APSARA_TEST_EQUAL(Action::KEEP, dynamic_cast(input->mInnerProcessors[1]->mPlugin.get()) - ->mRelabelConfigs[1] + ->mScrapeConfigPtr->mMetricRelabelConfigs.mRelabelConfigs[1] .mAction); APSARA_TEST_EQUAL(Action::REPLACE, dynamic_cast(input->mInnerProcessors[1]->mPlugin.get()) - ->mRelabelConfigs[2] + ->mScrapeConfigPtr->mMetricRelabelConfigs.mRelabelConfigs[2] .mAction); } PrometheusInputRunner::GetInstance()->Stop(); diff --git a/core/unittest/monitor/PluginMetricManagerUnittest.cpp b/core/unittest/monitor/PluginMetricManagerUnittest.cpp index a41908f1b2..2e0db37022 100644 --- a/core/unittest/monitor/PluginMetricManagerUnittest.cpp +++ b/core/unittest/monitor/PluginMetricManagerUnittest.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "monitor/PluginMetricManager.h" #include "unittest/Unittest.h" @@ -22,12 +22,12 @@ class PluginMetricManagerUnittest : public ::testing::Test { public: void SetUp() { MetricLabelsPtr defaultLabels = std::make_shared(); - defaultLabels->emplace_back(METRIC_LABEL_PROJECT, "default_project"); - defaultLabels->emplace_back(METRIC_LABEL_LOGSTORE, "default_logstore"); - defaultLabels->emplace_back(METRIC_LABEL_REGION, "default_region"); - defaultLabels->emplace_back(METRIC_LABEL_CONFIG_NAME, "default_config"); - defaultLabels->emplace_back(METRIC_LABEL_PLUGIN_NAME, "default_plugin"); - defaultLabels->emplace_back(METRIC_LABEL_PLUGIN_ID, "default_id"); + defaultLabels->emplace_back(METRIC_LABEL_KEY_PROJECT, "default_project"); + defaultLabels->emplace_back(METRIC_LABEL_KEY_LOGSTORE, "default_logstore"); + defaultLabels->emplace_back(METRIC_LABEL_KEY_REGION, "default_region"); + defaultLabels->emplace_back(METRIC_LABEL_KEY_PIPELINE_NAME, "default_config"); + defaultLabels->emplace_back(METRIC_LABEL_KEY_PLUGIN_TYPE, "default_plugin"); + defaultLabels->emplace_back(METRIC_LABEL_KEY_PLUGIN_ID, "default_id"); WriteMetrics::GetInstance()->PrepareMetricsRecordRef(mMetricsRecordRef, std::move(*defaultLabels)); std::unordered_map metricKeys; metricKeys.emplace("default_counter", MetricType::METRIC_TYPE_COUNTER); diff --git a/core/unittest/pipeline/CMakeLists.txt b/core/unittest/pipeline/CMakeLists.txt index 199620da42..e9b3969dfa 100644 --- a/core/unittest/pipeline/CMakeLists.txt +++ b/core/unittest/pipeline/CMakeLists.txt @@ -27,8 +27,13 @@ target_link_libraries(pipeline_manager_unittest ${UT_BASE_TARGET}) add_executable(instance_config_manager_unittest InstanceConfigManagerUnittest.cpp) target_link_libraries(instance_config_manager_unittest ${UT_BASE_TARGET}) +add_executable(concurrency_limiter_unittest ConcurrencyLimiterUnittest.cpp) 
+target_link_libraries(concurrency_limiter_unittest ${UT_BASE_TARGET}) + include(GoogleTest) gtest_discover_tests(global_config_unittest) gtest_discover_tests(pipeline_unittest) gtest_discover_tests(pipeline_manager_unittest) gtest_discover_tests(instance_config_manager_unittest) +gtest_discover_tests(concurrency_limiter_unittest) + diff --git a/core/unittest/pipeline/ConcurrencyLimiterUnittest.cpp b/core/unittest/pipeline/ConcurrencyLimiterUnittest.cpp new file mode 100644 index 0000000000..6ddddbc8d3 --- /dev/null +++ b/core/unittest/pipeline/ConcurrencyLimiterUnittest.cpp @@ -0,0 +1,100 @@ +// Copyright 2023 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include + +#include + +#include "common/JsonUtil.h" +#include "pipeline/limiter/ConcurrencyLimiter.h" +#include "unittest/Unittest.h" + +using namespace std; + +namespace logtail { + +class ConcurrencyLimiterUnittest : public testing::Test { +public: + void TestLimiter() const; +}; + +void ConcurrencyLimiterUnittest::TestLimiter() const { + shared_ptr sConcurrencyLimiter = make_shared(80); + // concurrency = 10, count = 0 + APSARA_TEST_EQUAL(true, sConcurrencyLimiter->IsValidToPop()); + sConcurrencyLimiter->PostPop(); + APSARA_TEST_EQUAL(1U, sConcurrencyLimiter->GetInSendingCount()); + sConcurrencyLimiter->OnFail(); + sConcurrencyLimiter->OnSendDone(); + APSARA_TEST_EQUAL(40U, sConcurrencyLimiter->GetCurrentLimit()); + APSARA_TEST_EQUAL(0U, sConcurrencyLimiter->GetInSendingCount()); + APSARA_TEST_EQUAL(30U, sConcurrencyLimiter->GetCurrentInterval()); + + // count = 10, concurrency = 10 + APSARA_TEST_EQUAL(true, sConcurrencyLimiter->IsValidToPop()); + int num = 10; + for (int i = 0; i < num; i++) { + APSARA_TEST_EQUAL(true, sConcurrencyLimiter->IsValidToPop()); + sConcurrencyLimiter->PostPop(); + } + APSARA_TEST_EQUAL(10U, sConcurrencyLimiter->GetInSendingCount()); + for (int i = 0; i < num; i++) { + sConcurrencyLimiter->OnSuccess(); + sConcurrencyLimiter->OnSendDone(); + } + APSARA_TEST_EQUAL(0U, sConcurrencyLimiter->GetInSendingCount()); + APSARA_TEST_EQUAL(50U, sConcurrencyLimiter->GetCurrentLimit()); + APSARA_TEST_EQUAL(30U, sConcurrencyLimiter->GetCurrentInterval()); + + // limit = 50/2/2/2/2/2/2/2 = 25/2/2/2/2/2/2 = 3/2/2/2 = 1/2/2 = 0 + // interval = 30 * 1.5 = 45 + num = 7; + for (int i = 0; i < num; i++) { + APSARA_TEST_EQUAL(true, sConcurrencyLimiter->IsValidToPop()); + sConcurrencyLimiter->PostPop(); + } + APSARA_TEST_EQUAL(7U, sConcurrencyLimiter->GetInSendingCount()); + for (int i = 0; i < num; i++) { + sConcurrencyLimiter->OnFail(); + sConcurrencyLimiter->OnSendDone(); + } + APSARA_TEST_EQUAL(0U, sConcurrencyLimiter->GetInSendingCount()); 
+ APSARA_TEST_EQUAL(0U, sConcurrencyLimiter->GetCurrentLimit()); + APSARA_TEST_EQUAL(45U, sConcurrencyLimiter->GetCurrentInterval()); + + num = 3; + for (int i = 0; i < num; i++) { + if (i == 0) { + APSARA_TEST_EQUAL(true, sConcurrencyLimiter->IsValidToPop()); + } else { + APSARA_TEST_EQUAL(false, sConcurrencyLimiter->IsValidToPop()); + } + } + + sConcurrencyLimiter->PostPop(); + sConcurrencyLimiter->OnSuccess(); + sConcurrencyLimiter->OnSendDone(); + + APSARA_TEST_EQUAL(0U, sConcurrencyLimiter->GetInSendingCount()); + APSARA_TEST_EQUAL(1U, sConcurrencyLimiter->GetCurrentLimit()); + APSARA_TEST_EQUAL(30U, sConcurrencyLimiter->GetCurrentInterval()); +} + + +UNIT_TEST_CASE(ConcurrencyLimiterUnittest, TestLimiter) + +} // namespace logtail + +UNIT_TEST_MAIN diff --git a/core/unittest/pipeline/PipelineUnittest.cpp b/core/unittest/pipeline/PipelineUnittest.cpp index d4d76b5138..1fbe72ff22 100644 --- a/core/unittest/pipeline/PipelineUnittest.cpp +++ b/core/unittest/pipeline/PipelineUnittest.cpp @@ -14,8 +14,10 @@ #include +#include #include #include +#include #include "app_config/AppConfig.h" #include "common/JsonUtil.h" @@ -47,6 +49,8 @@ class PipelineUnittest : public ::testing::Test { void TestProcess() const; void TestSend() const; void TestFlushBatch() const; + void TestInProcessingCount() const; + void TestWaitAllItemsInProcessFinished() const; protected: static void SetUpTestCase() { @@ -64,6 +68,13 @@ class PipelineUnittest : public ::testing::Test { ProcessQueueManager::GetInstance()->Clear(); } + unique_ptr GenerateProcessItem(shared_ptr pipeline) const { + PipelineEventGroup eventGroup(make_shared()); + auto item = make_unique(std::move(eventGroup), 0); + item->mPipeline = pipeline; + return item; + } + private: const string configName = "test_config"; }; @@ -112,9 +123,10 @@ void PipelineUnittest::OnSuccessfulInit() const { APSARA_TEST_EQUAL("test_region", pipeline->GetContext().GetRegion()); 
APSARA_TEST_EQUAL(QueueKeyManager::GetInstance()->GetKey("test_config-flusher_sls-test_project#test_logstore"), pipeline->GetContext().GetLogstoreKey()); + APSARA_TEST_EQUAL(0U, pipeline->mInProcessCnt.load()); APSARA_TEST_EQUAL(2U, pipeline->mMetricsRecordRef->GetLabels()->size()); - APSARA_TEST_TRUE(pipeline->mMetricsRecordRef.HasLabel(METRIC_LABEL_CONFIG_NAME, configName)); - APSARA_TEST_TRUE(pipeline->mMetricsRecordRef.HasLabel(METRIC_LABEL_PROJECT, "test_project")); + APSARA_TEST_TRUE(pipeline->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PIPELINE_NAME, configName)); + APSARA_TEST_TRUE(pipeline->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PROJECT, "test_project")); // without sls flusher configStr = R"( @@ -146,6 +158,7 @@ void PipelineUnittest::OnSuccessfulInit() const { APSARA_TEST_EQUAL("", pipeline->GetContext().GetProjectName()); APSARA_TEST_EQUAL("", pipeline->GetContext().GetLogstoreName()); APSARA_TEST_EQUAL("", pipeline->GetContext().GetRegion()); + APSARA_TEST_EQUAL(0U, pipeline->mInProcessCnt.load()); #ifndef __ENTERPRISE__ APSARA_TEST_EQUAL(QueueKeyManager::GetInstance()->GetKey("test_config-flusher_sls-"), pipeline->GetContext().GetLogstoreKey()); @@ -246,6 +259,7 @@ void PipelineUnittest::OnSuccessfulInit() const { APSARA_TEST_TRUE(pipeline->Init(std::move(*config))); APSARA_TEST_EQUAL(goPipelineWithInput.toStyledString(), pipeline->mGoPipelineWithInput.toStyledString()); APSARA_TEST_EQUAL(goPipelineWithoutInput.toStyledString(), pipeline->mGoPipelineWithoutInput.toStyledString()); + APSARA_TEST_EQUAL(0U, pipeline->mInProcessCnt.load()); goPipelineWithInput.clear(); goPipelineWithoutInput.clear(); @@ -291,6 +305,7 @@ void PipelineUnittest::OnSuccessfulInit() const { APSARA_TEST_TRUE(pipeline->Init(std::move(*config))); APSARA_TEST_EQUAL(1U, pipeline->mRouter.mConditions.size()); APSARA_TEST_EQUAL(1U, pipeline->mRouter.mAlwaysMatchedFlusherIdx.size()); + APSARA_TEST_EQUAL(0U, pipeline->mInProcessCnt.load()); } void PipelineUnittest::OnFailedInit() 
const { @@ -2688,11 +2703,14 @@ void PipelineUnittest::TestProcess() const { pipeline.mProcessorLine.emplace_back(std::move(processor)); WriteMetrics::GetInstance()->PrepareMetricsRecordRef(pipeline.mMetricsRecordRef, {}); - pipeline.mProcessorsInEventsCnt = pipeline.mMetricsRecordRef.CreateCounter("processors_in_events_cnt"); - pipeline.mProcessorsInGroupsCnt = pipeline.mMetricsRecordRef.CreateCounter("processors_in_event_groups_cnt"); - pipeline.mProcessorsInGroupDataSizeBytes - = pipeline.mMetricsRecordRef.CreateCounter("processors_in_event_group_data_size_bytes"); - pipeline.mProcessorsTotalDelayMs = pipeline.mMetricsRecordRef.CreateCounter("processors_total_delay_ms"); + pipeline.mProcessorsInEventsTotal + = pipeline.mMetricsRecordRef.CreateCounter(METRIC_PIPELINE_PROCESSORS_IN_EVENTS_TOTAL); + pipeline.mProcessorsInGroupsTotal + = pipeline.mMetricsRecordRef.CreateCounter(METRIC_PIPELINE_PROCESSORS_IN_EVENT_GROUPS_TOTAL); + pipeline.mProcessorsInSizeBytes + = pipeline.mMetricsRecordRef.CreateCounter(METRIC_PIPELINE_PROCESSORS_IN_SIZE_BYTES); + pipeline.mProcessorsTotalProcessTimeMs + = pipeline.mMetricsRecordRef.CreateCounter(METRIC_PIPELINE_PROCESSORS_TOTAL_PROCESS_TIME_MS); vector groups; groups.emplace_back(make_shared()); @@ -2702,9 +2720,9 @@ void PipelineUnittest::TestProcess() const { APSARA_TEST_EQUAL( 1U, static_cast(pipeline.mInputs[0]->GetInnerProcessors()[0]->mPlugin.get())->mCnt); APSARA_TEST_EQUAL(1U, static_cast(pipeline.mProcessorLine[0]->mPlugin.get())->mCnt); - APSARA_TEST_EQUAL(1U, pipeline.mProcessorsInEventsCnt->GetValue()); - APSARA_TEST_EQUAL(1U, pipeline.mProcessorsInGroupsCnt->GetValue()); - APSARA_TEST_EQUAL(size, pipeline.mProcessorsInGroupDataSizeBytes->GetValue()); + APSARA_TEST_EQUAL(1U, pipeline.mProcessorsInEventsTotal->GetValue()); + APSARA_TEST_EQUAL(1U, pipeline.mProcessorsInGroupsTotal->GetValue()); + APSARA_TEST_EQUAL(size, pipeline.mProcessorsInSizeBytes->GetValue()); } void PipelineUnittest::TestSend() const { @@ -2841,6 
+2859,44 @@ void PipelineUnittest::TestFlushBatch() const { } } +void PipelineUnittest::TestInProcessingCount() const { + auto pipeline = make_shared(); + pipeline->mPluginID.store(0); + pipeline->mInProcessCnt.store(0); + + PipelineContext ctx; + unique_ptr processQueue; + processQueue.reset(new BoundedProcessQueue(2, 2, 3, 0, 1, ctx)); + + vector group; + group.emplace_back(make_shared()); + + processQueue->EnablePop(); + processQueue->Push(GenerateProcessItem(pipeline)); + APSARA_TEST_EQUAL(0, pipeline->mInProcessCnt.load()); + unique_ptr item; + APSARA_TEST_TRUE(processQueue->Pop(item)); + APSARA_TEST_EQUAL(1, pipeline->mInProcessCnt.load()); + + pipeline->SubInProcessCnt(); + APSARA_TEST_EQUAL(0, pipeline->mInProcessCnt.load()); +} + +void PipelineUnittest::TestWaitAllItemsInProcessFinished() const { + auto pipeline = make_shared(); + pipeline->mPluginID.store(0); + pipeline->mInProcessCnt.store(0); + + pipeline->mInProcessCnt.store(1); + std::future future = std::async(std::launch::async, &Pipeline::WaitAllItemsInProcessFinished, pipeline.get()); + + // block + APSARA_TEST_NOT_EQUAL(std::future_status::ready, future.wait_for(std::chrono::seconds(0))); + pipeline->mInProcessCnt.store(0); + // recover + APSARA_TEST_EQUAL(std::future_status::ready, future.wait_for(std::chrono::seconds(0))); +} + UNIT_TEST_CASE(PipelineUnittest, OnSuccessfulInit) UNIT_TEST_CASE(PipelineUnittest, OnFailedInit) UNIT_TEST_CASE(PipelineUnittest, TestProcessQueue) @@ -2850,6 +2906,8 @@ UNIT_TEST_CASE(PipelineUnittest, OnInputFileWithContainerDiscovery) UNIT_TEST_CASE(PipelineUnittest, TestProcess) UNIT_TEST_CASE(PipelineUnittest, TestSend) UNIT_TEST_CASE(PipelineUnittest, TestFlushBatch) +UNIT_TEST_CASE(PipelineUnittest, TestInProcessingCount) +UNIT_TEST_CASE(PipelineUnittest, TestWaitAllItemsInProcessFinished) } // namespace logtail diff --git a/core/unittest/plugin/CMakeLists.txt b/core/unittest/plugin/CMakeLists.txt index b579277496..4d9850bcfd 100644 --- 
a/core/unittest/plugin/CMakeLists.txt +++ b/core/unittest/plugin/CMakeLists.txt @@ -33,6 +33,9 @@ target_link_libraries(processor_instance_unittest ${UT_BASE_TARGET}) add_executable(flusher_instance_unittest FlusherInstanceUnittest.cpp) target_link_libraries(flusher_instance_unittest ${UT_BASE_TARGET}) +add_executable(flusher_unittest FlusherUnittest.cpp) +target_link_libraries(flusher_unittest ${UT_BASE_TARGET}) + add_executable(plugin_registry_unittest PluginRegistryUnittest.cpp) target_link_libraries(plugin_registry_unittest ${UT_BASE_TARGET}) @@ -42,5 +45,7 @@ gtest_discover_tests(static_processor_creator_unittest) gtest_discover_tests(static_flusher_creator_unittest) gtest_discover_tests(input_instance_unittest) gtest_discover_tests(processor_instance_unittest) +gtest_discover_tests(flusher_instance_unittest) +gtest_discover_tests(flusher_unittest) gtest_discover_tests(plugin_registry_unittest) diff --git a/core/unittest/plugin/FlusherInstanceUnittest.cpp b/core/unittest/plugin/FlusherInstanceUnittest.cpp index 533a43dfd5..b6fcc33633 100644 --- a/core/unittest/plugin/FlusherInstanceUnittest.cpp +++ b/core/unittest/plugin/FlusherInstanceUnittest.cpp @@ -37,13 +37,13 @@ class FlusherInstanceUnittest : public testing::Test { void FlusherInstanceUnittest::TestName() const { unique_ptr flusher - = make_unique(new FlusherMock(), PluginInstance::PluginMeta("0", "0", "1")); + = make_unique(new FlusherMock(), PluginInstance::PluginMeta("0")); APSARA_TEST_EQUAL(FlusherMock::sName, flusher->Name()); } void FlusherInstanceUnittest::TestInit() const { unique_ptr flusher - = make_unique(new FlusherMock(), PluginInstance::PluginMeta("0", "0", "1")); + = make_unique(new FlusherMock(), PluginInstance::PluginMeta("0")); Json::Value config, opt; PipelineContext context; APSARA_TEST_TRUE(flusher->Init(config, context, opt)); @@ -52,20 +52,20 @@ void FlusherInstanceUnittest::TestInit() const { void FlusherInstanceUnittest::TestStart() const { unique_ptr flusher - = make_unique(new 
FlusherMock(), PluginInstance::PluginMeta("0", "0", "1")); + = make_unique(new FlusherMock(), PluginInstance::PluginMeta("0")); APSARA_TEST_TRUE(flusher->Start()); } void FlusherInstanceUnittest::TestStop() const { unique_ptr flusher - = make_unique(new FlusherMock(), PluginInstance::PluginMeta("0", "0", "1")); + = make_unique(new FlusherMock(), PluginInstance::PluginMeta("0")); APSARA_TEST_TRUE(flusher->Stop(true)); } void FlusherInstanceUnittest::TestGetQueueKey() const { FlusherMock* mock = new FlusherMock(); mock->GenerateQueueKey("target"); - unique_ptr flusher = make_unique(mock, PluginInstance::PluginMeta("0", "0", "1")); + unique_ptr flusher = make_unique(mock, PluginInstance::PluginMeta("0")); APSARA_TEST_EQUAL(QueueKeyManager::GetInstance()->GetKey("-flusher_mock-target"), flusher->GetQueueKey()); } diff --git a/core/unittest/plugin/FlusherUnittest.cpp b/core/unittest/plugin/FlusherUnittest.cpp new file mode 100644 index 0000000000..199ebfe306 --- /dev/null +++ b/core/unittest/plugin/FlusherUnittest.cpp @@ -0,0 +1,86 @@ +// Copyright 2023 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include + +#include "pipeline/PipelineManager.h" +#include "pipeline/plugin/interface/Flusher.h" +#include "pipeline/queue/QueueKeyManager.h" +#include "unittest/Unittest.h" +#include "unittest/plugin/PluginMock.h" + +using namespace std; + +namespace logtail { + +class FlusherUnittest : public testing::Test { +public: + void TestStop() const; + +protected: + void TearDown() override { QueueKeyManager::GetInstance()->Clear(); } +}; + +void FlusherUnittest::TestStop() const { + auto pipeline = make_shared(); + PipelineManager::GetInstance()->mPipelineNameEntityMap["test_config"] = pipeline; + + auto ctx = PipelineContext(); + ctx.SetConfigName("test_config"); + + FlusherMock* mock = new FlusherMock(); + mock->SetContext(ctx); + Json::Value tmp; + mock->Init(Json::Value(), tmp); + + auto q = SenderQueueManager::GetInstance()->GetQueue(mock->GetQueueKey()); + // push items to queue + for (size_t i = 0; i < q->mCapacity; ++i) { + auto item = make_unique("content", 0, nullptr, mock->GetQueueKey()); + q->Push(std::move(item)); + } + // push items to extra buffer + for (size_t i = 0; i < 10; ++i) { + auto item = make_unique("content", 0, nullptr, mock->GetQueueKey()); + q->Push(std::move(item)); + } + + std::vector items1; + q->GetAvailableItems(items1, -1); + for (auto item : items1) { + APSARA_TEST_EQUAL(item->mPipeline, nullptr); + } + for (size_t i = 0; i < q->mExtraBuffer.size(); ++i) { + auto item = q->mExtraBuffer[i].get(); + APSARA_TEST_EQUAL(item->mPipeline, nullptr); + } + + mock->Stop(false); + + std::vector items2; + q->GetAvailableItems(items2, -1); + for (auto item : items2) { + APSARA_TEST_EQUAL(item->mPipeline, pipeline); + } + for (size_t i = 0; i < q->mExtraBuffer.size(); ++i) { + auto item = q->mExtraBuffer[i].get(); + APSARA_TEST_EQUAL(item->mPipeline, pipeline); + } +} + +UNIT_TEST_CASE(FlusherUnittest, TestStop) + +} // namespace logtail + +UNIT_TEST_MAIN diff --git a/core/unittest/plugin/InputInstanceUnittest.cpp 
b/core/unittest/plugin/InputInstanceUnittest.cpp index 3736bd01ba..976134badb 100644 --- a/core/unittest/plugin/InputInstanceUnittest.cpp +++ b/core/unittest/plugin/InputInstanceUnittest.cpp @@ -37,12 +37,12 @@ class InputInstanceUnittest : public testing::Test { }; void InputInstanceUnittest::TestName() const { - unique_ptr input = make_unique(new InputMock(), PluginInstance::PluginMeta("0", "0", "1")); + unique_ptr input = make_unique(new InputMock(), PluginInstance::PluginMeta("0")); APSARA_TEST_EQUAL(InputMock::sName, input->Name()); } void InputInstanceUnittest::TestInit() const { - unique_ptr input = make_unique(new InputMock(), PluginInstance::PluginMeta("0", "0", "1")); + unique_ptr input = make_unique(new InputMock(), PluginInstance::PluginMeta("0")); Json::Value config, opt; Pipeline pipeline; PipelineContext context; @@ -53,12 +53,12 @@ void InputInstanceUnittest::TestInit() const { } void InputInstanceUnittest::TestStart() const { - unique_ptr input = make_unique(new InputMock(), PluginInstance::PluginMeta("0", "0", "1")); + unique_ptr input = make_unique(new InputMock(), PluginInstance::PluginMeta("0")); APSARA_TEST_TRUE(input->Start()); } void InputInstanceUnittest::TestStop() const { - unique_ptr input = make_unique(new InputMock(), PluginInstance::PluginMeta("0", "0", "1")); + unique_ptr input = make_unique(new InputMock(), PluginInstance::PluginMeta("0")); APSARA_TEST_TRUE(input->Stop(true)); } diff --git a/core/unittest/plugin/PluginMock.h b/core/unittest/plugin/PluginMock.h index f94bf8f044..0713b08bcf 100644 --- a/core/unittest/plugin/PluginMock.h +++ b/core/unittest/plugin/PluginMock.h @@ -94,7 +94,7 @@ class FlusherMock : public Flusher { const std::string& Name() const override { return sName; } bool Init(const Json::Value& config, Json::Value& optionalGoPipeline) override { GenerateQueueKey("mock"); - SenderQueueManager::GetInstance()->CreateQueue(mQueueKey, mNodeID, *mContext); + SenderQueueManager::GetInstance()->CreateQueue(mQueueKey, 
mPluginID, *mContext); return true; } bool Send(PipelineEventGroup&& g) override { return mIsValid; } @@ -117,7 +117,7 @@ class FlusherHttpMock : public HttpFlusher { const std::string& Name() const override { return sName; } bool Init(const Json::Value& config, Json::Value& optionalGoPipeline) override { GenerateQueueKey("mock"); - SenderQueueManager::GetInstance()->CreateQueue(mQueueKey, mNodeID, *mContext); + SenderQueueManager::GetInstance()->CreateQueue(mQueueKey, mPluginID, *mContext); return true; } bool Send(PipelineEventGroup&& g) override { return mIsValid; } diff --git a/core/unittest/plugin/PluginRegistryUnittest.cpp b/core/unittest/plugin/PluginRegistryUnittest.cpp index d777d71f5e..086a6638a0 100644 --- a/core/unittest/plugin/PluginRegistryUnittest.cpp +++ b/core/unittest/plugin/PluginRegistryUnittest.cpp @@ -40,27 +40,21 @@ class PluginRegistryUnittest : public testing::Test { }; void PluginRegistryUnittest::TestCreateInput() const { - unique_ptr input = PluginRegistry::GetInstance()->CreateInput(InputMock::sName, {"0", "0", "1"}); + unique_ptr input = PluginRegistry::GetInstance()->CreateInput(InputMock::sName, {"0"}); APSARA_TEST_NOT_EQUAL_FATAL(nullptr, input); APSARA_TEST_EQUAL_FATAL("0", input->PluginID()); - APSARA_TEST_EQUAL_FATAL("0", input->NodeID()); - APSARA_TEST_EQUAL_FATAL("1", input->ChildNodeID()); } void PluginRegistryUnittest::TestCreateProcessor() const { - unique_ptr processor = PluginRegistry::GetInstance()->CreateProcessor(ProcessorMock::sName, {"0", "0", "1"}); + unique_ptr processor = PluginRegistry::GetInstance()->CreateProcessor(ProcessorMock::sName, {"0"}); APSARA_TEST_NOT_EQUAL_FATAL(nullptr, processor); APSARA_TEST_EQUAL_FATAL("0", processor->PluginID()); - APSARA_TEST_EQUAL_FATAL("0", processor->NodeID()); - APSARA_TEST_EQUAL_FATAL("1", processor->ChildNodeID()); } void PluginRegistryUnittest::TestCreateFlusher() const { - unique_ptr flusher = PluginRegistry::GetInstance()->CreateFlusher(FlusherMock::sName, {"0", "0", 
"1"}); + unique_ptr flusher = PluginRegistry::GetInstance()->CreateFlusher(FlusherMock::sName, {"0"}); APSARA_TEST_NOT_EQUAL_FATAL(nullptr, flusher); APSARA_TEST_EQUAL_FATAL("0", flusher->PluginID()); - APSARA_TEST_EQUAL_FATAL("0", flusher->NodeID()); - APSARA_TEST_EQUAL_FATAL("1", flusher->ChildNodeID()); } void PluginRegistryUnittest::TestValidPlugin() const { diff --git a/core/unittest/plugin/ProcessorInstanceUnittest.cpp b/core/unittest/plugin/ProcessorInstanceUnittest.cpp index b545602a3f..90621bf159 100644 --- a/core/unittest/plugin/ProcessorInstanceUnittest.cpp +++ b/core/unittest/plugin/ProcessorInstanceUnittest.cpp @@ -30,12 +30,12 @@ class ProcessorInstanceUnittest : public testing::Test { }; void ProcessorInstanceUnittest::TestName() const { - unique_ptr processor = make_unique(new ProcessorMock(), PluginInstance::PluginMeta("0", "0", "1")); + unique_ptr processor = make_unique(new ProcessorMock(), PluginInstance::PluginMeta("0")); APSARA_TEST_EQUAL(ProcessorMock::sName, processor->Name()); } void ProcessorInstanceUnittest::TestInit() const { - unique_ptr processor = make_unique(new ProcessorMock(), PluginInstance::PluginMeta("0", "0", "1")); + unique_ptr processor = make_unique(new ProcessorMock(), PluginInstance::PluginMeta("0")); Json::Value config; PipelineContext context; APSARA_TEST_TRUE(processor->Init(config, context)); @@ -43,7 +43,7 @@ void ProcessorInstanceUnittest::TestInit() const { } void ProcessorInstanceUnittest::TestProcess() const { - unique_ptr processor = make_unique(new ProcessorMock(), PluginInstance::PluginMeta("0", "0", "1")); + unique_ptr processor = make_unique(new ProcessorMock(), PluginInstance::PluginMeta("0")); Json::Value config; PipelineContext context; processor->Init(config, context); diff --git a/core/unittest/plugin/StaticFlusherCreatorUnittest.cpp b/core/unittest/plugin/StaticFlusherCreatorUnittest.cpp index 99031a2897..42cb639f5e 100644 --- a/core/unittest/plugin/StaticFlusherCreatorUnittest.cpp +++ 
b/core/unittest/plugin/StaticFlusherCreatorUnittest.cpp @@ -40,11 +40,9 @@ void StaticFlusherCreatorUnittest::TestIsDynamic() { void StaticFlusherCreatorUnittest::TestCreate() { StaticFlusherCreator creator; - unique_ptr flusherMock = creator.Create({"0", "0", "1"}); + unique_ptr flusherMock = creator.Create({"0"}); APSARA_TEST_NOT_EQUAL(nullptr, flusherMock.get()); APSARA_TEST_EQUAL_FATAL("0", flusherMock->PluginID()); - APSARA_TEST_EQUAL_FATAL("0", flusherMock->NodeID()); - APSARA_TEST_EQUAL_FATAL("1", flusherMock->ChildNodeID()); } UNIT_TEST_CASE(StaticFlusherCreatorUnittest, TestName) diff --git a/core/unittest/plugin/StaticInputCreatorUnittest.cpp b/core/unittest/plugin/StaticInputCreatorUnittest.cpp index d8f810bee4..e7d039eddf 100644 --- a/core/unittest/plugin/StaticInputCreatorUnittest.cpp +++ b/core/unittest/plugin/StaticInputCreatorUnittest.cpp @@ -40,11 +40,9 @@ void StaticInputCreatorUnittest::TestIsDynamic() { void StaticInputCreatorUnittest::TestCreate() { StaticInputCreator creator; - unique_ptr inputMock = creator.Create({"0", "0", "1"}); + unique_ptr inputMock = creator.Create({"0"}); APSARA_TEST_NOT_EQUAL(nullptr, inputMock.get()); APSARA_TEST_EQUAL_FATAL("0", inputMock->PluginID()); - APSARA_TEST_EQUAL_FATAL("0", inputMock->NodeID()); - APSARA_TEST_EQUAL_FATAL("1", inputMock->ChildNodeID()); } UNIT_TEST_CASE(StaticInputCreatorUnittest, TestName) diff --git a/core/unittest/plugin/StaticProcessorCreatorUnittest.cpp b/core/unittest/plugin/StaticProcessorCreatorUnittest.cpp index a27691ebcd..1fb2afffbb 100644 --- a/core/unittest/plugin/StaticProcessorCreatorUnittest.cpp +++ b/core/unittest/plugin/StaticProcessorCreatorUnittest.cpp @@ -40,11 +40,9 @@ void StaticProcessorCreatorUnittest::TestIsDynamic() { void StaticProcessorCreatorUnittest::TestCreate() { StaticProcessorCreator creator; - unique_ptr processorMock = creator.Create({"0", "0", "1"}); + unique_ptr processorMock = creator.Create({"0"}); APSARA_TEST_NOT_EQUAL(nullptr, processorMock.get()); 
APSARA_TEST_EQUAL_FATAL("0", processorMock->PluginID()); - APSARA_TEST_EQUAL_FATAL("0", processorMock->NodeID()); - APSARA_TEST_EQUAL_FATAL("1", processorMock->ChildNodeID()); } UNIT_TEST_CASE(StaticProcessorCreatorUnittest, TestName) diff --git a/core/unittest/processor/ParseContainerLogBenchmark.cpp b/core/unittest/processor/ParseContainerLogBenchmark.cpp index 2cdf22358b..c48d7970b2 100644 --- a/core/unittest/processor/ParseContainerLogBenchmark.cpp +++ b/core/unittest/processor/ParseContainerLogBenchmark.cpp @@ -51,7 +51,7 @@ static void BM_DockerJson(int size, int batchSize) { config["IgnoringStderr"] = false; ProcessorParseContainerLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); std::string data1 = R"({"log":"Exception in thread \"main\" java.lang.NullPointerExceptionat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat 
com.example.myproject.Book.getTitle\n","stream":"stdout","time":"2024-04-07T08:02:40.873971412Z"})"; @@ -164,7 +164,7 @@ static void BM_ContainerdText(int size, int batchSize) { config["IgnoringStderr"] = false; ProcessorParseContainerLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); std::string data1 = R"(2024-04-08T12:48:59.665663286+08:00 stdout P Exception in thread "main" java.lang.NullPointerExceptionat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat com.example.myproject.Book.getTitleat )"; diff --git a/core/unittest/processor/ProcessorDesensitizeNativeUnittest.cpp b/core/unittest/processor/ProcessorDesensitizeNativeUnittest.cpp index 816fb57ad9..4ed95ecc73 100644 --- a/core/unittest/processor/ProcessorDesensitizeNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorDesensitizeNativeUnittest.cpp @@ -55,7 +55,7 @@ UNIT_TEST_CASE(ProcessorDesensitizeNativeUnittest, TestMultipleLines); UNIT_TEST_CASE(ProcessorDesensitizeNativeUnittest, TestMultipleLinesWithProcessorMergeMultilineLogNative); PluginInstance::PluginMeta getPluginMeta(){ - PluginInstance::PluginMeta pluginMeta{"testgetPluginID", "testNodeID", "testNodeChildID"}; + PluginInstance::PluginMeta pluginMeta{"1"}; return pluginMeta; } @@ -164,7 +164,7 @@ dbf@@@324 FS2$%pwd,pwd=saf543#$@,," // run function ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative processorSplitMultilineLogStringNative; processorSplitMultilineLogStringNative.SetContext(mContext); - 
processorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + processorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorSplitMultilineLogStringNative.Init(config)); processorSplitMultilineLogStringNative.Process(eventGroup); @@ -272,7 +272,7 @@ dbf@@@324 FS2$%pwd,pwd=saf543#$@,," // run function ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorMergeMultilineLogNative.Init(config)); processorMergeMultilineLogNative.Process(eventGroup); diff --git a/core/unittest/processor/ProcessorFilterNativeUnittest.cpp b/core/unittest/processor/ProcessorFilterNativeUnittest.cpp index 2f02dd45d4..71c416da80 100644 --- a/core/unittest/processor/ProcessorFilterNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorFilterNativeUnittest.cpp @@ -42,7 +42,7 @@ UNIT_TEST_CASE(ProcessorFilterNativeUnittest, TestBaseFilter) UNIT_TEST_CASE(ProcessorFilterNativeUnittest, TestFilterNoneUtf8) PluginInstance::PluginMeta getPluginMeta(){ - PluginInstance::PluginMeta pluginMeta{"testgetPluginID", "testNodeID", "testNodeChildID"}; + PluginInstance::PluginMeta pluginMeta{"1"}; return pluginMeta; } @@ -66,7 +66,7 @@ void ProcessorFilterNativeUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); processor.reset(new ProcessorFilterNative()); processor->SetContext(mContext); - processor->SetMetricsRecordRef(ProcessorFilterNative::sName, "1", "1", "1"); + processor->SetMetricsRecordRef(ProcessorFilterNative::sName, "1"); APSARA_TEST_TRUE(processor->Init(configJson)); 
APSARA_TEST_EQUAL(1, processor->mFilterRule->FilterKeys.size()); APSARA_TEST_EQUAL(1, processor->mFilterRule->FilterRegs.size()); @@ -93,7 +93,7 @@ void ProcessorFilterNativeUnittest::OnFailedInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); processor.reset(new ProcessorFilterNative()); processor->SetContext(mContext); - processor->SetMetricsRecordRef(ProcessorFilterNative::sName, "1", "1", "1"); + processor->SetMetricsRecordRef(ProcessorFilterNative::sName, "1"); APSARA_TEST_FALSE(processor->Init(configJson)); configStr = R"( @@ -112,7 +112,7 @@ void ProcessorFilterNativeUnittest::OnFailedInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); processor.reset(new ProcessorFilterNative()); processor->SetContext(mContext); - processor->SetMetricsRecordRef(ProcessorFilterNative::sName, "1", "1", "1"); + processor->SetMetricsRecordRef(ProcessorFilterNative::sName, "1"); APSARA_TEST_FALSE(processor->Init(configJson)); } @@ -449,7 +449,7 @@ void ProcessorFilterNativeUnittest::TestBaseFilter() { })"; APSARA_TEST_STREQ_FATAL(CompactJson(expectJson).c_str(), CompactJson(outJson).c_str()); - APSARA_TEST_EQUAL_FATAL(2, processor.mProcFilterRecordsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(2, processorInstance.mInEventsTotal->GetValue() - processorInstance.mOutEventsTotal->GetValue()); } { const char* jsonStr = "{\n" diff --git a/core/unittest/processor/ProcessorMergeMultilineLogNativeUnittest.cpp b/core/unittest/processor/ProcessorMergeMultilineLogNativeUnittest.cpp index f5b0045ec7..f56a2e88dd 100644 --- a/core/unittest/processor/ProcessorMergeMultilineLogNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorMergeMultilineLogNativeUnittest.cpp @@ -52,7 +52,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["MergeType"] = "regex"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + 
processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); APSARA_TEST_TRUE(processor.mMultiline.IsMultiline()); } @@ -64,7 +64,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["MergeType"] = "regex"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); APSARA_TEST_TRUE(processor.mMultiline.IsMultiline()); } @@ -76,7 +76,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["MergeType"] = "regex"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); APSARA_TEST_TRUE(processor.mMultiline.IsMultiline()); } @@ -87,7 +87,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["MergeType"] = "regex"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); APSARA_TEST_TRUE(processor.mMultiline.IsMultiline()); } @@ -99,7 +99,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["MergeType"] = "regex"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); APSARA_TEST_TRUE(processor.mMultiline.IsMultiline()); } @@ -115,7 +115,7 @@ 
void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["MergeType"] = "regex"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); APSARA_TEST_TRUE(processor.mMultiline.IsMultiline()); } @@ -126,7 +126,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["MergeType"] = "regex"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); APSARA_TEST_FALSE(processor.mMultiline.IsMultiline()); } @@ -137,7 +137,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["MergeType"] = "regex"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); APSARA_TEST_FALSE(processor.mMultiline.IsMultiline()); } @@ -148,7 +148,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["MergeType"] = "regex"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); APSARA_TEST_FALSE(processor.mMultiline.IsMultiline()); } @@ -158,7 +158,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["MergeType"] = "regex"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - 
processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); APSARA_TEST_FALSE(processor.mMultiline.IsMultiline()); } @@ -172,7 +172,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["MergeType"] = "regex"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); APSARA_TEST_FALSE(processor.mMultiline.IsMultiline()); } @@ -182,7 +182,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["MergeType"] = "flag"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); } // unknown init不通过 @@ -192,7 +192,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["MergeType"] = "unknown"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_FALSE(processor.Init(config)); } // 格式错误 init不通过 @@ -202,7 +202,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["MergeType"] = 1; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_FALSE(processor.Init(config)); } // 不存在 init不通过 @@ -211,7 +211,7 @@ void 
ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["StartPattern"] = ".*"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_FALSE(processor.Init(config)); } } @@ -225,7 +225,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["UnmatchedContentTreatment"] = "single_line"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); } // discard init通过 @@ -236,7 +236,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["UnmatchedContentTreatment"] = "discard"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); } // unknown init通过 @@ -247,7 +247,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["UnmatchedContentTreatment"] = "unknown"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); } // 格式错误 init通过 @@ -258,7 +258,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["UnmatchedContentTreatment"] = 1; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + 
processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); } // 不存在 init通过 @@ -268,7 +268,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestInit() { config["MergeType"] = "regex"; ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processor.Init(config)); } } @@ -289,7 +289,7 @@ void ProcessorMergeMultilineLogNativeUnittest::TestProcess() { // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorMergeMultilineLogNative.Init(config)); // group为空 { @@ -881,7 +881,7 @@ void ProcessEventsWithPartLogUnittest::TestProcessEventsWithPartLog() { // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processorMergeMultilineLogNative.Init(config)); // make eventGroup auto sourceBuffer = std::make_shared(); @@ -933,7 +933,7 @@ void ProcessEventsWithPartLogUnittest::TestProcessEventsWithPartLog() { // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - 
processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processorMergeMultilineLogNative.Init(config)); // make eventGroup auto sourceBuffer = std::make_shared(); @@ -981,7 +981,7 @@ void ProcessEventsWithPartLogUnittest::TestProcessEventsWithPartLog() { // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processorMergeMultilineLogNative.Init(config)); // make eventGroup auto sourceBuffer = std::make_shared(); @@ -1043,7 +1043,7 @@ void ProcessEventsWithPartLogUnittest::TestProcessEventsWithPartLog() { // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processorMergeMultilineLogNative.Init(config)); // make eventGroup auto sourceBuffer = std::make_shared(); @@ -1104,7 +1104,7 @@ void ProcessEventsWithPartLogUnittest::TestProcessEventsWithPartLog() { // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); 
APSARA_TEST_TRUE(processorMergeMultilineLogNative.Init(config)); // make eventGroup auto sourceBuffer = std::make_shared(); @@ -1175,7 +1175,7 @@ void ProcessEventsWithPartLogUnittest::TestProcessEventsWithPartLog() { // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processorMergeMultilineLogNative.Init(config)); // make eventGroup auto sourceBuffer = std::make_shared(); @@ -1255,7 +1255,7 @@ void ProcessEventsWithPartLogUnittest::TestProcessEventsWithPartLog() { // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processorMergeMultilineLogNative.Init(config)); // make eventGroup auto sourceBuffer = std::make_shared(); @@ -1336,7 +1336,7 @@ void ProcessEventsWithPartLogUnittest::TestProcess() { // make ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE(processorMergeMultilineLogNative.Init(config)); // event 不支持 { @@ -1876,7 +1876,7 @@ void ProcessorMergeMultilineLogDisacardUnmatchUnittest::TestLogSplitWithBeginCon // ProcessorMergeMultilineLogNative 
ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorMergeMultilineLogNative.Init(config)); // case: unmatch + unmatch { @@ -2145,7 +2145,7 @@ void ProcessorMergeMultilineLogDisacardUnmatchUnittest::TestLogSplitWithBeginEnd // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorMergeMultilineLogNative.Init(config)); // case: unmatch + unmatch { @@ -2389,7 +2389,7 @@ void ProcessorMergeMultilineLogDisacardUnmatchUnittest::TestLogSplitWithBegin() // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorMergeMultilineLogNative.Init(config)); // case: unmatch + start { @@ -2583,7 +2583,7 @@ void ProcessorMergeMultilineLogDisacardUnmatchUnittest::TestLogSplitWithContinue // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + 
processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorMergeMultilineLogNative.Init(config)); // case: unmatch { @@ -2778,7 +2778,7 @@ void ProcessorMergeMultilineLogDisacardUnmatchUnittest::TestLogSplitWithEnd() { // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorMergeMultilineLogNative.Init(config)); // case: end { @@ -2935,7 +2935,7 @@ void ProcessorMergeMultilineLogKeepUnmatchUnittest::TestLogSplitWithBeginContinu // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorMergeMultilineLogNative.Init(config)); // case: unmatch + unmatch { @@ -3297,7 +3297,7 @@ void ProcessorMergeMultilineLogKeepUnmatchUnittest::TestLogSplitWithBeginEnd() { // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorMergeMultilineLogNative.Init(config)); // case: unmatch + unmatch { @@ -3668,7 +3668,7 @@ void ProcessorMergeMultilineLogKeepUnmatchUnittest::TestLogSplitWithBegin() { 
// ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorMergeMultilineLogNative.Init(config)); // case: unmatch + start { @@ -3887,7 +3887,7 @@ void ProcessorMergeMultilineLogKeepUnmatchUnittest::TestLogSplitWithContinueEnd( // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorMergeMultilineLogNative.Init(config)); // case: unmatch { @@ -4141,7 +4141,7 @@ void ProcessorMergeMultilineLogKeepUnmatchUnittest::TestLogSplitWithEnd() { // ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorMergeMultilineLogNative.Init(config)); // case: end { diff --git a/core/unittest/processor/ProcessorParseApsaraNativeUnittest.cpp b/core/unittest/processor/ProcessorParseApsaraNativeUnittest.cpp index 5b0678b7fe..7ceeb708dc 100644 --- a/core/unittest/processor/ProcessorParseApsaraNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorParseApsaraNativeUnittest.cpp @@ -64,7 +64,7 @@ UNIT_TEST_CASE(ProcessorParseApsaraNativeUnittest, 
TestApsaraEasyReadLogTimePars UNIT_TEST_CASE(ProcessorParseApsaraNativeUnittest, TestApsaraLogLineParser); PluginInstance::PluginMeta getPluginMeta(){ - PluginInstance::PluginMeta pluginMeta{"testgetPluginID", "testNodeID", "testNodeChildID"}; + PluginInstance::PluginMeta pluginMeta{"1"}; return pluginMeta; } @@ -574,7 +574,7 @@ void ProcessorParseApsaraNativeUnittest::TestMultipleLines() { // run function ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative processorSplitMultilineLogStringNative; processorSplitMultilineLogStringNative.SetContext(mContext); - processorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + processorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorSplitMultilineLogStringNative.Init(config)); processorSplitMultilineLogStringNative.Process(eventGroup); @@ -786,11 +786,11 @@ void ProcessorParseApsaraNativeUnittest::TestProcessWholeLinePart() { APSARA_TEST_STREQ_FATAL(CompactJson(expectJson).c_str(), CompactJson(outJson).c_str()); // check observablity APSARA_TEST_EQUAL_FATAL(1, processor.GetContext().GetProcessProfile().parseFailures); - APSARA_TEST_EQUAL_FATAL(uint64_t(3), processorInstance.mProcInRecordsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(3), processorInstance.mInEventsTotal->GetValue()); // only timestamp failed, so output is 2 - APSARA_TEST_EQUAL_FATAL(uint64_t(2), processorInstance.mProcOutRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(1), processor.mProcDiscardRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(1), processor.mProcParseErrorTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(2), processorInstance.mOutEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(1), processor.mDiscardedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(1), processor.mOutFailedEventsTotal->GetValue()); } void 
ProcessorParseApsaraNativeUnittest::TestProcessKeyOverwritten() { @@ -1081,16 +1081,10 @@ void ProcessorParseApsaraNativeUnittest::TestProcessEventKeepUnmatch() { // check observablity int count = 5; APSARA_TEST_EQUAL_FATAL(count, processor.GetContext().GetProcessProfile().parseFailures); - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mProcInRecordsTotal->GetValue()); - std::string expectValue = "value1"; - APSARA_TEST_EQUAL_FATAL((expectValue.length()) * count, processor.mProcParseInSizeBytes->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mProcOutRecordsTotal->GetValue()); - expectValue = "rawLogvalue1"; - APSARA_TEST_EQUAL_FATAL((expectValue.length()) * count, processor.mProcParseOutSizeBytes->GetValue()); - - APSARA_TEST_EQUAL_FATAL(uint64_t(0), processor.mProcDiscardRecordsTotal->GetValue()); - - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mProcParseErrorTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mInEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mOutEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(0), processor.mDiscardedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mOutFailedEventsTotal->GetValue()); } void ProcessorParseApsaraNativeUnittest::TestProcessEventDiscardUnmatch() { @@ -1164,14 +1158,13 @@ void ProcessorParseApsaraNativeUnittest::TestProcessEventDiscardUnmatch() { // check observablity int count = 5; APSARA_TEST_EQUAL_FATAL(count, processor.GetContext().GetProcessProfile().parseFailures); - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mProcInRecordsTotal->GetValue()); - std::string expectValue = "value1"; - APSARA_TEST_EQUAL_FATAL((expectValue.length()) * count, processor.mProcParseInSizeBytes->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mInEventsTotal->GetValue()); // discard unmatch, so output is 0 - 
APSARA_TEST_EQUAL_FATAL(uint64_t(0), processorInstance.mProcOutRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(0), processor.mProcParseOutSizeBytes->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mProcDiscardRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mProcParseErrorTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(0), processorInstance.mOutEventsTotal->GetValue()); + // event group size is not 0 + APSARA_TEST_NOT_EQUAL_FATAL(uint64_t(0), processorInstance.mOutSizeBytes->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mDiscardedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mOutFailedEventsTotal->GetValue()); } void ProcessorParseApsaraNativeUnittest::TestProcessEventMicrosecondUnmatch() { @@ -1283,10 +1276,10 @@ void ProcessorParseApsaraNativeUnittest::TestProcessEventMicrosecondUnmatch() { APSARA_TEST_STREQ_FATAL(CompactJson(expectJson).c_str(), CompactJson(outJson).c_str()); // check observablity APSARA_TEST_EQUAL_FATAL(1, processor.GetContext().GetProcessProfile().parseFailures); - APSARA_TEST_EQUAL_FATAL(uint64_t(4), processorInstance.mProcInRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(4), processorInstance.mProcOutRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(0), processor.mProcDiscardRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(1), processor.mProcParseErrorTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(4), processorInstance.mInEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(4), processorInstance.mOutEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(0), processor.mDiscardedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(1), processor.mOutFailedEventsTotal->GetValue()); } } // namespace logtail diff --git a/core/unittest/processor/ProcessorParseContainerLogNativeUnittest.cpp b/core/unittest/processor/ProcessorParseContainerLogNativeUnittest.cpp 
index 7db7651b05..71e9579e23 100644 --- a/core/unittest/processor/ProcessorParseContainerLogNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorParseContainerLogNativeUnittest.cpp @@ -138,7 +138,7 @@ void ProcessorParseContainerLogNativeUnittest::TestInit() { config["IgnoringStderr"] = 1; ProcessorParseContainerLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); } @@ -152,7 +152,7 @@ void ProcessorParseContainerLogNativeUnittest::TestIgnoringStdoutStderr() { // make ProcessorParseContainerLogNative ProcessorParseContainerLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); // make eventGroup auto sourceBuffer = std::make_shared(); @@ -258,7 +258,7 @@ void ProcessorParseContainerLogNativeUnittest::TestIgnoringStdoutStderr() { // make ProcessorParseContainerLogNative ProcessorParseContainerLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); // make eventGroup auto sourceBuffer = std::make_shared(); @@ -396,7 +396,7 @@ void ProcessorParseContainerLogNativeUnittest::TestIgnoringStdoutStderr() { // make ProcessorParseContainerLogNative ProcessorParseContainerLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); // 
make eventGroup auto sourceBuffer = std::make_shared(); @@ -558,7 +558,7 @@ void ProcessorParseContainerLogNativeUnittest::TestIgnoringStdoutStderr() { // make ProcessorParseContainerLogNative ProcessorParseContainerLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); // make eventGroup auto sourceBuffer = std::make_shared(); @@ -749,7 +749,7 @@ void ProcessorParseContainerLogNativeUnittest::TestContainerdLog() { // make ProcessorParseContainerLogNative ProcessorParseContainerLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); // make eventGroup auto sourceBuffer = std::make_shared(); @@ -1018,7 +1018,7 @@ void ProcessorParseContainerLogNativeUnittest::TestContainerdLogWithSplit() { // make ProcessorSplitLogStringNative ProcessorSplitLogStringNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); // run test function processor.Process(eventGroup); @@ -1032,7 +1032,7 @@ void ProcessorParseContainerLogNativeUnittest::TestContainerdLogWithSplit() { // make ProcessorParseContainerLogNative ProcessorParseContainerLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); // run test function processor.Process(eventGroup); @@ -1046,7 
+1046,7 @@ void ProcessorParseContainerLogNativeUnittest::TestContainerdLogWithSplit() { // make ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); // run test function processor.Process(eventGroup); @@ -1061,7 +1061,7 @@ void ProcessorParseContainerLogNativeUnittest::TestContainerdLogWithSplit() { // make ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); // run test function processor.Process(eventGroup); @@ -1131,7 +1131,7 @@ void ProcessorParseContainerLogNativeUnittest::TestDockerJsonLogLineParserWithSp // make ProcessorSplitLogStringNative ProcessorSplitLogStringNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); // run test function processor.Process(eventGroup); @@ -1145,7 +1145,7 @@ void ProcessorParseContainerLogNativeUnittest::TestDockerJsonLogLineParserWithSp // make ProcessorParseContainerLogNative ProcessorParseContainerLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); // run test function processor.Process(eventGroup); @@ -1159,7 +1159,7 @@ void 
ProcessorParseContainerLogNativeUnittest::TestDockerJsonLogLineParserWithSp // make ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); // run test function processor.Process(eventGroup); @@ -1174,7 +1174,7 @@ void ProcessorParseContainerLogNativeUnittest::TestDockerJsonLogLineParserWithSp // make ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); // run test function processor.Process(eventGroup); @@ -1222,7 +1222,7 @@ void ProcessorParseContainerLogNativeUnittest::TestDockerJsonLogLineParser() { config["IgnoringStderr"] = false; ProcessorParseContainerLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); // log 测试 { @@ -1644,7 +1644,7 @@ void ProcessorParseContainerLogNativeUnittest::TestKeepingSourceWhenParseFail() ProcessorParseContainerLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); auto sourceBuffer = std::make_shared(); @@ -1764,7 +1764,7 @@ void ProcessorParseContainerLogNativeUnittest::TestKeepingSourceWhenParseFail() config["KeepingSourceWhenParseFail"] = false; 
ProcessorParseContainerLogNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorParseContainerLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); // log 测试 { diff --git a/core/unittest/processor/ProcessorParseDelimiterNativeUnittest.cpp b/core/unittest/processor/ProcessorParseDelimiterNativeUnittest.cpp index f4be52bdf6..51018f1de3 100644 --- a/core/unittest/processor/ProcessorParseDelimiterNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorParseDelimiterNativeUnittest.cpp @@ -66,7 +66,7 @@ UNIT_TEST_CASE(ProcessorParseDelimiterNativeUnittest, TestExtend); UNIT_TEST_CASE(ProcessorParseDelimiterNativeUnittest, TestEmpty); PluginInstance::PluginMeta getPluginMeta(){ - PluginInstance::PluginMeta pluginMeta{"testgetPluginID", "testNodeID", "testNodeChildID"}; + PluginInstance::PluginMeta pluginMeta{"1"}; return pluginMeta; } @@ -253,7 +253,7 @@ void ProcessorParseDelimiterNativeUnittest::TestAllowingShortenedFields() { // run function ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); processor.Process(eventGroup); @@ -375,7 +375,7 @@ void ProcessorParseDelimiterNativeUnittest::TestAllowingShortenedFields() { // run function ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); processor.Process(eventGroup); // run function 
ProcessorParseDelimiterNative @@ -525,7 +525,7 @@ void ProcessorParseDelimiterNativeUnittest::TestExtend() { // run function ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); processor.Process(eventGroup); // run function ProcessorParseDelimiterNative @@ -651,7 +651,7 @@ void ProcessorParseDelimiterNativeUnittest::TestExtend() { // run function ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); processor.Process(eventGroup); // run function ProcessorParseDelimiterNative @@ -775,7 +775,7 @@ void ProcessorParseDelimiterNativeUnittest::TestMultipleLines() { // run function ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); processor.Process(eventGroup); // run function ProcessorParseDelimiterNative @@ -896,7 +896,7 @@ void ProcessorParseDelimiterNativeUnittest::TestMultipleLines() { // run function ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, 
"1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); processor.Process(eventGroup); // run function ProcessorParseDelimiterNative @@ -1022,7 +1022,7 @@ void ProcessorParseDelimiterNativeUnittest::TestMultipleLines() { // run function ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); processor.Process(eventGroup); // run function ProcessorParseDelimiterNative @@ -1153,7 +1153,7 @@ void ProcessorParseDelimiterNativeUnittest::TestMultipleLinesWithProcessorMergeM // run function ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorMergeMultilineLogNative.Init(config)); processorMergeMultilineLogNative.Process(eventGroup); // run function ProcessorParseDelimiterNative @@ -1281,7 +1281,7 @@ void ProcessorParseDelimiterNativeUnittest::TestMultipleLinesWithProcessorMergeM // run function ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorMergeMultilineLogNative.Init(config)); processorMergeMultilineLogNative.Process(eventGroup); // run function ProcessorParseDelimiterNative @@ -1414,7 +1414,7 
@@ void ProcessorParseDelimiterNativeUnittest::TestMultipleLinesWithProcessorMergeM // run function ProcessorMergeMultilineLogNative ProcessorMergeMultilineLogNative processorMergeMultilineLogNative; processorMergeMultilineLogNative.SetContext(mContext); - processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1", "1", "1"); + processorMergeMultilineLogNative.SetMetricsRecordRef(ProcessorMergeMultilineLogNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processorMergeMultilineLogNative.Init(config)); processorMergeMultilineLogNative.Process(eventGroup); // run function ProcessorParseDelimiterNative @@ -1714,7 +1714,7 @@ HTTP/2.0' '200' '154' 'go-sdk'" // run function ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative processor; processor.SetContext(mContext); - processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + processor.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(processor.Init(config)); processor.Process(eventGroup); @@ -2335,16 +2335,10 @@ void ProcessorParseDelimiterNativeUnittest::TestProcessEventKeepUnmatch() { // check observablity int count = 5; APSARA_TEST_EQUAL_FATAL(count, processor.GetContext().GetProcessProfile().parseFailures); - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mProcInRecordsTotal->GetValue()); - std::string expectValue = "value1"; - APSARA_TEST_EQUAL_FATAL(uint64_t(expectValue.length() * count), processor.mProcParseInSizeBytes->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mProcOutRecordsTotal->GetValue()); - expectValue = "rawLogvalue1"; - APSARA_TEST_EQUAL_FATAL(uint64_t(expectValue.length() * count), processor.mProcParseOutSizeBytes->GetValue()); - - APSARA_TEST_EQUAL_FATAL(uint64_t(0), processor.mProcDiscardRecordsTotal->GetValue()); - - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mProcParseErrorTotal->GetValue()); + 
APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mInEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mOutEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(0), processor.mDiscardedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mOutFailedEventsTotal->GetValue()); } void ProcessorParseDelimiterNativeUnittest::TestProcessEventDiscardUnmatch() { @@ -2429,14 +2423,13 @@ void ProcessorParseDelimiterNativeUnittest::TestProcessEventDiscardUnmatch() { // check observablity int count = 5; APSARA_TEST_EQUAL_FATAL(count, processor.GetContext().GetProcessProfile().parseFailures); - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mProcInRecordsTotal->GetValue()); - std::string expectValue = "value1"; - APSARA_TEST_EQUAL_FATAL((expectValue.length()) * count, processor.mProcParseInSizeBytes->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mInEventsTotal->GetValue()); // discard unmatch, so output is 0 - APSARA_TEST_EQUAL_FATAL(uint64_t(0), processorInstance.mProcOutRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(0), processor.mProcParseOutSizeBytes->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mProcDiscardRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mProcParseErrorTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(0), processorInstance.mOutEventsTotal->GetValue()); + // event group size is not 0 + APSARA_TEST_NOT_EQUAL_FATAL(uint64_t(0), processorInstance.mOutSizeBytes->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mDiscardedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mOutFailedEventsTotal->GetValue()); } void ProcessorParseDelimiterNativeUnittest::TestEmpty() { diff --git a/core/unittest/processor/ProcessorParseJsonNativeUnittest.cpp b/core/unittest/processor/ProcessorParseJsonNativeUnittest.cpp index 7493da0843..2b5a6a91bd 
100644 --- a/core/unittest/processor/ProcessorParseJsonNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorParseJsonNativeUnittest.cpp @@ -59,7 +59,7 @@ UNIT_TEST_CASE(ProcessorParseJsonNativeUnittest, TestProcessJsonRaw); UNIT_TEST_CASE(ProcessorParseJsonNativeUnittest, TestMultipleLines); PluginInstance::PluginMeta getPluginMeta(){ - PluginInstance::PluginMeta pluginMeta{"testgetPluginID", "testNodeID", "testNodeChildID"}; + PluginInstance::PluginMeta pluginMeta{"1"}; return pluginMeta; } @@ -314,7 +314,7 @@ void ProcessorParseJsonNativeUnittest::TestProcessJsonEscapedNullByte() { })"; std::string outJson = eventGroupList[0].ToJsonString(); APSARA_TEST_STREQ_FATAL(CompactJson(expectJson).c_str(), CompactJson(outJson).c_str()); - APSARA_TEST_GE_FATAL(processorInstance.mProcTimeMS->GetValue(), uint64_t(0)); + APSARA_TEST_GE_FATAL(processorInstance.mTotalProcessTimeMs->GetValue(), uint64_t(0)); } void ProcessorParseJsonNativeUnittest::TestProcessJson() { @@ -394,7 +394,7 @@ void ProcessorParseJsonNativeUnittest::TestProcessJson() { })"; std::string outJson = eventGroupList[0].ToJsonString(); APSARA_TEST_STREQ_FATAL(CompactJson(expectJson).c_str(), CompactJson(outJson).c_str()); - APSARA_TEST_GE_FATAL(processorInstance.mProcTimeMS->GetValue(), uint64_t(0)); + APSARA_TEST_GE_FATAL(processorInstance.mTotalProcessTimeMs->GetValue(), uint64_t(0)); } void ProcessorParseJsonNativeUnittest::TestProcessJsonContent() { @@ -485,7 +485,7 @@ void ProcessorParseJsonNativeUnittest::TestProcessJsonContent() { })"; std::string outJson = eventGroupList[0].ToJsonString(); APSARA_TEST_STREQ_FATAL(CompactJson(expectJson).c_str(), CompactJson(outJson).c_str()); - APSARA_TEST_GE_FATAL(processorInstance.mProcTimeMS->GetValue(), uint64_t(0)); + APSARA_TEST_GE_FATAL(processorInstance.mTotalProcessTimeMs->GetValue(), uint64_t(0)); } void ProcessorParseJsonNativeUnittest::TestProcessJsonRaw() { @@ -574,7 +574,7 @@ void ProcessorParseJsonNativeUnittest::TestProcessJsonRaw() { })"; 
std::string outJson = eventGroupList[0].ToJsonString(); APSARA_TEST_STREQ_FATAL(CompactJson(expectJson).c_str(), CompactJson(outJson).c_str()); - APSARA_TEST_GE_FATAL(processorInstance.mProcTimeMS->GetValue(), uint64_t(0)); + APSARA_TEST_GE_FATAL(processorInstance.mTotalProcessTimeMs->GetValue(), uint64_t(0)); } void ProcessorParseJsonNativeUnittest::TestProcessEventKeepUnmatch() { @@ -619,18 +619,10 @@ void ProcessorParseJsonNativeUnittest::TestProcessEventKeepUnmatch() { // check observablity APSARA_TEST_EQUAL_FATAL(count, processor.GetContext().GetProcessProfile().parseFailures); - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mProcInRecordsTotal->GetValue()); - std::string expectValue - = "{\"url\": \"POST /PutData?Category=YunOsAccountOpLog HTTP/1.1\",\"time\": \"07/Jul/2022:10:30:28\""; - APSARA_TEST_EQUAL_FATAL((expectValue.length()) * count, processor.mProcParseInSizeBytes->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mProcOutRecordsTotal->GetValue()); - expectValue = "rawLog{\"url\": \"POST /PutData?Category=YunOsAccountOpLog HTTP/1.1\",\"time\": " - "\"07/Jul/2022:10:30:28\""; - APSARA_TEST_EQUAL_FATAL(uint64_t(expectValue.length() * count), processor.mProcParseOutSizeBytes->GetValue()); - - APSARA_TEST_EQUAL_FATAL(uint64_t(0), processor.mProcDiscardRecordsTotal->GetValue()); - - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mProcParseErrorTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mInEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mOutEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(0), processor.mDiscardedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mOutFailedEventsTotal->GetValue()); // judge result std::string expectJson = R"({ @@ -649,7 +641,7 @@ void ProcessorParseJsonNativeUnittest::TestProcessEventKeepUnmatch() { })"; std::string outJson = eventGroupList[0].ToJsonString(); 
APSARA_TEST_STREQ_FATAL(CompactJson(expectJson).c_str(), CompactJson(outJson).c_str()); - APSARA_TEST_GE_FATAL(processorInstance.mProcTimeMS->GetValue(), uint64_t(0)); + APSARA_TEST_GE_FATAL(processorInstance.mTotalProcessTimeMs->GetValue(), uint64_t(0)); } void ProcessorParseJsonNativeUnittest::TestProcessEventDiscardUnmatch() { @@ -693,20 +685,17 @@ void ProcessorParseJsonNativeUnittest::TestProcessEventDiscardUnmatch() { // check observablity APSARA_TEST_EQUAL_FATAL(count, processor.GetContext().GetProcessProfile().parseFailures); - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mProcInRecordsTotal->GetValue()); - std::string expectValue - = "{\"url\": \"POST /PutData?Category=YunOsAccountOpLog HTTP/1.1\",\"time\": \"07/Jul/2022:10:30:28\""; - APSARA_TEST_EQUAL_FATAL((expectValue.length()) * count, processor.mProcParseInSizeBytes->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processorInstance.mInEventsTotal->GetValue()); // discard unmatch, so output is 0 - APSARA_TEST_EQUAL_FATAL(uint64_t(0), processorInstance.mProcOutRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(0), processor.mProcParseOutSizeBytes->GetValue()); - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mProcDiscardRecordsTotal->GetValue()); - - APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mProcParseErrorTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(0), processorInstance.mOutEventsTotal->GetValue()); + // event group size is not 0 + APSARA_TEST_NOT_EQUAL_FATAL(uint64_t(0), processorInstance.mOutSizeBytes->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mDiscardedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(uint64_t(count), processor.mOutFailedEventsTotal->GetValue()); std::string outJson = eventGroupList[0].ToJsonString(); APSARA_TEST_STREQ_FATAL("null", CompactJson(outJson).c_str()); - APSARA_TEST_GE_FATAL(processorInstance.mProcTimeMS->GetValue(), uint64_t(0)); + 
APSARA_TEST_GE_FATAL(processorInstance.mTotalProcessTimeMs->GetValue(), uint64_t(0)); } } // namespace logtail diff --git a/core/unittest/processor/ProcessorParseRegexNativeUnittest.cpp b/core/unittest/processor/ProcessorParseRegexNativeUnittest.cpp index c38c6298d2..f7cbee3870 100644 --- a/core/unittest/processor/ProcessorParseRegexNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorParseRegexNativeUnittest.cpp @@ -44,7 +44,7 @@ class ProcessorParseRegexNativeUnittest : public ::testing::Test { }; PluginInstance::PluginMeta getPluginMeta(){ - PluginInstance::PluginMeta pluginMeta{"testgetPluginID", "testNodeID", "testNodeChildID"}; + PluginInstance::PluginMeta pluginMeta{"1"}; return pluginMeta; } @@ -85,7 +85,7 @@ void ProcessorParseRegexNativeUnittest::OnSuccessfulInit() { APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); processor.reset(new ProcessorParseRegexNative()); processor->SetContext(ctx); - processor->SetMetricsRecordRef(ProcessorParseRegexNative::sName, "1", "1", "1"); + processor->SetMetricsRecordRef(ProcessorParseRegexNative::sName, "1"); APSARA_TEST_TRUE(processor->Init(configJson)); APSARA_TEST_EQUAL(2, processor->mKeys.size()); APSARA_TEST_EQUAL("k1", processor->mKeys[0]); @@ -143,16 +143,11 @@ void ProcessorParseRegexNativeUnittest::TestProcessWholeLine() { std::string outJson = eventGroupList[0].ToJsonString(); APSARA_TEST_STREQ_FATAL(CompactJson(inJson).c_str(), CompactJson(outJson).c_str()); // metric - APSARA_TEST_EQUAL_FATAL(2, processorInstance.mProcInRecordsTotal->GetValue()); - std::string expectValue = "line1\nline2"; - APSARA_TEST_EQUAL_FATAL((expectValue.length()) * 2, processor.mProcParseInSizeBytes->GetValue()); - APSARA_TEST_EQUAL_FATAL(2, processorInstance.mProcOutRecordsTotal->GetValue()); - expectValue = "contentline1\nline2"; - APSARA_TEST_EQUAL_FATAL((expectValue.length()) * 2, processor.mProcParseOutSizeBytes->GetValue()); - - APSARA_TEST_EQUAL_FATAL(0, processor.mProcDiscardRecordsTotal->GetValue()); - 
APSARA_TEST_EQUAL_FATAL(0, processor.mProcParseErrorTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(0, processor.mProcKeyCountNotMatchErrorTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(2, processorInstance.mInEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(2, processorInstance.mOutEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0, processor.mDiscardedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0, processor.mOutFailedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0, processor.mOutKeyNotFoundEventsTotal->GetValue()); } void ProcessorParseRegexNativeUnittest::TestProcessRegex() { @@ -228,7 +223,7 @@ void ProcessorParseRegexNativeUnittest::TestProcessRegex() { })"; std::string outJson = eventGroupList[0].ToJsonString(); APSARA_TEST_STREQ_FATAL(CompactJson(expectJson).c_str(), CompactJson(outJson).c_str()); - APSARA_TEST_GE_FATAL(processorInstance.mProcTimeMS->GetValue(), 0); + APSARA_TEST_GE_FATAL(processorInstance.mTotalProcessTimeMs->GetValue(), 0); } void ProcessorParseRegexNativeUnittest::TestProcessRegexRaw() { @@ -302,7 +297,7 @@ void ProcessorParseRegexNativeUnittest::TestProcessRegexRaw() { })"; std::string outJson = eventGroupList[0].ToJsonString(); APSARA_TEST_STREQ_FATAL(CompactJson(expectJson).c_str(), CompactJson(outJson).c_str()); - APSARA_TEST_GE_FATAL(processorInstance.mProcTimeMS->GetValue(), 0); + APSARA_TEST_GE_FATAL(processorInstance.mTotalProcessTimeMs->GetValue(), 0); } void ProcessorParseRegexNativeUnittest::TestProcessRegexContent() { @@ -378,7 +373,7 @@ void ProcessorParseRegexNativeUnittest::TestProcessRegexContent() { })"; std::string outJson = eventGroupList[0].ToJsonString(); APSARA_TEST_STREQ_FATAL(CompactJson(expectJson).c_str(), CompactJson(outJson).c_str()); - APSARA_TEST_GE_FATAL(processorInstance.mProcTimeMS->GetValue(), 0); + APSARA_TEST_GE_FATAL(processorInstance.mTotalProcessTimeMs->GetValue(), 0); } void ProcessorParseRegexNativeUnittest::TestAddLog() { @@ -483,17 +478,11 @@ void 
ProcessorParseRegexNativeUnittest::TestProcessEventKeepUnmatch() { APSARA_TEST_EQUAL_FATAL(count, processor.GetContext().GetProcessProfile().regexMatchFailures); APSARA_TEST_EQUAL_FATAL(count, processor.GetContext().GetProcessProfile().parseFailures); - APSARA_TEST_EQUAL_FATAL(count, processorInstance.mProcInRecordsTotal->GetValue()); - std::string expectValue = "value1"; - APSARA_TEST_EQUAL_FATAL((expectValue.length()) * count, processor.mProcParseInSizeBytes->GetValue()); - APSARA_TEST_EQUAL_FATAL(count, processorInstance.mProcOutRecordsTotal->GetValue()); - expectValue = "rawLogvalue1"; - APSARA_TEST_EQUAL_FATAL((expectValue.length()) * count, processor.mProcParseOutSizeBytes->GetValue()); - - APSARA_TEST_EQUAL_FATAL(0, processor.mProcDiscardRecordsTotal->GetValue()); - - APSARA_TEST_EQUAL_FATAL(count, processor.mProcParseErrorTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(0, processor.mProcKeyCountNotMatchErrorTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(count, processorInstance.mInEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(count, processorInstance.mOutEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0, processor.mDiscardedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(count, processor.mOutFailedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0, processor.mOutKeyNotFoundEventsTotal->GetValue()); } void ProcessorParseRegexNativeUnittest::TestProcessEventDiscardUnmatch() { @@ -572,17 +561,14 @@ void ProcessorParseRegexNativeUnittest::TestProcessEventDiscardUnmatch() { // check observablity APSARA_TEST_EQUAL_FATAL(count, processor.GetContext().GetProcessProfile().regexMatchFailures); APSARA_TEST_EQUAL_FATAL(count, processor.GetContext().GetProcessProfile().parseFailures); - - APSARA_TEST_EQUAL_FATAL(count, processorInstance.mProcInRecordsTotal->GetValue()); - std::string expectValue = "value1"; - APSARA_TEST_EQUAL_FATAL((expectValue.length()) * count, processor.mProcParseInSizeBytes->GetValue()); + APSARA_TEST_EQUAL_FATAL(count, 
processorInstance.mInEventsTotal->GetValue()); // discard unmatch, so output is 0 - APSARA_TEST_EQUAL_FATAL(0, processorInstance.mProcOutRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(0, processor.mProcParseOutSizeBytes->GetValue()); - APSARA_TEST_EQUAL_FATAL(count, processor.mProcDiscardRecordsTotal->GetValue()); - - APSARA_TEST_EQUAL_FATAL(count, processor.mProcParseErrorTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(0, processor.mProcKeyCountNotMatchErrorTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0, processorInstance.mOutEventsTotal->GetValue()); + // event group size is not 0 + APSARA_TEST_NOT_EQUAL_FATAL(0, processorInstance.mOutSizeBytes->GetValue()); + APSARA_TEST_EQUAL_FATAL(count, processor.mDiscardedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(count, processor.mOutFailedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0, processor.mOutKeyNotFoundEventsTotal->GetValue()); } void ProcessorParseRegexNativeUnittest::TestProcessEventKeyCountUnmatch() { @@ -663,17 +649,14 @@ void ProcessorParseRegexNativeUnittest::TestProcessEventKeyCountUnmatch() { APSARA_TEST_EQUAL_FATAL(count, processor.GetContext().GetProcessProfile().regexMatchFailures); APSARA_TEST_EQUAL_FATAL(count, processor.GetContext().GetProcessProfile().parseFailures); - APSARA_TEST_EQUAL_FATAL(count, processorInstance.mProcInRecordsTotal->GetValue()); - std::string expectValue = "value1\tvalue2"; - APSARA_TEST_EQUAL_FATAL((expectValue.length()) * count, processor.mProcParseInSizeBytes->GetValue()); + APSARA_TEST_EQUAL_FATAL(count, processorInstance.mInEventsTotal->GetValue()); // discard unmatch, so output is 0 - APSARA_TEST_EQUAL_FATAL(0, processorInstance.mProcOutRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(0, processor.mProcParseOutSizeBytes->GetValue()); - APSARA_TEST_EQUAL_FATAL(count, processor.mProcDiscardRecordsTotal->GetValue()); - - // mProcKeyCountNotMatchErrorTotal should equal count - APSARA_TEST_EQUAL_FATAL(0, processor.mProcParseErrorTotal->GetValue()); - 
APSARA_TEST_EQUAL_FATAL(count, processor.mProcKeyCountNotMatchErrorTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0, processorInstance.mOutEventsTotal->GetValue()); + // event group size is not 0 + APSARA_TEST_NOT_EQUAL_FATAL(0, processorInstance.mOutSizeBytes->GetValue()); + APSARA_TEST_EQUAL_FATAL(count, processor.mDiscardedEventsTotal->GetValue()); + // mKeyCountNotMatchErrorTotal should equal count + APSARA_TEST_EQUAL_FATAL(0, processor.mOutFailedEventsTotal->GetValue()); } UNIT_TEST_CASE(ProcessorParseRegexNativeUnittest, TestInit) diff --git a/core/unittest/processor/ProcessorParseTimestampNativeUnittest.cpp b/core/unittest/processor/ProcessorParseTimestampNativeUnittest.cpp index 7af6786275..0ea57418db 100644 --- a/core/unittest/processor/ProcessorParseTimestampNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorParseTimestampNativeUnittest.cpp @@ -53,7 +53,7 @@ UNIT_TEST_CASE(ProcessorParseTimestampNativeUnittest, TestProcessEventPreciseTim UNIT_TEST_CASE(ProcessorParseTimestampNativeUnittest, TestCheckTime); PluginInstance::PluginMeta getPluginMeta(){ - PluginInstance::PluginMeta pluginMeta{"testgetPluginID", "testNodeID", "testNodeChildID"}; + PluginInstance::PluginMeta pluginMeta{"1"}; return pluginMeta; } @@ -398,14 +398,11 @@ void ProcessorParseTimestampNativeUnittest::TestProcessRegularFormat() { APSARA_TEST_EQUAL_FATAL(CompactJson(expectJsonSs.str()), CompactJson(outJson)); // check observablity APSARA_TEST_EQUAL_FATAL(0, processor.GetContext().GetProcessProfile().historyFailures); - APSARA_TEST_EQUAL_FATAL(2UL, processorInstance.mProcInRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(strlen(timebuff) * 2, processor.mProcParseInSizeBytes->GetValue()); + APSARA_TEST_EQUAL_FATAL(2UL, processorInstance.mInEventsTotal->GetValue()); // discard history, so output is 0 - APSARA_TEST_EQUAL_FATAL(2UL, processorInstance.mProcOutRecordsTotal->GetValue()); - // size of one timestamp and one nanosecond equals to 8 byte, respectively - 
APSARA_TEST_EQUAL_FATAL(8UL * 4, processor.mProcParseOutSizeBytes->GetValue()); - APSARA_TEST_EQUAL_FATAL(0UL, processor.mProcDiscardRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(0UL, processor.mProcParseErrorTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(2UL, processorInstance.mOutEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0UL, processor.mDiscardedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0UL, processor.mOutFailedEventsTotal->GetValue()); } void ProcessorParseTimestampNativeUnittest::TestProcessNoYearFormat() { @@ -490,14 +487,11 @@ void ProcessorParseTimestampNativeUnittest::TestProcessNoYearFormat() { APSARA_TEST_EQUAL_FATAL(CompactJson(expectJsonSs.str()), CompactJson(outJson)); // check observablity APSARA_TEST_EQUAL_FATAL(0, processor.GetContext().GetProcessProfile().historyFailures); - APSARA_TEST_EQUAL_FATAL(2UL, processorInstance.mProcInRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(strlen(timebuff) * 2, processor.mProcParseInSizeBytes->GetValue()); + APSARA_TEST_EQUAL_FATAL(2UL, processorInstance.mInEventsTotal->GetValue()); // discard history, so output is 0 - APSARA_TEST_EQUAL_FATAL(2UL, processorInstance.mProcOutRecordsTotal->GetValue()); - // size of one timestamp and one nanosecond equals to 8 byte, respectively - APSARA_TEST_EQUAL_FATAL(8UL * 4, processor.mProcParseOutSizeBytes->GetValue()); - APSARA_TEST_EQUAL_FATAL(0UL, processor.mProcDiscardRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(0UL, processor.mProcParseErrorTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(2UL, processorInstance.mOutEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0UL, processor.mDiscardedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0UL, processor.mOutFailedEventsTotal->GetValue()); } void ProcessorParseTimestampNativeUnittest::TestProcessRegularFormatFailed() { @@ -555,14 +549,11 @@ void ProcessorParseTimestampNativeUnittest::TestProcessRegularFormatFailed() { APSARA_TEST_STREQ_FATAL(CompactJson(inJson).c_str(), 
CompactJson(outJson).c_str()); // check observablity APSARA_TEST_EQUAL_FATAL(0, processor.GetContext().GetProcessProfile().historyFailures); - APSARA_TEST_EQUAL_FATAL(2UL, processorInstance.mProcInRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(strlen(timebuff) * 2, processor.mProcParseInSizeBytes->GetValue()); + APSARA_TEST_EQUAL_FATAL(2UL, processorInstance.mInEventsTotal->GetValue()); // discard history, so output is 0 - APSARA_TEST_EQUAL_FATAL(2UL, processorInstance.mProcOutRecordsTotal->GetValue()); - // size of one timestamp and one nanosecond equals to 8 byte, respectively - APSARA_TEST_EQUAL_FATAL(0UL, processor.mProcParseOutSizeBytes->GetValue()); - APSARA_TEST_EQUAL_FATAL(0UL, processor.mProcDiscardRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(2UL, processor.mProcParseErrorTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(2UL, processorInstance.mOutEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0UL, processor.mDiscardedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(2UL, processor.mOutFailedEventsTotal->GetValue()); } void ProcessorParseTimestampNativeUnittest::TestProcessHistoryDiscard() { @@ -614,13 +605,13 @@ void ProcessorParseTimestampNativeUnittest::TestProcessHistoryDiscard() { // check observablity std::string outJson = eventGroupList[0].ToJsonString(); APSARA_TEST_EQUAL_FATAL(2, processor.GetContext().GetProcessProfile().historyFailures); - APSARA_TEST_EQUAL_FATAL(2UL, processorInstance.mProcInRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(strlen(timebuff) * 2, processor.mProcParseInSizeBytes->GetValue()); + APSARA_TEST_EQUAL_FATAL(2UL, processorInstance.mInEventsTotal->GetValue()); // discard history, so output is 0 - APSARA_TEST_EQUAL_FATAL(0UL, processorInstance.mProcOutRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(0UL, processor.mProcParseOutSizeBytes->GetValue()); - APSARA_TEST_EQUAL_FATAL(2UL, processor.mProcDiscardRecordsTotal->GetValue()); - APSARA_TEST_EQUAL_FATAL(0UL, 
processor.mProcParseErrorTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0UL, processorInstance.mOutEventsTotal->GetValue()); + // event group size is not 0 + APSARA_TEST_NOT_EQUAL_FATAL(0UL, processorInstance.mOutSizeBytes->GetValue()); + APSARA_TEST_EQUAL_FATAL(2UL, processor.mDiscardedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0UL, processor.mOutFailedEventsTotal->GetValue()); } void ProcessorParseTimestampNativeUnittest::TestProcessEventPreciseTimestampLegacy() { diff --git a/core/unittest/processor/ProcessorPromParseMetricNativeUnittest.cpp b/core/unittest/processor/ProcessorPromParseMetricNativeUnittest.cpp index fcdbe4e6bd..62bf5e3dc2 100644 --- a/core/unittest/processor/ProcessorPromParseMetricNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorPromParseMetricNativeUnittest.cpp @@ -48,6 +48,7 @@ void ProcessorParsePrometheusMetricUnittest::TestInit() { string errorMsg; configStr = R"JSON( { + "job_name": "test_job" } )JSON"; @@ -66,6 +67,7 @@ void ProcessorParsePrometheusMetricUnittest::TestProcess() { string errorMsg; configStr = configStr + R"( { + "job_name": "test_job" } )"; diff --git a/core/unittest/processor/ProcessorPromRelabelMetricNativeUnittest.cpp b/core/unittest/processor/ProcessorPromRelabelMetricNativeUnittest.cpp index ccfb33b83f..2b295e60da 100644 --- a/core/unittest/processor/ProcessorPromRelabelMetricNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorPromRelabelMetricNativeUnittest.cpp @@ -32,6 +32,7 @@ class ProcessorPromRelabelMetricNativeUnittest : public testing::Test { void TestInit(); void TestProcess(); void TestAddAutoMetrics(); + void TestHonorLabels(); PipelineContext mContext; }; @@ -203,7 +204,8 @@ test_metric8{k1="v1", k3="v2", } 9.9410452992e+10 1715829785083 eventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_DURATION, ToString(1.5)); eventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_SCRAPE_RESPONSE_SIZE, ToString(2325)); eventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_UP_STATE, 
ToString(true)); - eventGroup.SetMetadata(EventGroupMetaKey::PROMETHEUS_INSTANCE, string("localhost:8080")); + eventGroup.SetTag(string("instance"), "localhost:8080"); + eventGroup.SetTag(string("job"), "test_job"); processor.AddAutoMetrics(eventGroup); APSARA_TEST_EQUAL((size_t)15, eventGroup.GetEvents().size()); @@ -218,9 +220,64 @@ test_metric8{k1="v1", k3="v2", } 9.9410452992e+10 1715829785083 APSARA_TEST_EQUAL("test_job", eventGroup.GetEvents().at(14).Cast().GetTag("job")); } +void ProcessorPromRelabelMetricNativeUnittest::TestHonorLabels() { + // make config + Json::Value config; + + ProcessorPromRelabelMetricNative processor; + processor.SetContext(mContext); + + string configStr; + string errorMsg; + configStr = configStr + R"JSON( + { + "job_name": "test_job", + "scrape_timeout": "15s", + "honor_labels": true + } + )JSON"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, config, errorMsg)); + + // init + APSARA_TEST_TRUE(processor.Init(config)); + + // make events + auto parser = TextParser(); + string rawData = R"""( +# begin +test_metric1{k1="v1", k2="v2"} 1.0 + test_metric2{k1="v1", k2="v2"} 2.0 1234567890 +test_metric3{k1="v1",k2="v2"} 9.9410452992e+10 + test_metric4{k1="v1",k2="v2"} 9.9410452992e+10 1715829785083 + test_metric5{k1="v1", k2="v2" } 9.9410452992e+10 1715829785083 +test_metric6{k1="v1",k2="v2",} 9.9410452992e+10 1715829785083 +test_metric7{k1="v1",k3="2", } 9.9410452992e+10 1715829785083 +test_metric8{k1="v1", k3="v2", } 9.9410452992e+10 1715829785083 +# end + )"""; + auto eventGroup = parser.Parse(rawData, 0, 0); + + // set global labels + eventGroup.SetTag(string("k3"), string("v3")); + APSARA_TEST_EQUAL((size_t)8, eventGroup.GetEvents().size()); + auto targetTags = eventGroup.GetTags(); + // honor_labels is true + processor.ProcessEvent(eventGroup.MutableEvents()[0], targetTags); + APSARA_TEST_EQUAL("v3", eventGroup.GetEvents().at(0).Cast().GetTag(string("k3"))); + processor.ProcessEvent(eventGroup.MutableEvents()[6], targetTags); + 
APSARA_TEST_EQUAL("2", eventGroup.GetEvents().at(6).Cast().GetTag(string("k3")).to_string()); + + // honor_labels is false + processor.mScrapeConfigPtr->mHonorLabels = false; + processor.ProcessEvent(eventGroup.MutableEvents()[7], targetTags); + APSARA_TEST_FALSE(eventGroup.GetEvents().at(7).Cast().HasTag(string("k3"))); + APSARA_TEST_EQUAL("v2", eventGroup.GetEvents().at(7).Cast().GetTag(string("exported_k3")).to_string()); +} + UNIT_TEST_CASE(ProcessorPromRelabelMetricNativeUnittest, TestInit) UNIT_TEST_CASE(ProcessorPromRelabelMetricNativeUnittest, TestProcess) UNIT_TEST_CASE(ProcessorPromRelabelMetricNativeUnittest, TestAddAutoMetrics) +UNIT_TEST_CASE(ProcessorPromRelabelMetricNativeUnittest, TestHonorLabels) } // namespace logtail diff --git a/core/unittest/processor/ProcessorSplitLogStringNativeUnittest.cpp b/core/unittest/processor/ProcessorSplitLogStringNativeUnittest.cpp index aa30e8e87f..32e158ceac 100644 --- a/core/unittest/processor/ProcessorSplitLogStringNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorSplitLogStringNativeUnittest.cpp @@ -40,7 +40,7 @@ UNIT_TEST_CASE(ProcessorSplitLogStringNativeUnittest, TestProcessJson) UNIT_TEST_CASE(ProcessorSplitLogStringNativeUnittest, TestProcessCommon) PluginInstance::PluginMeta getPluginMeta(){ - PluginInstance::PluginMeta pluginMeta{"testgetPluginID", "testNodeID", "testNodeChildID"}; + PluginInstance::PluginMeta pluginMeta{"1"}; return pluginMeta; } diff --git a/core/unittest/processor/ProcessorSplitMultilineLogStringNativeUnittest.cpp b/core/unittest/processor/ProcessorSplitMultilineLogStringNativeUnittest.cpp index 61ceeddc30..3b843916ea 100644 --- a/core/unittest/processor/ProcessorSplitMultilineLogStringNativeUnittest.cpp +++ b/core/unittest/processor/ProcessorSplitMultilineLogStringNativeUnittest.cpp @@ -64,7 +64,7 @@ void ProcessorSplitMultilineLogDisacardUnmatchUnittest::TestLogSplitWithBeginCon // ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative 
ProcessorSplitMultilineLogStringNative; ProcessorSplitMultilineLogStringNative.SetContext(mContext); - ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(ProcessorSplitMultilineLogStringNative.Init(config)); // case: unmatch + unmatch // input: 1 event, 2 lines @@ -324,11 +324,11 @@ void ProcessorSplitMultilineLogDisacardUnmatchUnittest::TestLogSplitWithBeginCon // metric APSARA_TEST_EQUAL_FATAL(0 + 1 + 1 + 2 + 1 + 0, - ProcessorSplitMultilineLogStringNative.mProcMatchedEventsCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mMatchedEventsTotal->GetValue()); APSARA_TEST_EQUAL_FATAL(0 + 1 + 3 + 2 + 3 + 0, - ProcessorSplitMultilineLogStringNative.mProcMatchedLinesCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mMatchedLinesTotal->GetValue()); APSARA_TEST_EQUAL_FATAL(2 + 1 + 2 + 1 + 1 + 1, - ProcessorSplitMultilineLogStringNative.mProcUnmatchedLinesCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mUnmatchedLinesTotal->GetValue()); } void ProcessorSplitMultilineLogDisacardUnmatchUnittest::TestLogSplitWithBeginEnd() { @@ -342,7 +342,7 @@ void ProcessorSplitMultilineLogDisacardUnmatchUnittest::TestLogSplitWithBeginEnd // ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative; ProcessorSplitMultilineLogStringNative.SetContext(mContext); - ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(ProcessorSplitMultilineLogStringNative.Init(config)); // case: unmatch + unmatch // input: 1 event, 2 lines @@ -577,11 +577,11 @@ void 
ProcessorSplitMultilineLogDisacardUnmatchUnittest::TestLogSplitWithBeginEnd // metric APSARA_TEST_EQUAL_FATAL(0 + 0 + 1 + 0 + 1 + 1, - ProcessorSplitMultilineLogStringNative.mProcMatchedEventsCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mMatchedEventsTotal->GetValue()); APSARA_TEST_EQUAL_FATAL(0 + 0 + 2 + 0 + 2 + 3, - ProcessorSplitMultilineLogStringNative.mProcMatchedLinesCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mMatchedLinesTotal->GetValue()); APSARA_TEST_EQUAL_FATAL(2 + 3 + 2 + 2 + 1 + 1, - ProcessorSplitMultilineLogStringNative.mProcUnmatchedLinesCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mUnmatchedLinesTotal->GetValue()); } void ProcessorSplitMultilineLogDisacardUnmatchUnittest::TestLogSplitWithBegin() { @@ -595,7 +595,7 @@ void ProcessorSplitMultilineLogDisacardUnmatchUnittest::TestLogSplitWithBegin() // ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative; ProcessorSplitMultilineLogStringNative.SetContext(mContext); - ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(ProcessorSplitMultilineLogStringNative.Init(config)); // case: unmatch + start // input: 1 event, 2 lines @@ -826,10 +826,10 @@ void ProcessorSplitMultilineLogDisacardUnmatchUnittest::TestLogSplitWithBegin() // metric APSARA_TEST_EQUAL_FATAL(1 + 0 + 2 + 1 + 1, - ProcessorSplitMultilineLogStringNative.mProcMatchedEventsCnt->GetValue()); - APSARA_TEST_EQUAL_FATAL(1 + 0 + 2 + 2 + 3, ProcessorSplitMultilineLogStringNative.mProcMatchedLinesCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mMatchedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(1 + 0 + 2 + 2 + 3, ProcessorSplitMultilineLogStringNative.mMatchedLinesTotal->GetValue()); APSARA_TEST_EQUAL_FATAL(1 + 1 + 0 + 0 + 
1, - ProcessorSplitMultilineLogStringNative.mProcUnmatchedLinesCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mUnmatchedLinesTotal->GetValue()); } void ProcessorSplitMultilineLogDisacardUnmatchUnittest::TestLogSplitWithContinueEnd() { @@ -843,7 +843,7 @@ void ProcessorSplitMultilineLogDisacardUnmatchUnittest::TestLogSplitWithContinue // ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative; ProcessorSplitMultilineLogStringNative.SetContext(mContext); - ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(ProcessorSplitMultilineLogStringNative.Init(config)); // case: unmatch // input: 1 event, 1 line @@ -1030,10 +1030,10 @@ void ProcessorSplitMultilineLogDisacardUnmatchUnittest::TestLogSplitWithContinue // metric APSARA_TEST_EQUAL_FATAL(0 + 0 + 1 + 0 + 1, - ProcessorSplitMultilineLogStringNative.mProcMatchedEventsCnt->GetValue()); - APSARA_TEST_EQUAL_FATAL(0 + 0 + 3 + 0 + 1, ProcessorSplitMultilineLogStringNative.mProcMatchedLinesCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mMatchedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0 + 0 + 3 + 0 + 1, ProcessorSplitMultilineLogStringNative.mMatchedLinesTotal->GetValue()); APSARA_TEST_EQUAL_FATAL(1 + 2 + 0 + 1 + 0, - ProcessorSplitMultilineLogStringNative.mProcUnmatchedLinesCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mUnmatchedLinesTotal->GetValue()); } void ProcessorSplitMultilineLogDisacardUnmatchUnittest::TestLogSplitWithEnd() { @@ -1046,7 +1046,7 @@ void ProcessorSplitMultilineLogDisacardUnmatchUnittest::TestLogSplitWithEnd() { // ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative; ProcessorSplitMultilineLogStringNative.SetContext(mContext); - 
ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(ProcessorSplitMultilineLogStringNative.Init(config)); // case: end // input: 1 event, 1 line @@ -1173,9 +1173,9 @@ void ProcessorSplitMultilineLogDisacardUnmatchUnittest::TestLogSplitWithEnd() { } // metric - APSARA_TEST_EQUAL_FATAL(1 + 0 + 1, ProcessorSplitMultilineLogStringNative.mProcMatchedEventsCnt->GetValue()); - APSARA_TEST_EQUAL_FATAL(1 + 0 + 2, ProcessorSplitMultilineLogStringNative.mProcMatchedLinesCnt->GetValue()); - APSARA_TEST_EQUAL_FATAL(0 + 1 + 1, ProcessorSplitMultilineLogStringNative.mProcUnmatchedLinesCnt->GetValue()); + APSARA_TEST_EQUAL_FATAL(1 + 0 + 1, ProcessorSplitMultilineLogStringNative.mMatchedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(1 + 0 + 2, ProcessorSplitMultilineLogStringNative.mMatchedLinesTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0 + 1 + 1, ProcessorSplitMultilineLogStringNative.mUnmatchedLinesTotal->GetValue()); } class ProcessorSplitMultilineLogKeepUnmatchUnittest : public ::testing::Test { @@ -1207,7 +1207,7 @@ void ProcessorSplitMultilineLogKeepUnmatchUnittest::TestLogSplitWithBeginContinu // ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative; ProcessorSplitMultilineLogStringNative.SetContext(mContext); - ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(ProcessorSplitMultilineLogStringNative.Init(config)); // case: unmatch + unmatch // input: 1 event, 2 lines @@ -1560,11 +1560,11 @@ void ProcessorSplitMultilineLogKeepUnmatchUnittest::TestLogSplitWithBeginContinu // metric 
APSARA_TEST_EQUAL_FATAL(0 + 1 + 1 + 2 + 1 + 0, - ProcessorSplitMultilineLogStringNative.mProcMatchedEventsCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mMatchedEventsTotal->GetValue()); APSARA_TEST_EQUAL_FATAL(0 + 1 + 3 + 2 + 3 + 0, - ProcessorSplitMultilineLogStringNative.mProcMatchedLinesCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mMatchedLinesTotal->GetValue()); APSARA_TEST_EQUAL_FATAL(2 + 1 + 2 + 1 + 1 + 1, - ProcessorSplitMultilineLogStringNative.mProcUnmatchedLinesCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mUnmatchedLinesTotal->GetValue()); } void ProcessorSplitMultilineLogKeepUnmatchUnittest::TestLogSplitWithBeginEnd() { @@ -1579,7 +1579,7 @@ void ProcessorSplitMultilineLogKeepUnmatchUnittest::TestLogSplitWithBeginEnd() { // ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative; ProcessorSplitMultilineLogStringNative.SetContext(mContext); - ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(ProcessorSplitMultilineLogStringNative.Init(config)); // case: unmatch + unmatch // input: 1 event, 2 lines @@ -1942,11 +1942,11 @@ void ProcessorSplitMultilineLogKeepUnmatchUnittest::TestLogSplitWithBeginEnd() { // metric APSARA_TEST_EQUAL_FATAL(0 + 0 + 1 + 0 + 1 + 1, - ProcessorSplitMultilineLogStringNative.mProcMatchedEventsCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mMatchedEventsTotal->GetValue()); APSARA_TEST_EQUAL_FATAL(0 + 0 + 2 + 0 + 2 + 3, - ProcessorSplitMultilineLogStringNative.mProcMatchedLinesCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mMatchedLinesTotal->GetValue()); APSARA_TEST_EQUAL_FATAL(2 + 3 + 2 + 2 + 1 + 1, - ProcessorSplitMultilineLogStringNative.mProcUnmatchedLinesCnt->GetValue()); + 
ProcessorSplitMultilineLogStringNative.mUnmatchedLinesTotal->GetValue()); } void ProcessorSplitMultilineLogKeepUnmatchUnittest::TestLogSplitWithBegin() { @@ -1959,7 +1959,7 @@ void ProcessorSplitMultilineLogKeepUnmatchUnittest::TestLogSplitWithBegin() { // ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative; ProcessorSplitMultilineLogStringNative.SetContext(mContext); - ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(ProcessorSplitMultilineLogStringNative.Init(config)); // case: unmatch + start // input: 1 event, 2 lines @@ -2214,10 +2214,10 @@ void ProcessorSplitMultilineLogKeepUnmatchUnittest::TestLogSplitWithBegin() { // metric APSARA_TEST_EQUAL_FATAL(1 + 0 + 2 + 1 + 1, - ProcessorSplitMultilineLogStringNative.mProcMatchedEventsCnt->GetValue()); - APSARA_TEST_EQUAL_FATAL(1 + 0 + 2 + 2 + 2, ProcessorSplitMultilineLogStringNative.mProcMatchedLinesCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mMatchedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(1 + 0 + 2 + 2 + 2, ProcessorSplitMultilineLogStringNative.mMatchedLinesTotal->GetValue()); APSARA_TEST_EQUAL_FATAL(1 + 1 + 0 + 0 + 0, - ProcessorSplitMultilineLogStringNative.mProcUnmatchedLinesCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mUnmatchedLinesTotal->GetValue()); } void ProcessorSplitMultilineLogKeepUnmatchUnittest::TestLogSplitWithContinueEnd() { @@ -2231,7 +2231,7 @@ void ProcessorSplitMultilineLogKeepUnmatchUnittest::TestLogSplitWithContinueEnd( // ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative; ProcessorSplitMultilineLogStringNative.SetContext(mContext); - 
ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(ProcessorSplitMultilineLogStringNative.Init(config)); // case: unmatch // input: 1 event, 1 line @@ -2476,10 +2476,10 @@ void ProcessorSplitMultilineLogKeepUnmatchUnittest::TestLogSplitWithContinueEnd( // metric APSARA_TEST_EQUAL_FATAL(0 + 0 + 1 + 0 + 1, - ProcessorSplitMultilineLogStringNative.mProcMatchedEventsCnt->GetValue()); - APSARA_TEST_EQUAL_FATAL(0 + 0 + 3 + 0 + 1, ProcessorSplitMultilineLogStringNative.mProcMatchedLinesCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mMatchedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0 + 0 + 3 + 0 + 1, ProcessorSplitMultilineLogStringNative.mMatchedLinesTotal->GetValue()); APSARA_TEST_EQUAL_FATAL(1 + 2 + 0 + 1 + 0, - ProcessorSplitMultilineLogStringNative.mProcUnmatchedLinesCnt->GetValue()); + ProcessorSplitMultilineLogStringNative.mUnmatchedLinesTotal->GetValue()); } void ProcessorSplitMultilineLogKeepUnmatchUnittest::TestLogSplitWithEnd() { @@ -2497,7 +2497,7 @@ void ProcessorSplitMultilineLogKeepUnmatchUnittest::TestLogSplitWithEnd() { // ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative ProcessorSplitMultilineLogStringNative; ProcessorSplitMultilineLogStringNative.SetContext(mContext); - ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1", "1", "1"); + ProcessorSplitMultilineLogStringNative.SetMetricsRecordRef(ProcessorSplitMultilineLogStringNative::sName, "1"); APSARA_TEST_TRUE_FATAL(ProcessorSplitMultilineLogStringNative.Init(config)); // case: end // input: 1 event, 1 line @@ -2649,9 +2649,9 @@ void ProcessorSplitMultilineLogKeepUnmatchUnittest::TestLogSplitWithEnd() { } // metric - APSARA_TEST_EQUAL_FATAL(1 + 0 + 1, 
ProcessorSplitMultilineLogStringNative.mProcMatchedEventsCnt->GetValue()); - APSARA_TEST_EQUAL_FATAL(1 + 0 + 2, ProcessorSplitMultilineLogStringNative.mProcMatchedLinesCnt->GetValue()); - APSARA_TEST_EQUAL_FATAL(0 + 1 + 1, ProcessorSplitMultilineLogStringNative.mProcUnmatchedLinesCnt->GetValue()); + APSARA_TEST_EQUAL_FATAL(1 + 0 + 1, ProcessorSplitMultilineLogStringNative.mMatchedEventsTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(1 + 0 + 2, ProcessorSplitMultilineLogStringNative.mMatchedLinesTotal->GetValue()); + APSARA_TEST_EQUAL_FATAL(0 + 1 + 1, ProcessorSplitMultilineLogStringNative.mUnmatchedLinesTotal->GetValue()); } } // namespace logtail diff --git a/core/unittest/prometheus/CMakeLists.txt b/core/unittest/prometheus/CMakeLists.txt index e0a78e48f5..712bcd7970 100644 --- a/core/unittest/prometheus/CMakeLists.txt +++ b/core/unittest/prometheus/CMakeLists.txt @@ -15,9 +15,11 @@ cmake_minimum_required(VERSION 3.22) project(prometheus_unittest) +add_executable(prom_self_monitor_unittest PromSelfMonitorUnittest.cpp) +target_link_libraries(prom_self_monitor_unittest ${UT_BASE_TARGET}) + add_executable(labels_unittest LabelsUnittest.cpp) target_link_libraries(labels_unittest ${UT_BASE_TARGET}) -target_link_libraries(labels_unittest ${UT_BASE_TARGET}) add_executable(relabel_unittest RelabelUnittest.cpp) target_link_libraries(relabel_unittest ${UT_BASE_TARGET}) @@ -48,6 +50,7 @@ target_link_libraries(prom_asyn_unittest ${UT_BASE_TARGET}) include(GoogleTest) +gtest_discover_tests(prom_self_monitor_unittest) gtest_discover_tests(labels_unittest) gtest_discover_tests(relabel_unittest) gtest_discover_tests(scrape_scheduler_unittest) diff --git a/core/unittest/prometheus/LabelsUnittest.cpp b/core/unittest/prometheus/LabelsUnittest.cpp index ffee88562e..1b17de1148 100644 --- a/core/unittest/prometheus/LabelsUnittest.cpp +++ b/core/unittest/prometheus/LabelsUnittest.cpp @@ -27,7 +27,7 @@ namespace logtail { class LabelsUnittest : public testing::Test { public: void 
TestGet(); - void TestPush(); + void TestSet(); void TestRange(); void TestHash(); void TestRemoveMetaLabels(); @@ -47,9 +47,9 @@ class LabelsBuilderUnittest : public testing::Test { void LabelsUnittest::TestRemoveMetaLabels() { Labels labels; - labels.Push(Label{"host", "172.17.0.3:9100"}); - labels.Push(Label{"__meta_port", "172.17.0.3"}); - labels.Push(Label{"port", "9100"}); + labels.Set("host", "172.17.0.3:9100"); + labels.Set("__meta_port", "172.17.0.3"); + labels.Set("port", "9100"); APSARA_TEST_EQUAL(3UL, labels.Size()); labels.RemoveMetaLabels(); @@ -60,9 +60,9 @@ void LabelsUnittest::TestRemoveMetaLabels() { void LabelsUnittest::TestHash() { Labels labels; - labels.Push(Label{"host", "172.17.0.3:9100"}); - labels.Push(Label{"ip", "172.17.0.3"}); - labels.Push(Label{"port", "9100"}); + labels.Set("host", "172.17.0.3:9100"); + labels.Set("ip", "172.17.0.3"); + labels.Set("port", "9100"); uint64_t hash = labels.Hash(); uint64_t expect = prometheus::OFFSET64; @@ -79,7 +79,7 @@ void LabelsUnittest::TestHash() { void LabelsUnittest::TestGet() { Labels labels; - labels.Push(Label{"host", "172.17.0.3:9100"}); + labels.Set("host", "172.17.0.3:9100"); APSARA_TEST_EQUAL(1UL, labels.Size()); APSARA_TEST_EQUAL("", labels.Get("hosts")); @@ -87,10 +87,10 @@ void LabelsUnittest::TestGet() { APSARA_TEST_EQUAL("172.17.0.3:9100", labels.Get("host")); } -void LabelsUnittest::TestPush() { +void LabelsUnittest::TestSet() { Labels labels; - labels.Push(Label{"host", "172.17.0.3:9100"}); + labels.Set("host", "172.17.0.3:9100"); APSARA_TEST_EQUAL("172.17.0.3:9100", labels.Get("host")); } @@ -102,108 +102,25 @@ void LabelsUnittest::TestRange() { testMap["ip"] = "172.17.0.3"; testMap["port"] = "9100"; - labels.Push(Label{"host", "172.17.0.3:9100"}); - labels.Push(Label{"ip", "172.17.0.3"}); - labels.Push(Label{"port", "9100"}); + labels.Set("host", "172.17.0.3:9100"); + labels.Set("ip", "172.17.0.3"); + labels.Set("port", "9100"); map resMap; - labels.Range([&resMap](Label l) { 
resMap[l.name] = l.value; }); + labels.Range([&resMap](const string& k, const string& v) { resMap[k] = v; }); APSARA_TEST_EQUAL(testMap, resMap); } -void LabelsBuilderUnittest::TestReset() { - LabelsBuilder lb; - Labels labels; - labels.Push(Label{"host", ""}); - lb.Reset(labels); - APSARA_TEST_EQUAL("", lb.mBase.Get("host")); - APSARA_TEST_EQUAL(1UL, lb.mDeleteLabelNameList.count("host")); -} - -void LabelsBuilderUnittest::TestDeleteLabel() { - LabelsBuilder lb; - Labels labels; - - vector nameList{"host"}; - lb.DeleteLabel(nameList); - - labels.Push(Label{"host", "172.17.0.3:9100"}); - lb.Reset(labels); - APSARA_TEST_EQUAL("", lb.GetLabels().Get("host")); -} - -void LabelsBuilderUnittest::TestSet() { - LabelsBuilder lb; - Labels labels; - labels.Push(Label{"host", "172.17.0.3:9100"}); - lb.Reset(labels); - APSARA_TEST_EQUAL("172.17.0.3:9100", lb.Get("host")); - - lb.mAddLabelList.emplace("host", "127.0.0.1"); - - lb.Set("host", "172.17.0.3:9300"); - APSARA_TEST_EQUAL("172.17.0.3:9300", lb.Get("host")); - - lb.Set("host", ""); - APSARA_TEST_EQUAL("", lb.Get("host")); -} - -void LabelsBuilderUnittest::TestGet() { - LabelsBuilder lb; - Labels labels; - labels.Push(Label{"host", "172.17.0.3:9100"}); - lb.Reset(labels); - APSARA_TEST_EQUAL("172.17.0.3:9100", lb.Get("host")); -} - -void LabelsBuilderUnittest::TestLabels() { - LabelsBuilder lb; - Labels labels; - labels.Push(Label{"host", "172.17.0.3:9100"}); - lb.Reset(labels); - - vector nameList{"host"}; - lb.DeleteLabel(nameList); - - - APSARA_TEST_EQUAL("", lb.GetLabels().Get("host")); -} -void LabelsBuilderUnittest::TestRange() { - LabelsBuilder lb; - Labels labels; - labels.Push(Label{"host", "172.17.0.3:9100"}); - labels.Push(Label{"ip", "172.17.0.3"}); - labels.Push(Label{"port", "9100"}); - lb.Reset(labels); - - vector nameList{"host"}; - lb.DeleteLabel(nameList); - - map resMap; - lb.Range([&resMap](Label l) { resMap[l.name] = l.value; }); - - map expectMap; - expectMap["ip"] = "172.17.0.3"; - 
expectMap["port"] = "9100"; - - APSARA_TEST_EQUAL(expectMap, resMap); -} UNIT_TEST_CASE(LabelsUnittest, TestGet) -UNIT_TEST_CASE(LabelsUnittest, TestPush) +UNIT_TEST_CASE(LabelsUnittest, TestSet) UNIT_TEST_CASE(LabelsUnittest, TestRange) UNIT_TEST_CASE(LabelsUnittest, TestHash) UNIT_TEST_CASE(LabelsUnittest, TestRemoveMetaLabels) -UNIT_TEST_CASE(LabelsBuilderUnittest, TestReset) -UNIT_TEST_CASE(LabelsBuilderUnittest, TestDeleteLabel) -UNIT_TEST_CASE(LabelsBuilderUnittest, TestSet) -UNIT_TEST_CASE(LabelsBuilderUnittest, TestGet) -UNIT_TEST_CASE(LabelsBuilderUnittest, TestLabels) -UNIT_TEST_CASE(LabelsBuilderUnittest, TestRange) } // namespace logtail diff --git a/core/unittest/prometheus/PromAsynUnittest.cpp b/core/unittest/prometheus/PromAsynUnittest.cpp index 67984adbb8..3ab400ef21 100644 --- a/core/unittest/prometheus/PromAsynUnittest.cpp +++ b/core/unittest/prometheus/PromAsynUnittest.cpp @@ -13,7 +13,7 @@ class PromAsynUnittest : public testing::Test { }; void PromAsynUnittest::TestExecTime() { - auto future = std::make_shared(); + auto future = std::make_shared>(); auto now = std::chrono::system_clock::now(); bool exec = false; future->AddDoneCallback([&exec, now](const HttpResponse&, uint64_t timestampMilliSec) { @@ -21,6 +21,7 @@ void PromAsynUnittest::TestExecTime() { std::chrono::duration_cast(now.time_since_epoch()).count()); APSARA_TEST_TRUE(exec); + return true; }); auto request = std::make_shared( "http", false, "127.0.0.1", 8080, "/", "", map(), "", 10, 3, future); diff --git a/core/unittest/prometheus/PromSelfMonitorUnittest.cpp b/core/unittest/prometheus/PromSelfMonitorUnittest.cpp new file mode 100644 index 0000000000..45501ca356 --- /dev/null +++ b/core/unittest/prometheus/PromSelfMonitorUnittest.cpp @@ -0,0 +1,56 @@ + +#include "monitor/metric_constants/MetricConstants.h" +#include "prometheus/Constants.h" +#include "prometheus/PromSelfMonitor.h" +#include "unittest/Unittest.h" +using namespace std; + +namespace logtail { +class 
PromSelfMonitorUnittest : public ::testing::Test { +public: + void TestCounterAdd(); + void TestIntGaugeSet(); +}; + +void PromSelfMonitorUnittest::TestCounterAdd() { + auto selfMonitor = std::make_shared(); + std::unordered_map testMetricKeys = { + {METRIC_PLUGIN_PROM_SUBSCRIBE_TOTAL, MetricType::METRIC_TYPE_COUNTER}, + }; + selfMonitor->InitMetricManager(testMetricKeys, MetricLabels{}); + + selfMonitor->AddCounter(METRIC_PLUGIN_PROM_SUBSCRIBE_TOTAL, 200, 999); + + // check result + auto metric = selfMonitor->mPromStatusMap["2XX"]->GetCounter(METRIC_PLUGIN_PROM_SUBSCRIBE_TOTAL); + APSARA_TEST_EQUAL("plugin_prom_subscribe_total", metric->GetName()); + APSARA_TEST_EQUAL(999ULL, metric->GetValue()); + selfMonitor->AddCounter(METRIC_PLUGIN_PROM_SUBSCRIBE_TOTAL, 200); + APSARA_TEST_EQUAL(1000ULL, metric->GetValue()); +} + +void PromSelfMonitorUnittest::TestIntGaugeSet() { + auto selfMonitor = std::make_shared(); + std::unordered_map testMetricKeys = { + {METRIC_PLUGIN_PROM_SUBSCRIBE_TARGETS, MetricType::METRIC_TYPE_INT_GAUGE}, + }; + selfMonitor->InitMetricManager(testMetricKeys, MetricLabels{}); + + auto metricLabels = std::map({{"test-label", "test-value"}}); + selfMonitor->SetIntGauge(METRIC_PLUGIN_PROM_SUBSCRIBE_TARGETS, 200, 999); + + // check result + auto metric = selfMonitor->mPromStatusMap["2XX"]->GetIntGauge(METRIC_PLUGIN_PROM_SUBSCRIBE_TARGETS); + APSARA_TEST_EQUAL("plugin_prom_subscribe_targets", metric->GetName()); + APSARA_TEST_EQUAL(999ULL, metric->GetValue()); + selfMonitor->SetIntGauge(METRIC_PLUGIN_PROM_SUBSCRIBE_TARGETS, 200, 0); + APSARA_TEST_EQUAL(0ULL, metric->GetValue()); +} + + +UNIT_TEST_CASE(PromSelfMonitorUnittest, TestCounterAdd) +UNIT_TEST_CASE(PromSelfMonitorUnittest, TestIntGaugeSet) + +} // namespace logtail + +UNIT_TEST_MAIN diff --git a/core/unittest/prometheus/PrometheusInputRunnerUnittest.cpp b/core/unittest/prometheus/PrometheusInputRunnerUnittest.cpp index ef261324b5..8b8501a358 100644 --- 
a/core/unittest/prometheus/PrometheusInputRunnerUnittest.cpp +++ b/core/unittest/prometheus/PrometheusInputRunnerUnittest.cpp @@ -15,10 +15,9 @@ */ #include +#include -#include "Common.h" -#include "JsonUtil.h" -#include "json/value.h" +#include "common/JsonUtil.h" #include "prometheus/PrometheusInputRunner.h" #include "unittest/Unittest.h" @@ -26,49 +25,18 @@ using namespace std; namespace logtail { -// InputRunnerMockHttpClient -class InputRunnerMockHttpClient : public sdk::CurlClient { -public: - void Send(const std::string& httpMethod, - const std::string& host, - int32_t port, - const std::string& url, - const std::string& queryString, - const std::map& header, - const std::string& body, - int32_t timeout, - sdk::HttpMessage& httpMessage, - const std::string& intf, - bool httpsFlag); -}; - -void InputRunnerMockHttpClient::Send(const std::string&, - const std::string&, - const int32_t, - const std::string& url, - const std::string&, - const std::map&, - const std::string&, - const int32_t, - sdk::HttpMessage& httpMessage, - const std::string&, - const bool) { - httpMessage.statusCode = 200; -} - class PrometheusInputRunnerUnittest : public testing::Test { public: void OnSuccessfulStartAndStop(); void TestHasRegisteredPlugins(); void TestMulitStartAndStop(); + void TestGetAllProjects(); protected: void SetUp() override { PrometheusInputRunner::GetInstance()->mServiceHost = "127.0.0.1"; PrometheusInputRunner::GetInstance()->mServicePort = 8080; PrometheusInputRunner::GetInstance()->mPodName = "test_pod"; - - PrometheusInputRunner::GetInstance()->mClient = make_unique(); } void TearDown() override {} @@ -95,14 +63,15 @@ void PrometheusInputRunnerUnittest::OnSuccessfulStartAndStop() { std::unique_ptr scrapeJobPtr = make_unique(); APSARA_TEST_TRUE(scrapeJobPtr->Init(config)); - PrometheusInputRunner::GetInstance()->mClient = make_unique(); + auto defaultLabels = MetricLabels(); + string defaultProject = "default_project"; // update scrapeJob - 
PrometheusInputRunner::GetInstance()->UpdateScrapeInput(std::move(scrapeJobPtr)); + PrometheusInputRunner::GetInstance()->UpdateScrapeInput(std::move(scrapeJobPtr), defaultLabels, defaultProject); PrometheusInputRunner::GetInstance()->Init(); APSARA_TEST_TRUE(PrometheusInputRunner::GetInstance()->mTargetSubscriberSchedulerMap.find("test_job") != PrometheusInputRunner::GetInstance()->mTargetSubscriberSchedulerMap.end()); - + APSARA_TEST_EQUAL(PrometheusInputRunner::GetInstance()->mJobNameToProjectNameMap["test_job"], defaultProject); // remove PrometheusInputRunner::GetInstance()->RemoveScrapeInput("test_job"); @@ -114,7 +83,6 @@ void PrometheusInputRunnerUnittest::OnSuccessfulStartAndStop() { } void PrometheusInputRunnerUnittest::TestHasRegisteredPlugins() { - PrometheusInputRunner::GetInstance()->mClient = make_unique(); PrometheusInputRunner::GetInstance()->Init(); // not in use @@ -138,13 +106,14 @@ void PrometheusInputRunnerUnittest::TestHasRegisteredPlugins() { std::unique_ptr scrapeJobPtr = make_unique(); APSARA_TEST_TRUE(scrapeJobPtr->Init(config)); - PrometheusInputRunner::GetInstance()->UpdateScrapeInput(std::move(scrapeJobPtr)); + auto defaultLabels = MetricLabels(); + string defaultProject = "default_project"; + PrometheusInputRunner::GetInstance()->UpdateScrapeInput(std::move(scrapeJobPtr), defaultLabels, defaultProject); APSARA_TEST_TRUE(PrometheusInputRunner::GetInstance()->HasRegisteredPlugins()); PrometheusInputRunner::GetInstance()->Stop(); } void PrometheusInputRunnerUnittest::TestMulitStartAndStop() { - PrometheusInputRunner::GetInstance()->mClient = make_unique(); PrometheusInputRunner::GetInstance()->Init(); { std::lock_guard lock(PrometheusInputRunner::GetInstance()->mStartMutex); @@ -172,9 +141,56 @@ void PrometheusInputRunnerUnittest::TestMulitStartAndStop() { } } +void PrometheusInputRunnerUnittest::TestGetAllProjects() { + // build scrape job and target + string errorMsg; + string configStr; + Json::Value config; + + // test_job1 + 
configStr = R"JSON( + { + "job_name": "test_job1", + "scheme": "http", + "metrics_path": "/metrics", + "scrape_interval": "30s", + "scrape_timeout": "30s" + } + )JSON"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, config, errorMsg)); + + std::unique_ptr scrapeJobPtr1 = make_unique(); + APSARA_TEST_TRUE(scrapeJobPtr1->Init(config)); + auto defaultLabels = MetricLabels(); + string defaultProject = "default_project"; + // update scrapeJob + PrometheusInputRunner::GetInstance()->UpdateScrapeInput(std::move(scrapeJobPtr1), defaultLabels, defaultProject); + + // test_job2 + configStr = R"JSON( + { + "job_name": "test_job2", + "scheme": "http", + "metrics_path": "/metrics", + "scrape_interval": "30s", + "scrape_timeout": "30s" + } + )JSON"; + APSARA_TEST_TRUE(ParseJsonTable(configStr, config, errorMsg)); + std::unique_ptr scrapeJobPtr2 = make_unique(); + APSARA_TEST_TRUE(scrapeJobPtr2->Init(config)); + defaultProject = "default_project2"; + // update scrapeJob + PrometheusInputRunner::GetInstance()->UpdateScrapeInput(std::move(scrapeJobPtr2), defaultLabels, defaultProject); + + // Runner use map to store scrape job, so the order is test_job1, test_job2 + APSARA_TEST_TRUE(PrometheusInputRunner::GetInstance()->GetAllProjects() == "default_project default_project2"); +} + UNIT_TEST_CASE(PrometheusInputRunnerUnittest, OnSuccessfulStartAndStop) UNIT_TEST_CASE(PrometheusInputRunnerUnittest, TestHasRegisteredPlugins) UNIT_TEST_CASE(PrometheusInputRunnerUnittest, TestMulitStartAndStop) +UNIT_TEST_CASE(PrometheusInputRunnerUnittest, TestGetAllProjects) } // namespace logtail diff --git a/core/unittest/prometheus/RelabelUnittest.cpp b/core/unittest/prometheus/RelabelUnittest.cpp index 1b5b78c1b7..2ca70038c7 100644 --- a/core/unittest/prometheus/RelabelUnittest.cpp +++ b/core/unittest/prometheus/RelabelUnittest.cpp @@ -36,7 +36,18 @@ class ActionConverterUnittest : public testing::Test { class RelabelConfigUnittest : public testing::Test { public: void TestRelabelConfig(); - void 
TestProcess(); + void TestReplace(); + void TestKeep(); + void TestDrop(); + void TestDropEqual(); + void TestHashMod(); + void TestLabelDrop(); + void TestLabelKeep(); + void TestLabelMap(); + void TestKeepEqual(); + void TestLowerCase(); + void TestUpperCase(); + void TestMultiRelabel(); }; @@ -84,9 +95,9 @@ void RelabelConfigUnittest::TestRelabelConfig() { )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - RelabelConfig config = RelabelConfig(configJson); + RelabelConfig config = RelabelConfig(); - APSARA_TEST_EQUAL(true, config.Validate()); + APSARA_TEST_EQUAL(true, config.Init(configJson)); APSARA_TEST_EQUAL(Action::KEEP, config.mAction); // APSARA_TEST_EQUAL("node-exporter", config.regex.get_data()); @@ -98,180 +109,222 @@ void RelabelConfigUnittest::TestRelabelConfig() { APSARA_TEST_EQUAL((uint64_t)222, config.mModulus); } -void RelabelConfigUnittest::TestProcess() { + +void RelabelConfigUnittest::TestReplace() { Json::Value configJson; string configStr; string errorMsg; Labels labels; - labels.Push(Label{"__meta_kubernetes_pod_ip", "172.17.0.3"}); - labels.Push(Label{"__meta_kubernetes_pod_label_app", "node-exporter"}); - vector cfgs; + labels.Set("__meta_kubernetes_pod_ip", "172.17.0.3"); + labels.Set("__meta_kubernetes_pod_label_app", "node-exporter"); // single relabel replace - configStr = configStr + R"( - { + configStr = configStr + R"JSON( + [{ "action": "replace", - "regex": "(.*)" - + ")\",\n" + - R"( + "regex": "(.*)", "replacement": "${1}:9100", "separator": ";", "source_labels": [ "__meta_kubernetes_pod_ip" ], "target_label": "__address__" - } - )"; + }] + )JSON"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - RelabelConfig config = RelabelConfig(configJson); - cfgs.push_back(config); + RelabelConfigList configList = RelabelConfigList(); + APSARA_TEST_TRUE(configList.Init(configJson)); - Labels result; - prometheus::Process(labels, cfgs, result); + Labels result = labels; + configList.Process(result); 
APSARA_TEST_EQUAL((size_t)3, result.Size()); APSARA_TEST_EQUAL("172.17.0.3:9100", result.Get("__address__")); APSARA_TEST_EQUAL("node-exporter", result.Get("__meta_kubernetes_pod_label_app")); APSARA_TEST_EQUAL("172.17.0.3", result.Get("__meta_kubernetes_pod_ip")); - +} +void RelabelConfigUnittest::TestKeep() { + Json::Value configJson; + string configStr; + string errorMsg; + RelabelConfigList configList; + Labels labels; + labels.Set("__meta_kubernetes_pod_ip", "172.17.0.3"); + labels.Set("__meta_kubernetes_pod_label_app", "node-exporter"); // single relabel keep configStr = R"( - { + [{ "action": "keep", "regex": "172.*", "separator": ";", "source_labels": [ "__meta_kubernetes_pod_ip" ] - } + }] )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - config = RelabelConfig(configJson); - cfgs.clear(); - cfgs.push_back(config); - prometheus::Process(labels, cfgs, result); - + configList = RelabelConfigList(); + APSARA_TEST_TRUE(configList.Init(configJson)); + auto result = labels; + configList.Process(result); APSARA_TEST_EQUAL((size_t)2, result.Size()); APSARA_TEST_EQUAL("172.17.0.3", result.Get("__meta_kubernetes_pod_ip")); +} +void RelabelConfigUnittest::TestDrop() { + Json::Value configJson; + string configStr; + string errorMsg; + RelabelConfigList configList; + Labels labels; + labels.Set("__meta_kubernetes_pod_ip", "172.17.0.3"); + labels.Set("__meta_kubernetes_pod_label_app", "node-exporter"); // single relabel drop configStr = R"( - { + [{ "action": "drop", "regex": "172.*", "source_labels": [ "__meta_kubernetes_pod_ip" ] - } + }] )"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - config = RelabelConfig(configJson); - cfgs.clear(); - cfgs.push_back(config); - prometheus::Process(labels, cfgs, result); - APSARA_TEST_EQUAL((size_t)0, result.Size()); - APSARA_TEST_EQUAL("", result.Get("__meta_kubernetes_pod_label_app")); + configList = RelabelConfigList(); + APSARA_TEST_TRUE(configList.Init(configJson)); + auto result 
= labels; + + APSARA_TEST_FALSE(configList.Process(result)); +} +void RelabelConfigUnittest::TestDropEqual() { + Json::Value configJson; + string configStr; + string errorMsg; + RelabelConfigList configList; // relabel dropequal configStr = R"( - { + [{ "action": "dropequal", "regex": "172.*", "source_labels": [ "__meta_kubernetes_pod_ip" ], "target_label": "pod_ip" - } + }] )"; Labels dropEqualLabels; - dropEqualLabels.Push(Label{"__meta_kubernetes_pod_ip", "172.17.0.3"}); - dropEqualLabels.Push(Label{"pod_ip", "172.17.0.3"}); - dropEqualLabels.Push(Label{"__meta_kubernetes_pod_label_app", "node-exporter"}); + dropEqualLabels.Set("__meta_kubernetes_pod_ip", "172.17.0.3"); + dropEqualLabels.Set("pod_ip", "172.17.0.3"); + dropEqualLabels.Set("__meta_kubernetes_pod_label_app", "node-exporter"); APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - config = RelabelConfig(configJson); - cfgs.clear(); - cfgs.push_back(config); - prometheus::Process(dropEqualLabels, cfgs, result); - APSARA_TEST_EQUAL((size_t)0, result.Size()); - APSARA_TEST_EQUAL("", result.Get("__meta_kubernetes_pod_label_app")); + configList = RelabelConfigList(); + APSARA_TEST_TRUE(configList.Init(configJson)); + auto result = dropEqualLabels; + APSARA_TEST_FALSE(configList.Process(result)); +} +void RelabelConfigUnittest::TestKeepEqual() { + Json::Value configJson; + string configStr; + string errorMsg; + RelabelConfigList configList; // relabel keepequal configStr = R"( - { + [{ "action": "keepequal", "regex": "172.*", "source_labels": [ "__meta_kubernetes_pod_ip" ], "target_label": "pod_ip" - } + }] )"; Labels keepEqualLabels; - keepEqualLabels.Push(Label{"__meta_kubernetes_pod_ip", "172.17.0.3"}); - keepEqualLabels.Push(Label{"pod_ip", "172.17.0.3"}); - keepEqualLabels.Push(Label{"__meta_kubernetes_pod_label_app", "node-exporter"}); + keepEqualLabels.Set("__meta_kubernetes_pod_ip", "172.17.0.3"); + keepEqualLabels.Set("pod_ip", "172.17.0.3"); + 
keepEqualLabels.Set("__meta_kubernetes_pod_label_app", "node-exporter"); APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - config = RelabelConfig(configJson); - cfgs.clear(); - cfgs.push_back(config); - prometheus::Process(keepEqualLabels, cfgs, result); + configList = RelabelConfigList(); + APSARA_TEST_TRUE(configList.Init(configJson)); + auto result = keepEqualLabels; + configList.Process(result); APSARA_TEST_EQUAL((size_t)3, result.Size()); APSARA_TEST_EQUAL("172.17.0.3", result.Get("__meta_kubernetes_pod_ip")); +} +void RelabelConfigUnittest::TestLowerCase() { + Json::Value configJson; + string configStr; + string errorMsg; + RelabelConfigList configList; // relabel lowercase configStr = R"( - { + [{ "action": "lowercase", "regex": "172.*", "source_labels": [ "__meta_kubernetes_pod_label_app" ], "target_label": "__meta_kubernetes_pod_label_app" - } + }] )"; Labels lowercaseLabels; - lowercaseLabels.Push(Label{"__meta_kubernetes_pod_ip", "172.17.0.3"}); - lowercaseLabels.Push(Label{"pod_ip", "172.17.0.3"}); - lowercaseLabels.Push(Label{"__meta_kubernetes_pod_label_app", "node-Exporter"}); + lowercaseLabels.Set("__meta_kubernetes_pod_ip", "172.17.0.3"); + lowercaseLabels.Set("pod_ip", "172.17.0.3"); + lowercaseLabels.Set("__meta_kubernetes_pod_label_app", "node-Exporter"); APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - config = RelabelConfig(configJson); - cfgs.clear(); - cfgs.push_back(config); - prometheus::Process(lowercaseLabels, cfgs, result); + configList = RelabelConfigList(); + APSARA_TEST_TRUE(configList.Init(configJson)); + auto result = lowercaseLabels; + configList.Process(result); APSARA_TEST_EQUAL((size_t)3, result.Size()); APSARA_TEST_EQUAL("node-exporter", result.Get("__meta_kubernetes_pod_label_app")); +} +void RelabelConfigUnittest::TestUpperCase() { + Json::Value configJson; + string configStr; + string errorMsg; + RelabelConfigList configList; // relabel uppercase configStr = R"( - { + [{ "action": 
"uppercase", "regex": "172.*", "source_labels": [ "__meta_kubernetes_pod_label_app" ], "target_label": "__meta_kubernetes_pod_label_app" - } + }] )"; Labels uppercaseLabels; - uppercaseLabels.Push(Label{"__meta_kubernetes_pod_ip", "172.17.0.3"}); - uppercaseLabels.Push(Label{"pod_ip", "172.17.0.3"}); - uppercaseLabels.Push(Label{"__meta_kubernetes_pod_label_app", "node-Exporter"}); + uppercaseLabels.Set("__meta_kubernetes_pod_ip", "172.17.0.3"); + uppercaseLabels.Set("pod_ip", "172.17.0.3"); + uppercaseLabels.Set("__meta_kubernetes_pod_label_app", "node-Exporter"); APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - config = RelabelConfig(configJson); - cfgs.clear(); - cfgs.push_back(config); - prometheus::Process(uppercaseLabels, cfgs, result); + configList = RelabelConfigList(); + APSARA_TEST_TRUE(configList.Init(configJson)); + auto result = uppercaseLabels; + configList.Process(result); APSARA_TEST_EQUAL((size_t)3, result.Size()); APSARA_TEST_EQUAL("NODE-EXPORTER", result.Get("__meta_kubernetes_pod_label_app")); +} + +void RelabelConfigUnittest::TestHashMod() { + Json::Value configJson; + string configStr; + string errorMsg; + RelabelConfigList configList; // relabel hashmod configStr = R"( - { + [{ "action": "hashmod", "regex": "172.*", "source_labels": [ @@ -279,126 +332,168 @@ void RelabelConfigUnittest::TestProcess() { ], "target_label": "hash_val", "modulus": 255 - } + }] )"; Labels hashmodLabels; - hashmodLabels.Push(Label{"__meta_kubernetes_pod_ip", "172.17.0.3"}); - hashmodLabels.Push(Label{"pod_ip", "172.17.0.3"}); - hashmodLabels.Push(Label{"__meta_kubernetes_pod_label_app", "node-Exporter"}); + hashmodLabels.Set("__meta_kubernetes_pod_ip", "172.17.0.3"); + hashmodLabels.Set("pod_ip", "172.17.0.3"); + hashmodLabels.Set("__meta_kubernetes_pod_label_app", "node-Exporter"); APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - config = RelabelConfig(configJson); - cfgs.clear(); - cfgs.push_back(config); - 
prometheus::Process(hashmodLabels, cfgs, result); + configList = RelabelConfigList(); + APSARA_TEST_TRUE(configList.Init(configJson)); + auto result = hashmodLabels; + configList.Process(result); APSARA_TEST_EQUAL((size_t)4, result.Size()); APSARA_TEST_TRUE(!result.Get("hash_val").empty()); +} - configStr.clear(); +void RelabelConfigUnittest::TestLabelMap() { + Json::Value configJson; + string configStr; + string errorMsg; + RelabelConfigList configList; + Labels labels; + labels.Set("__meta_kubernetes_pod_ip", "172.17.0.3"); + labels.Set("__meta_kubernetes_pod_label_app", "node-exporter"); // single relabel labelmap - configStr = configStr + R"( - { + configStr = R"JSON( + [{ "action": "labelmap", - "regex": "__meta_kubernetes_pod_label_(.+)" - + ")\"," + R"("replacement": "k8s_$1" - } - )"; + "regex": "__meta_kubernetes_pod_label_(.+)", + "replacement": "k8s_$1" + }] + )JSON"; APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - config = RelabelConfig(configJson); - cfgs.clear(); - cfgs.push_back(config); - prometheus::Process(labels, cfgs, result); + configList = RelabelConfigList(); + APSARA_TEST_TRUE(configList.Init(configJson)); + auto result = labels; + configList.Process(result); APSARA_TEST_EQUAL((size_t)3, result.Size()); APSARA_TEST_EQUAL("node-exporter", result.Get("k8s_app")); +} + +void RelabelConfigUnittest::TestLabelDrop() { + Json::Value configJson; + string configStr; + string errorMsg; + RelabelConfigList configList; // relabel labeldrop configStr = R"( - { + [{ "action": "labeldrop", - "regex": "__meta.*", - } + "regex": "__meta.*" + }] )"; Labels labelDropLabels; - labelDropLabels.Push(Label{"__meta_kubernetes_pod_ip", "172.17.0.3"}); - labelDropLabels.Push(Label{"pod_ip", "172.17.0.3"}); - labelDropLabels.Push(Label{"__meta_kubernetes_pod_label_app", "node-Exporter"}); + labelDropLabels.Set("__meta_kubernetes_pod_ip", "172.17.0.3"); + labelDropLabels.Set("pod_ip", "172.17.0.3"); + 
labelDropLabels.Set("__meta_kubernetes_pod_label_app", "node-Exporter"); APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - config = RelabelConfig(configJson); - cfgs.clear(); - cfgs.push_back(config); - prometheus::Process(labelDropLabels, cfgs, result); + configList = RelabelConfigList(); + APSARA_TEST_TRUE(configList.Init(configJson)); + auto result = labelDropLabels; + configList.Process(result); APSARA_TEST_EQUAL((size_t)1, result.Size()); APSARA_TEST_EQUAL("172.17.0.3", result.Get("pod_ip")); +} + +void RelabelConfigUnittest::TestLabelKeep() { + Json::Value configJson; + string configStr; + string errorMsg; + RelabelConfigList configList; // relabel labelkeep configStr = R"( - { + [{ "action": "labelkeep", - "regex": "__meta.*", - } + "regex": "__meta.*" + }] )"; Labels labelKeepLabels; - labelKeepLabels.Push(Label{"__meta_kubernetes_pod_ip", "172.17.0.3"}); - labelKeepLabels.Push(Label{"pod_ip", "172.17.0.3"}); - labelKeepLabels.Push(Label{"__meta_kubernetes_pod_label_app", "node-exporter"}); + labelKeepLabels.Set("__meta_kubernetes_pod_ip", "172.17.0.3"); + labelKeepLabels.Set("pod_ip", "172.17.0.3"); + labelKeepLabels.Set("__meta_kubernetes_pod_label_app", "node-exporter"); APSARA_TEST_TRUE(ParseJsonTable(configStr, configJson, errorMsg)); - config = RelabelConfig(configJson); - cfgs.clear(); - cfgs.push_back(config); - prometheus::Process(labelKeepLabels, cfgs, result); + configList = RelabelConfigList(); + APSARA_TEST_TRUE(configList.Init(configJson)); + auto result = labelKeepLabels; + configList.Process(result); APSARA_TEST_EQUAL((size_t)2, result.Size()); APSARA_TEST_EQUAL("172.17.0.3", result.Get("__meta_kubernetes_pod_ip")); APSARA_TEST_EQUAL("node-exporter", result.Get("__meta_kubernetes_pod_label_app")); +} +void RelabelConfigUnittest::TestMultiRelabel() { + Json::Value configJson; + string configStr; + string errorMsg; + RelabelConfigList configList; + Labels labels; + labels.Set("__meta_kubernetes_pod_ip", "172.17.0.3"); + 
labels.Set("__meta_kubernetes_pod_label_app", "node-exporter"); // multi relabel string configStr1; string configStr2; - configStr1 = configStr1 + R"( - { + configStr1 = configStr1 + R"JSON( + [{ "action": "replace", - "regex": "(.*)" - + ")\",\n" + - R"( + "regex": "(.*)", "replacement": "${1}:9100", "separator": ";", "source_labels": [ "__meta_kubernetes_pod_ip" ], "target_label": "__address__" - } - )"; + }] + )JSON"; configStr2 = R"( - { + [{ "action": "drop", "regex": "172.*", "separator": ";", "source_labels": [ "__address__" ] - } + }] )"; APSARA_TEST_TRUE(ParseJsonTable(configStr1, configJson, errorMsg)); - config = RelabelConfig(configJson); - cfgs.clear(); - cfgs.push_back(config); - APSARA_TEST_TRUE(ParseJsonTable(configStr2, configJson, errorMsg)); - config = RelabelConfig(configJson); - cfgs.push_back(config); + configList = RelabelConfigList(); + APSARA_TEST_TRUE(configList.Init(configJson)); + auto result = labels; + APSARA_TEST_TRUE(configList.Process(result)); + APSARA_TEST_EQUAL((size_t)3, result.Size()); + APSARA_TEST_EQUAL("172.17.0.3:9100", result.Get("__address__")); - prometheus::Process(labels, cfgs, result); - APSARA_TEST_EQUAL((size_t)0, result.Size()); - APSARA_TEST_EQUAL("", result.Get("__address__")); + APSARA_TEST_TRUE(ParseJsonTable(configStr2, configJson, errorMsg)); + configList = RelabelConfigList(); + APSARA_TEST_TRUE(configList.Init(configJson)); + // result = labels; + configList.Process(result); } UNIT_TEST_CASE(ActionConverterUnittest, TestStringToAction) UNIT_TEST_CASE(ActionConverterUnittest, TestActionToString) UNIT_TEST_CASE(RelabelConfigUnittest, TestRelabelConfig) -UNIT_TEST_CASE(RelabelConfigUnittest, TestProcess) +UNIT_TEST_CASE(RelabelConfigUnittest, TestReplace) +UNIT_TEST_CASE(RelabelConfigUnittest, TestDrop) +UNIT_TEST_CASE(RelabelConfigUnittest, TestKeep) +UNIT_TEST_CASE(RelabelConfigUnittest, TestHashMod) +UNIT_TEST_CASE(RelabelConfigUnittest, TestLabelMap) +UNIT_TEST_CASE(RelabelConfigUnittest, TestLabelDrop) 
+UNIT_TEST_CASE(RelabelConfigUnittest, TestLabelKeep) +UNIT_TEST_CASE(RelabelConfigUnittest, TestDropEqual) +UNIT_TEST_CASE(RelabelConfigUnittest, TestKeepEqual) +UNIT_TEST_CASE(RelabelConfigUnittest, TestLowerCase) +UNIT_TEST_CASE(RelabelConfigUnittest, TestUpperCase) +UNIT_TEST_CASE(RelabelConfigUnittest, TestMultiRelabel) } // namespace logtail diff --git a/core/unittest/prometheus/ScrapeConfigUnittest.cpp b/core/unittest/prometheus/ScrapeConfigUnittest.cpp index 65e7aeb8da..1268f1c913 100644 --- a/core/unittest/prometheus/ScrapeConfigUnittest.cpp +++ b/core/unittest/prometheus/ScrapeConfigUnittest.cpp @@ -64,6 +64,8 @@ void ScrapeConfigUnittest::TestInit() { ], "enable_compression": false, "scheme": "http", + "honor_labels": true, + "honor_timestamps": false, "basic_auth": { "username": "test_user", "password": "test_password" @@ -99,6 +101,8 @@ void ScrapeConfigUnittest::TestInit() { APSARA_TEST_EQUAL(scrapeConfig.mScrapeTimeoutSeconds, 30); APSARA_TEST_EQUAL(scrapeConfig.mMetricsPath, "/metrics"); APSARA_TEST_EQUAL(scrapeConfig.mScheme, "http"); + APSARA_TEST_EQUAL(scrapeConfig.mHonorLabels, true); + APSARA_TEST_EQUAL(scrapeConfig.mHonorTimestamps, false); // scrape protocols APSARA_TEST_EQUAL(scrapeConfig.mRequestHeaders["Accept"], @@ -107,15 +111,15 @@ void ScrapeConfigUnittest::TestInit() { "application/openmetrics-text;version=0.0.1;q=0.2,*/*;q=0.1"); // disable compression - APSARA_TEST_EQUAL(scrapeConfig.mRequestHeaders["Accept-Encoding"], "identity"); + // APSARA_TEST_EQUAL(scrapeConfig.mRequestHeaders["Accept-Encoding"], "identity"); // basic auth APSARA_TEST_EQUAL(scrapeConfig.mRequestHeaders["Authorization"], "Basic dGVzdF91c2VyOnRlc3RfcGFzc3dvcmQ="); - APSARA_TEST_EQUAL(scrapeConfig.mMaxScrapeSizeBytes, 1024 * 1024 * 1024); - APSARA_TEST_EQUAL(scrapeConfig.mSampleLimit, 10000); - APSARA_TEST_EQUAL(scrapeConfig.mSeriesLimit, 10000); - APSARA_TEST_EQUAL(scrapeConfig.mRelabelConfigs.size(), 1UL); + APSARA_TEST_EQUAL(scrapeConfig.mMaxScrapeSizeBytes, 
1024 * 1024 * 1024ULL); + APSARA_TEST_EQUAL(scrapeConfig.mSampleLimit, 10000ULL); + APSARA_TEST_EQUAL(scrapeConfig.mSeriesLimit, 10000ULL); + APSARA_TEST_EQUAL(scrapeConfig.mRelabelConfigs.mRelabelConfigs.size(), 1UL); APSARA_TEST_EQUAL(scrapeConfig.mParams["__param_query"][0], "test_query"); APSARA_TEST_EQUAL(scrapeConfig.mParams["__param_query_1"][0], "test_query_1"); } @@ -379,7 +383,7 @@ void ScrapeConfigUnittest::TestEnableCompression() { APSARA_TEST_TRUE(ParseJsonTable(configStr, config, errorMsg)); scrapeConfig.mRequestHeaders.clear(); APSARA_TEST_TRUE(scrapeConfig.Init(config)); - APSARA_TEST_EQUAL("gzip", scrapeConfig.mRequestHeaders["Accept-Encoding"]); + // APSARA_TEST_EQUAL("gzip", scrapeConfig.mRequestHeaders["Accept-Encoding"]); // disable configStr = R"JSON({ @@ -393,7 +397,7 @@ void ScrapeConfigUnittest::TestEnableCompression() { APSARA_TEST_TRUE(ParseJsonTable(configStr, config, errorMsg)); scrapeConfig.mRequestHeaders.clear(); APSARA_TEST_TRUE(scrapeConfig.Init(config)); - APSARA_TEST_EQUAL("identity", scrapeConfig.mRequestHeaders["Accept-Encoding"]); + // APSARA_TEST_EQUAL("identity", scrapeConfig.mRequestHeaders["Accept-Encoding"]); // enable configStr = R"JSON({ @@ -407,7 +411,7 @@ void ScrapeConfigUnittest::TestEnableCompression() { APSARA_TEST_TRUE(ParseJsonTable(configStr, config, errorMsg)); scrapeConfig.mRequestHeaders.clear(); APSARA_TEST_TRUE(scrapeConfig.Init(config)); - APSARA_TEST_EQUAL("gzip", scrapeConfig.mRequestHeaders["Accept-Encoding"]); + // APSARA_TEST_EQUAL("gzip", scrapeConfig.mRequestHeaders["Accept-Encoding"]); } UNIT_TEST_CASE(ScrapeConfigUnittest, TestInit); diff --git a/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp b/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp index fd3d540a92..b38fb8b557 100644 --- a/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp +++ b/core/unittest/prometheus/ScrapeSchedulerUnittest.cpp @@ -31,14 +31,6 @@ using namespace std; namespace logtail { -class MockTimer : public Timer { 
-public: - void Init() {} - void PushEvent(std::unique_ptr&& e) { mQueue.push_back(std::move(e)); } - void Stop() {} - std::vector> mQueue; -}; - class ScrapeSchedulerUnittest : public testing::Test { public: void TestInitscrapeScheduler(); @@ -47,6 +39,7 @@ class ScrapeSchedulerUnittest : public testing::Test { void TestReceiveMessage(); void TestScheduler(); + void TestQueueIsFull(); protected: void SetUp() override { @@ -91,16 +84,18 @@ class ScrapeSchedulerUnittest : public testing::Test { void ScrapeSchedulerUnittest::TestInitscrapeScheduler() { Labels labels; - labels.Push({prometheus::ADDRESS_LABEL_NAME, "localhost:8080"}); + labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080"); ScrapeScheduler event(mScrapeConfig, "localhost", 8080, labels, 0, 0); APSARA_TEST_EQUAL(event.GetId(), "test_jobhttp://localhost:8080/metrics" + ToString(labels.Hash())); } void ScrapeSchedulerUnittest::TestProcess() { Labels labels; - labels.Push({prometheus::ADDRESS_LABEL_NAME, "localhost:8080"}); - labels.Push({prometheus::ADDRESS_LABEL_NAME, "localhost:8080"}); + labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080"); + labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080"); ScrapeScheduler event(mScrapeConfig, "localhost", 8080, labels, 0, 0); + auto defaultLabels = MetricLabels(); + event.InitSelfMonitor(defaultLabels); APSARA_TEST_EQUAL(event.GetId(), "test_jobhttp://localhost:8080/metrics" + ToString(labels.Hash())); // if status code is not 200, no data will be processed // but will continue running, sending self-monitoring metrics @@ -117,8 +112,8 @@ void ScrapeSchedulerUnittest::TestProcess() { void ScrapeSchedulerUnittest::TestSplitByLines() { Labels labels; - labels.Push({prometheus::ADDRESS_LABEL_NAME, "localhost:8080"}); - labels.Push({prometheus::ADDRESS_LABEL_NAME, "localhost:8080"}); + labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080"); + labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080"); ScrapeScheduler event(mScrapeConfig, 
"localhost", 8080, labels, 0, 0); APSARA_TEST_EQUAL(event.GetId(), "test_jobhttp://localhost:8080/metrics" + ToString(labels.Hash())); auto res = event.BuildPipelineEventGroup(mHttpResponse.mBody); @@ -149,8 +144,8 @@ void ScrapeSchedulerUnittest::TestSplitByLines() { void ScrapeSchedulerUnittest::TestReceiveMessage() { Labels labels; - labels.Push({prometheus::ADDRESS_LABEL_NAME, "localhost:8080"}); - labels.Push({prometheus::ADDRESS_LABEL_NAME, "localhost:8080"}); + labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080"); + labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080"); auto event = make_shared(mScrapeConfig, "localhost", 8080, labels, 0, 0); @@ -164,9 +159,9 @@ void ScrapeSchedulerUnittest::TestReceiveMessage() { void ScrapeSchedulerUnittest::TestScheduler() { Labels labels; - labels.Push({prometheus::ADDRESS_LABEL_NAME, "localhost:8080"}); + labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080"); ScrapeScheduler event(mScrapeConfig, "localhost", 8080, labels, 0, 0); - auto timer = make_shared(); + auto timer = make_shared(); event.SetTimer(timer); event.ScheduleNext(); @@ -178,9 +173,36 @@ void ScrapeSchedulerUnittest::TestScheduler() { APSARA_TEST_TRUE(event.mFuture->mState == PromFutureState::Done); } +void ScrapeSchedulerUnittest::TestQueueIsFull() { + Labels labels; + labels.Set(prometheus::ADDRESS_LABEL_NAME, "localhost:8080"); + ScrapeScheduler event(mScrapeConfig, "localhost", 8080, labels, 0, 0); + auto defaultLabels = MetricLabels(); + event.InitSelfMonitor(defaultLabels); + auto timer = make_shared(); + event.SetTimer(timer); + auto now = std::chrono::steady_clock::now(); + event.SetFirstExecTime(now); + event.ScheduleNext(); + + APSARA_TEST_TRUE(timer->mQueue.size() == 1); + + const auto& e = timer->mQueue.top(); + APSARA_TEST_EQUAL(now, e->GetExecTime()); + APSARA_TEST_FALSE(e->IsValid()); + timer->mQueue.pop(); + // queue is full, so it should schedule next after 1 second + APSARA_TEST_EQUAL(1UL, timer->mQueue.size()); + 
const auto& next = timer->mQueue.top(); + APSARA_TEST_EQUAL(now + std::chrono::seconds(1), next->GetExecTime()); +} + UNIT_TEST_CASE(ScrapeSchedulerUnittest, TestInitscrapeScheduler) UNIT_TEST_CASE(ScrapeSchedulerUnittest, TestProcess) UNIT_TEST_CASE(ScrapeSchedulerUnittest, TestSplitByLines) +UNIT_TEST_CASE(ScrapeSchedulerUnittest, TestScheduler) +UNIT_TEST_CASE(ScrapeSchedulerUnittest, TestQueueIsFull) + } // namespace logtail diff --git a/core/unittest/prometheus/TargetSubscriberSchedulerUnittest.cpp b/core/unittest/prometheus/TargetSubscriberSchedulerUnittest.cpp index fdd8d47d8e..983e401253 100644 --- a/core/unittest/prometheus/TargetSubscriberSchedulerUnittest.cpp +++ b/core/unittest/prometheus/TargetSubscriberSchedulerUnittest.cpp @@ -37,9 +37,6 @@ class TargetSubscriberSchedulerUnittest : public ::testing::Test { protected: void SetUp() override { - setenv("POD_NAME", "prometheus-test", 1); - setenv("OPERATOR_HOST", "127.0.0.1", 1); - setenv("OPERATOR_PORT", "12345", 1); { mConfigString = R"JSON( { @@ -132,11 +129,7 @@ class TargetSubscriberSchedulerUnittest : public ::testing::Test { ])JSON"; } } - void TearDown() override { - unsetenv("POD_NAME"); - unsetenv("OPERATOR_HOST"); - unsetenv("OPERATOR_PORT"); - } + void TearDown() override {} private: HttpResponse mHttpResponse; @@ -155,7 +148,9 @@ void TargetSubscriberSchedulerUnittest::OnInitScrapeJobEvent() { void TargetSubscriberSchedulerUnittest::TestProcess() { std::shared_ptr targetSubscriber = std::make_shared(); + auto metricLabels = MetricLabels(); APSARA_TEST_TRUE(targetSubscriber->Init(mConfig["ScrapeConfig"])); + targetSubscriber->InitSelfMonitor(metricLabels); // if status code is not 200 mHttpResponse.mStatusCode = 404; diff --git a/core/unittest/prometheus/TextParserUnittest.cpp b/core/unittest/prometheus/TextParserUnittest.cpp index fc575dd4d1..5f0caa07f6 100644 --- a/core/unittest/prometheus/TextParserUnittest.cpp +++ b/core/unittest/prometheus/TextParserUnittest.cpp @@ -37,6 +37,8 @@ class 
TextParserUnittest : public testing::Test { void TestParseFaliure(); void TestParseSuccess(); + + void TestHonorTimestamps(); }; void TextParserUnittest::TestParseMultipleLines() const { @@ -325,6 +327,38 @@ cassandra_token_ownership_ratio 78.9)"; UNIT_TEST_CASE(TextParserUnittest, TestParseSuccess) +void TextParserUnittest::TestHonorTimestamps() { + // false + TextParser parser(false); + // has timestamp + std::string rawData = "abc 123 456"; + PipelineEventGroup res = parser.Parse(rawData, 789, 111); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTimestamp(), 789); + APSARA_TEST_TRUE(IsDoubleEqual(res.GetEvents().back().Cast().GetTimestampNanosecond().value(), 111)); + + // no timestamp + rawData = "abc 123"; + res = parser.Parse(rawData, 789, 111); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTimestamp(), 789); + APSARA_TEST_TRUE(IsDoubleEqual(res.GetEvents().back().Cast().GetTimestampNanosecond().value(), 111)); + + + // true + parser.mHonorTimestamps = true; + // has timestamp + rawData = "abc 123 456"; + res = parser.Parse(rawData, 789, 111); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTimestamp(), 456); + APSARA_TEST_TRUE(IsDoubleEqual(res.GetEvents().back().Cast().GetTimestampNanosecond().value(), 0)); + + // no timestamp + rawData = "abc 123"; + res = parser.Parse(rawData, 789, 111); + APSARA_TEST_EQUAL(res.GetEvents().back().Cast().GetTimestamp(), 789); + APSARA_TEST_TRUE(IsDoubleEqual(res.GetEvents().back().Cast().GetTimestampNanosecond().value(), 111)); +} + +UNIT_TEST_CASE(TextParserUnittest, TestHonorTimestamps) } // namespace logtail diff --git a/core/unittest/prometheus/UtilsUnittest.cpp b/core/unittest/prometheus/UtilsUnittest.cpp index a6ebd4f01e..de2294db01 100644 --- a/core/unittest/prometheus/UtilsUnittest.cpp +++ b/core/unittest/prometheus/UtilsUnittest.cpp @@ -13,10 +13,54 @@ bool IsDoubleEqual(double a, double b) { class PromUtilsUnittest : public testing::Test { public: + void TestDurationToSecond(); + void 
TestSecondToDuration(); + void TestSizeToByte(); }; +void PromUtilsUnittest::TestDurationToSecond() { + string rawData = "30s"; + APSARA_TEST_EQUAL(30ULL, DurationToSecond(rawData)); + rawData = "1m"; + APSARA_TEST_EQUAL(60ULL, DurationToSecond(rawData)); + rawData = "xxxs"; + APSARA_TEST_EQUAL(0ULL, DurationToSecond(rawData)); +} + +void PromUtilsUnittest::TestSecondToDuration() { + APSARA_TEST_EQUAL("30s", SecondToDuration(30ULL)); + APSARA_TEST_EQUAL("1m", SecondToDuration(60ULL)); + APSARA_TEST_EQUAL("90s", SecondToDuration(90ULL)); +} + +void PromUtilsUnittest::TestSizeToByte() { + APSARA_TEST_EQUAL(1025ULL, SizeToByte("1025B")); + APSARA_TEST_EQUAL(1024ULL, SizeToByte("1K")); + APSARA_TEST_EQUAL(1024ULL, SizeToByte("1KiB")); + APSARA_TEST_EQUAL(1024ULL, SizeToByte("1KB")); + + APSARA_TEST_EQUAL(1024ULL * 1024ULL, SizeToByte("1M")); + APSARA_TEST_EQUAL(1024ULL * 1024ULL, SizeToByte("1MiB")); + APSARA_TEST_EQUAL(2 * 1024ULL * 1024ULL, SizeToByte("2MB")); + APSARA_TEST_EQUAL(1024ULL * 1024ULL * 1024ULL, SizeToByte("1G")); + APSARA_TEST_EQUAL(1024ULL * 1024ULL * 1024ULL, SizeToByte("1GiB")); + APSARA_TEST_EQUAL(1024ULL * 1024ULL * 1024ULL, SizeToByte("1GB")); + + APSARA_TEST_EQUAL(1024ULL * 1024ULL * 1024ULL * 1024ULL, SizeToByte("1T")); + APSARA_TEST_EQUAL(1024ULL * 1024ULL * 1024ULL * 1024ULL, SizeToByte("1TiB")); + APSARA_TEST_EQUAL(1024ULL * 1024ULL * 1024ULL * 1024ULL, SizeToByte("1TB")); + + APSARA_TEST_EQUAL(1024ULL * 1024ULL * 1024ULL * 1024ULL * 1024ULL, SizeToByte("1P")); + APSARA_TEST_EQUAL(1024ULL * 1024ULL * 1024ULL * 1024ULL * 1024ULL, SizeToByte("1PiB")); + APSARA_TEST_EQUAL(1024ULL * 1024ULL * 1024ULL * 1024ULL * 1024ULL, SizeToByte("1PB")); + + APSARA_TEST_EQUAL(0ULL, SizeToByte("1xxE")); +} +UNIT_TEST_CASE(PromUtilsUnittest, TestDurationToSecond); +UNIT_TEST_CASE(PromUtilsUnittest, TestSecondToDuration); +UNIT_TEST_CASE(PromUtilsUnittest, TestSizeToByte); } // namespace logtail diff --git a/core/unittest/queue/BoundedProcessQueueUnittest.cpp 
b/core/unittest/queue/BoundedProcessQueueUnittest.cpp index e2228108d5..08a178b207 100644 --- a/core/unittest/queue/BoundedProcessQueueUnittest.cpp +++ b/core/unittest/queue/BoundedProcessQueueUnittest.cpp @@ -16,6 +16,7 @@ #include "common/FeedbackInterface.h" #include "models/PipelineEventGroup.h" +#include "pipeline/PipelineManager.h" #include "pipeline/queue/BoundedProcessQueue.h" #include "pipeline/queue/SenderQueue.h" #include "unittest/Unittest.h" @@ -30,6 +31,7 @@ class BoundedProcessQueueUnittest : public testing::Test { void TestPush(); void TestPop(); void TestMetric(); + void TestSetPipeline(); protected: static void SetUpTestCase() { sCtx.SetConfigName("test_config"); } @@ -44,6 +46,7 @@ class BoundedProcessQueueUnittest : public testing::Test { mFeedback1.reset(new FeedbackInterfaceMock); mFeedback2.reset(new FeedbackInterfaceMock); mQueue->SetUpStreamFeedbacks(vector{mFeedback1.get(), mFeedback2.get()}); + mQueue->EnablePop(); } private: @@ -91,9 +94,9 @@ void BoundedProcessQueueUnittest::TestPop() { mQueue->Push(GenerateItem()); // invalidate pop - mQueue->InvalidatePop(); + mQueue->DisablePop(); APSARA_TEST_EQUAL(0, mQueue->Pop(item)); - mQueue->ValidatePop(); + mQueue->EnablePop(); // downstream queues are not valid to push mSenderQueue1->mValidToPush = false; @@ -116,9 +119,9 @@ void BoundedProcessQueueUnittest::TestPop() { void BoundedProcessQueueUnittest::TestMetric() { APSARA_TEST_EQUAL(4U, mQueue->mMetricsRecordRef->GetLabels()->size()); - APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_PROJECT, "")); - APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_CONFIG_NAME, "test_config")); - APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_COMPONENT_NAME, "process_queue")); + APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PROJECT, "")); + APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PIPELINE_NAME, "test_config")); + 
APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_COMPONENT_NAME, METRIC_LABEL_VALUE_COMPONENT_NAME_PROCESS_QUEUE)); APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_QUEUE_TYPE, "bounded")); auto item = GenerateItem(); @@ -127,22 +130,44 @@ void BoundedProcessQueueUnittest::TestMetric() { auto dataSize = item->mEventGroup.DataSize(); mQueue->Push(std::move(item)); - APSARA_TEST_EQUAL(1U, mQueue->mInItemsCnt->GetValue()); + APSARA_TEST_EQUAL(1U, mQueue->mInItemsTotal->GetValue()); APSARA_TEST_EQUAL(dataSize, mQueue->mInItemDataSizeBytes->GetValue()); - APSARA_TEST_EQUAL(1U, mQueue->mQueueSize->GetValue()); + APSARA_TEST_EQUAL(1U, mQueue->mQueueSizeTotal->GetValue()); APSARA_TEST_EQUAL(dataSize, mQueue->mQueueDataSizeByte->GetValue()); APSARA_TEST_EQUAL(1U, mQueue->mValidToPushFlag->GetValue()); mQueue->Pop(item); - APSARA_TEST_EQUAL(1U, mQueue->mOutItemsCnt->GetValue()); - APSARA_TEST_EQUAL(0U, mQueue->mQueueSize->GetValue()); + APSARA_TEST_EQUAL(1U, mQueue->mOutItemsTotal->GetValue()); + APSARA_TEST_EQUAL(0U, mQueue->mQueueSizeTotal->GetValue()); APSARA_TEST_EQUAL(0U, mQueue->mQueueDataSizeByte->GetValue()); APSARA_TEST_EQUAL(1U, mQueue->mValidToPushFlag->GetValue()); } +void BoundedProcessQueueUnittest::TestSetPipeline() { + auto pipeline = make_shared(); + PipelineManager::GetInstance()->mPipelineNameEntityMap["test_config"] = pipeline; + + auto item1 = GenerateItem(); + auto p1 = item1.get(); + auto pipelineTmp = make_shared(); + item1->mPipeline = pipelineTmp; + + auto item2 = GenerateItem(); + auto p2 = item2.get(); + + mQueue->Push(std::move(item1)); + mQueue->Push(std::move(item2)); + auto p = PipelineManager::GetInstance()->FindConfigByName("test_config"); + mQueue->SetPipelineForItems(p); + + APSARA_TEST_EQUAL(pipelineTmp, p1->mPipeline); + APSARA_TEST_EQUAL(pipeline, p2->mPipeline); +} + UNIT_TEST_CASE(BoundedProcessQueueUnittest, TestPush) UNIT_TEST_CASE(BoundedProcessQueueUnittest, TestPop) 
UNIT_TEST_CASE(BoundedProcessQueueUnittest, TestMetric) +UNIT_TEST_CASE(BoundedProcessQueueUnittest, TestSetPipeline) } // namespace logtail diff --git a/core/unittest/queue/CircularProcessQueueUnittest.cpp b/core/unittest/queue/CircularProcessQueueUnittest.cpp index 7b5ef9d0d3..cd80c823ad 100644 --- a/core/unittest/queue/CircularProcessQueueUnittest.cpp +++ b/core/unittest/queue/CircularProcessQueueUnittest.cpp @@ -15,6 +15,7 @@ #include #include "models/PipelineEventGroup.h" +#include "pipeline/PipelineManager.h" #include "pipeline/queue/CircularProcessQueue.h" #include "pipeline/queue/SenderQueue.h" #include "unittest/Unittest.h" @@ -28,6 +29,7 @@ class CircularProcessQueueUnittest : public testing::Test { void TestPop(); void TestReset(); void TestMetric(); + void TestSetPipeline(); protected: static void SetUpTestCase() { sCtx.SetConfigName("test_config"); } @@ -38,6 +40,7 @@ class CircularProcessQueueUnittest : public testing::Test { mSenderQueue1.reset(new SenderQueue(10, 0, 10, 0, "", sCtx)); mSenderQueue2.reset(new SenderQueue(10, 0, 10, 0, "", sCtx)); mQueue->SetDownStreamQueues(vector{mSenderQueue1.get(), mSenderQueue2.get()}); + mQueue->EnablePop(); } private: @@ -97,9 +100,9 @@ void CircularProcessQueueUnittest::TestPop() { mQueue->Push(GenerateItem(1)); // invalidate pop - mQueue->InvalidatePop(); + mQueue->DisablePop(); APSARA_TEST_FALSE(mQueue->Pop(item)); - mQueue->ValidatePop(); + mQueue->EnablePop(); // downstream queues are not valid to push mSenderQueue1->mValidToPush = false; @@ -149,38 +152,60 @@ void CircularProcessQueueUnittest::TestReset() { void CircularProcessQueueUnittest::TestMetric() { APSARA_TEST_EQUAL(4U, mQueue->mMetricsRecordRef->GetLabels()->size()); - APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_PROJECT, "")); - APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_CONFIG_NAME, "test_config")); - APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_COMPONENT_NAME, "process_queue")); 
+ APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PROJECT, "")); + APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PIPELINE_NAME, "test_config")); + APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_COMPONENT_NAME, METRIC_LABEL_VALUE_COMPONENT_NAME_PROCESS_QUEUE)); APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_QUEUE_TYPE, "circular")); auto item = GenerateItem(2); auto dataSize1 = item->mEventGroup.DataSize(); mQueue->Push(std::move(item)); - APSARA_TEST_EQUAL(1U, mQueue->mInItemsCnt->GetValue()); + APSARA_TEST_EQUAL(1U, mQueue->mInItemsTotal->GetValue()); APSARA_TEST_EQUAL(dataSize1, mQueue->mInItemDataSizeBytes->GetValue()); - APSARA_TEST_EQUAL(2U, mQueue->mQueueSize->GetValue()); + APSARA_TEST_EQUAL(2U, mQueue->mQueueSizeTotal->GetValue()); APSARA_TEST_EQUAL(dataSize1, mQueue->mQueueDataSizeByte->GetValue()); item = GenerateItem(1); auto dataSize2 = item->mEventGroup.DataSize(); mQueue->Push(std::move(item)); - APSARA_TEST_EQUAL(2U, mQueue->mInItemsCnt->GetValue()); + APSARA_TEST_EQUAL(2U, mQueue->mInItemsTotal->GetValue()); APSARA_TEST_EQUAL(dataSize1 + dataSize2, mQueue->mInItemDataSizeBytes->GetValue()); - APSARA_TEST_EQUAL(1U, mQueue->mQueueSize->GetValue()); + APSARA_TEST_EQUAL(1U, mQueue->mQueueSizeTotal->GetValue()); APSARA_TEST_EQUAL(dataSize2, mQueue->mQueueDataSizeByte->GetValue()); - APSARA_TEST_EQUAL(2U, mQueue->mDroppedEventsCnt->GetValue()); + APSARA_TEST_EQUAL(2U, mQueue->mDiscardedEventsTotal->GetValue()); mQueue->Pop(item); - APSARA_TEST_EQUAL(1U, mQueue->mOutItemsCnt->GetValue()); - APSARA_TEST_EQUAL(0U, mQueue->mQueueSize->GetValue()); + APSARA_TEST_EQUAL(1U, mQueue->mOutItemsTotal->GetValue()); + APSARA_TEST_EQUAL(0U, mQueue->mQueueSizeTotal->GetValue()); APSARA_TEST_EQUAL(0U, mQueue->mQueueDataSizeByte->GetValue()); } +void CircularProcessQueueUnittest::TestSetPipeline() { + auto pipeline = make_shared(); + 
PipelineManager::GetInstance()->mPipelineNameEntityMap["test_config"] = pipeline; + + auto item1 = GenerateItem(1); + auto p1 = item1.get(); + auto pipelineTmp = make_shared(); + item1->mPipeline = pipelineTmp; + + auto item2 = GenerateItem(1); + auto p2 = item2.get(); + + mQueue->Push(std::move(item1)); + mQueue->Push(std::move(item2)); + auto p = PipelineManager::GetInstance()->FindConfigByName("test_config"); + mQueue->SetPipelineForItems(p); + + APSARA_TEST_EQUAL(pipelineTmp, p1->mPipeline); + APSARA_TEST_EQUAL(pipeline, p2->mPipeline); +} + UNIT_TEST_CASE(CircularProcessQueueUnittest, TestPush) UNIT_TEST_CASE(CircularProcessQueueUnittest, TestPop) UNIT_TEST_CASE(CircularProcessQueueUnittest, TestReset) UNIT_TEST_CASE(CircularProcessQueueUnittest, TestMetric) +UNIT_TEST_CASE(CircularProcessQueueUnittest, TestSetPipeline) } // namespace logtail diff --git a/core/unittest/queue/ExactlyOnceQueueManagerUnittest.cpp b/core/unittest/queue/ExactlyOnceQueueManagerUnittest.cpp index 5e929eb477..e2dafe1294 100644 --- a/core/unittest/queue/ExactlyOnceQueueManagerUnittest.cpp +++ b/core/unittest/queue/ExactlyOnceQueueManagerUnittest.cpp @@ -15,6 +15,7 @@ #include #include "models/PipelineEventGroup.h" +#include "pipeline/PipelineManager.h" #include "pipeline/queue/ExactlyOnceQueueManager.h" #include "pipeline/queue/QueueKeyManager.h" #include "pipeline/queue/SLSSenderQueueItem.h" @@ -35,7 +36,7 @@ class ExactlyOnceQueueManagerUnittest : public testing::Test { void TestPushProcessQueue(); void TestIsAllProcessQueueEmpty(); void TestPushSenderQueue(); - void TestGetAllAvailableSenderQueueItems(); + void TestGetAvailableSenderQueueItems(); void TestRemoveSenderItem(); void TestIsAllSenderQueueEmpty(); void OnPipelineUpdate(); @@ -201,7 +202,7 @@ void ExactlyOnceQueueManagerUnittest::TestPushSenderQueue() { APSARA_TEST_EQUAL(2, sManager->PushSenderQueue(1, GenerateSenderItem())); } -void ExactlyOnceQueueManagerUnittest::TestGetAllAvailableSenderQueueItems() { +void 
ExactlyOnceQueueManagerUnittest::TestGetAvailableSenderQueueItems() { vector checkpoints1; for (size_t i = 0; i < 2; ++i) { auto cpt = make_shared(); @@ -231,20 +232,21 @@ void ExactlyOnceQueueManagerUnittest::TestGetAllAvailableSenderQueueItems() { { // no limits vector items; - sManager->GetAllAvailableSenderQueueItems(items, false); + sManager->GetAvailableSenderQueueItems(items, -1); APSARA_TEST_EQUAL(4U, items.size()); for (auto& item : items) { - item->mStatus = SendingStatus::IDLE; + item->mStatus.Set(SendingStatus::IDLE); } } auto& regionConcurrencyLimiter = sManager->mSenderQueues.at(0).mConcurrencyLimiters[0]; { // with limits, limited by concurrency limiter - regionConcurrencyLimiter->SetLimit(3); + regionConcurrencyLimiter.first->SetCurrentLimit(3); + regionConcurrencyLimiter.first->SetInSendingCount(0); vector items; - sManager->GetAllAvailableSenderQueueItems(items); + sManager->GetAvailableSenderQueueItems(items, 80); APSARA_TEST_EQUAL(3U, items.size()); - APSARA_TEST_EQUAL(0, regionConcurrencyLimiter->GetLimit()); + APSARA_TEST_EQUAL(3, regionConcurrencyLimiter.first->GetInSendingCount()); } } @@ -278,16 +280,66 @@ void ExactlyOnceQueueManagerUnittest::TestIsAllSenderQueueEmpty() { void ExactlyOnceQueueManagerUnittest::OnPipelineUpdate() { PipelineContext ctx; ctx.SetConfigName("test_config"); - sManager->CreateOrUpdateQueue(0, 0, ctx, sCheckpoints); sManager->CreateOrUpdateQueue(1, 0, ctx, sCheckpoints); + sManager->CreateOrUpdateQueue(2, 0, ctx, sCheckpoints); - sManager->InvalidatePopProcessQueue("test_config"); - APSARA_TEST_FALSE(sManager->mProcessQueues[0]->mValidToPop); + auto pipeline1 = make_shared(); + PipelineManager::GetInstance()->mPipelineNameEntityMap["test_config"] = pipeline1; + + auto item1 = GenerateProcessItem(); + auto p1 = item1.get(); + sManager->PushProcessQueue(1, std::move(item1)); + + auto item2 = GenerateProcessItem(); + auto p2 = item2.get(); + sManager->PushProcessQueue(2, std::move(item2)); + + 
sManager->DisablePopProcessQueue("test_config", false); + APSARA_TEST_FALSE(ExactlyOnceQueueManager::GetInstance()->mProcessQueues[1]->mValidToPop); + APSARA_TEST_FALSE(ExactlyOnceQueueManager::GetInstance()->mProcessQueues[2]->mValidToPop); + APSARA_TEST_EQUAL(pipeline1, p1->mPipeline); + APSARA_TEST_EQUAL(pipeline1, p2->mPipeline); + + auto item3 = GenerateProcessItem(); + auto p3 = item3.get(); + sManager->PushProcessQueue(1, std::move(item3)); + + auto item4 = GenerateProcessItem(); + auto p4 = item4.get(); + sManager->PushProcessQueue(2, std::move(item4)); + + auto pipeline2 = make_shared(); + PipelineManager::GetInstance()->mPipelineNameEntityMap["test_config"] = pipeline2; + + sManager->DisablePopProcessQueue("test_config", false); APSARA_TEST_FALSE(sManager->mProcessQueues[1]->mValidToPop); + APSARA_TEST_FALSE(sManager->mProcessQueues[2]->mValidToPop); + APSARA_TEST_EQUAL(pipeline1, p1->mPipeline); + APSARA_TEST_EQUAL(pipeline1, p2->mPipeline); + APSARA_TEST_EQUAL(pipeline2, p3->mPipeline); + APSARA_TEST_EQUAL(pipeline2, p4->mPipeline); + + auto item5 = GenerateProcessItem(); + auto p5 = item5.get(); + sManager->PushProcessQueue(1, std::move(item5)); - sManager->ValidatePopProcessQueue("test_config"); - APSARA_TEST_TRUE(sManager->mProcessQueues[0]->mValidToPop); + auto item6 = GenerateProcessItem(); + auto p6 = item6.get(); + sManager->PushProcessQueue(2, std::move(item6)); + + sManager->DisablePopProcessQueue("test_config", true); + APSARA_TEST_FALSE(sManager->mProcessQueues[1]->mValidToPop); + APSARA_TEST_FALSE(sManager->mProcessQueues[2]->mValidToPop); + APSARA_TEST_EQUAL(pipeline1, p1->mPipeline); + APSARA_TEST_EQUAL(pipeline1, p2->mPipeline); + APSARA_TEST_EQUAL(pipeline2, p3->mPipeline); + APSARA_TEST_EQUAL(pipeline2, p4->mPipeline); + APSARA_TEST_EQUAL(nullptr, p5->mPipeline); + APSARA_TEST_EQUAL(nullptr, p6->mPipeline); + + sManager->EnablePopProcessQueue("test_config"); APSARA_TEST_TRUE(sManager->mProcessQueues[1]->mValidToPop); + 
APSARA_TEST_TRUE(sManager->mProcessQueues[2]->mValidToPop); } unique_ptr ExactlyOnceQueueManagerUnittest::GenerateProcessItem() { @@ -306,7 +358,7 @@ UNIT_TEST_CASE(ExactlyOnceQueueManagerUnittest, TestDeleteQueue) UNIT_TEST_CASE(ExactlyOnceQueueManagerUnittest, TestPushProcessQueue) UNIT_TEST_CASE(ExactlyOnceQueueManagerUnittest, TestIsAllProcessQueueEmpty) UNIT_TEST_CASE(ExactlyOnceQueueManagerUnittest, TestPushSenderQueue) -UNIT_TEST_CASE(ExactlyOnceQueueManagerUnittest, TestGetAllAvailableSenderQueueItems) +UNIT_TEST_CASE(ExactlyOnceQueueManagerUnittest, TestGetAvailableSenderQueueItems) UNIT_TEST_CASE(ExactlyOnceQueueManagerUnittest, TestRemoveSenderItem) UNIT_TEST_CASE(ExactlyOnceQueueManagerUnittest, TestIsAllSenderQueueEmpty) UNIT_TEST_CASE(ExactlyOnceQueueManagerUnittest, OnPipelineUpdate) diff --git a/core/unittest/queue/ExactlyOnceSenderQueueUnittest.cpp b/core/unittest/queue/ExactlyOnceSenderQueueUnittest.cpp index 16dba46c8f..a065f0c46f 100644 --- a/core/unittest/queue/ExactlyOnceSenderQueueUnittest.cpp +++ b/core/unittest/queue/ExactlyOnceSenderQueueUnittest.cpp @@ -26,7 +26,7 @@ class ExactlyOnceSenderQueueUnittest : public testing::Test { public: void TestPush(); void TestRemove(); - void TestGetAllAvailableItems(); + void TestGetAvailableItems(); void TestReset(); protected: @@ -79,9 +79,10 @@ void ExactlyOnceSenderQueueUnittest::TestPush() { APSARA_TEST_NOT_EQUAL(nullptr, mQueue->mQueue[1]); APSARA_TEST_TRUE(mQueue->mRateLimiter.has_value()); APSARA_TEST_EQUAL(100U, mQueue->mRateLimiter->mMaxSendBytesPerSecond); - APSARA_TEST_EQUAL(2U, mQueue->mConcurrencyLimiters.size()); - APSARA_TEST_EQUAL(FlusherSLS::GetRegionConcurrencyLimiter("region"), mQueue->mConcurrencyLimiters[0]); - APSARA_TEST_EQUAL(FlusherSLS::GetProjectConcurrencyLimiter("project"), mQueue->mConcurrencyLimiters[1]); + APSARA_TEST_EQUAL(3U, mQueue->mConcurrencyLimiters.size()); + + //APSARA_TEST_EQUAL(FlusherSLS::GetRegionConcurrencyLimiter("region"), 
mQueue->mConcurrencyLimiters[0]); + //APSARA_TEST_EQUAL(FlusherSLS::GetProjectConcurrencyLimiter("project"), mQueue->mConcurrencyLimiters[1]); // reach high water mark APSARA_TEST_TRUE(mQueue->Push(GenerateItem())); @@ -120,53 +121,56 @@ void ExactlyOnceSenderQueueUnittest::TestRemove() { APSARA_TEST_TRUE(sFeedback.HasFeedback(0)); } -void ExactlyOnceSenderQueueUnittest::TestGetAllAvailableItems() { +void ExactlyOnceSenderQueueUnittest::TestGetAvailableItems() { for (size_t i = 0; i <= sCheckpoints.size(); ++i) { mQueue->Push(GenerateItem()); } { // no limits vector items; - mQueue->GetAllAvailableItems(items, false); + mQueue->GetAvailableItems(items, -1); APSARA_TEST_EQUAL(2U, items.size()); for (auto& item : items) { - item->mStatus = SendingStatus::IDLE; + item->mStatus.Set(SendingStatus::IDLE); } } { // with limits, limited by concurrency limiter mQueue->mRateLimiter->mMaxSendBytesPerSecond = 100; - mQueue->mConcurrencyLimiters[0]->SetLimit(1); + mQueue->mConcurrencyLimiters[0].first->SetCurrentLimit(1); + mQueue->mConcurrencyLimiters[0].first->SetInSendingCount(0); vector items; - mQueue->GetAllAvailableItems(items); + mQueue->GetAvailableItems(items, 80); APSARA_TEST_EQUAL(1U, items.size()); APSARA_TEST_EQUAL(sDataSize, mQueue->mRateLimiter->mLastSecondTotalBytes); - APSARA_TEST_EQUAL(0, mQueue->mConcurrencyLimiters[0]->GetLimit()); + APSARA_TEST_EQUAL(1, mQueue->mConcurrencyLimiters[0].first->GetInSendingCount()); for (auto& item : items) { - item->mStatus = SendingStatus::IDLE; + item->mStatus.Set(SendingStatus::IDLE); } mQueue->mRateLimiter->mLastSecondTotalBytes = 0; } { // with limits, limited by rate limiter mQueue->mRateLimiter->mMaxSendBytesPerSecond = 5; - mQueue->mConcurrencyLimiters[0]->SetLimit(3); + mQueue->mConcurrencyLimiters[0].first->SetCurrentLimit(3); + mQueue->mConcurrencyLimiters[0].first->SetInSendingCount(0); vector items; - mQueue->GetAllAvailableItems(items); + mQueue->GetAvailableItems(items, 80); APSARA_TEST_EQUAL(1U, 
items.size()); APSARA_TEST_EQUAL(sDataSize, mQueue->mRateLimiter->mLastSecondTotalBytes); - APSARA_TEST_EQUAL(2, mQueue->mConcurrencyLimiters[0]->GetLimit()); + APSARA_TEST_EQUAL(1, mQueue->mConcurrencyLimiters[0].first->GetInSendingCount()); mQueue->mRateLimiter->mLastSecondTotalBytes = 0; } { // with limits, does not work mQueue->mRateLimiter->mMaxSendBytesPerSecond = 100; - mQueue->mConcurrencyLimiters[0]->SetLimit(3); + mQueue->mConcurrencyLimiters[0].first->SetCurrentLimit(3); + mQueue->mConcurrencyLimiters[0].first->SetInSendingCount(0); vector items; - mQueue->GetAllAvailableItems(items); + mQueue->GetAvailableItems(items, 80); APSARA_TEST_EQUAL(1U, items.size()); APSARA_TEST_EQUAL(sDataSize, mQueue->mRateLimiter->mLastSecondTotalBytes); - APSARA_TEST_EQUAL(2, mQueue->mConcurrencyLimiters[0]->GetLimit()); + APSARA_TEST_EQUAL(1, mQueue->mConcurrencyLimiters[0].first->GetInSendingCount()); } } @@ -216,7 +220,7 @@ unique_ptr ExactlyOnceSenderQueueUnittest::GenerateItem(int32_t UNIT_TEST_CASE(ExactlyOnceSenderQueueUnittest, TestPush) UNIT_TEST_CASE(ExactlyOnceSenderQueueUnittest, TestRemove) -UNIT_TEST_CASE(ExactlyOnceSenderQueueUnittest, TestGetAllAvailableItems) +UNIT_TEST_CASE(ExactlyOnceSenderQueueUnittest, TestGetAvailableItems) UNIT_TEST_CASE(ExactlyOnceSenderQueueUnittest, TestReset) } // namespace logtail diff --git a/core/unittest/queue/ProcessQueueManagerUnittest.cpp b/core/unittest/queue/ProcessQueueManagerUnittest.cpp index d87bf88e96..21226fe19b 100644 --- a/core/unittest/queue/ProcessQueueManagerUnittest.cpp +++ b/core/unittest/queue/ProcessQueueManagerUnittest.cpp @@ -15,6 +15,7 @@ #include #include "models/PipelineEventGroup.h" +#include "pipeline/PipelineManager.h" #include "pipeline/queue/ExactlyOnceQueueManager.h" #include "pipeline/queue/ProcessQueueManager.h" #include "pipeline/queue/QueueKeyManager.h" @@ -267,17 +268,22 @@ void ProcessQueueManagerUnittest::TestPopItem() { ctx.SetConfigName("test_config_1"); QueueKey key1 = 
QueueKeyManager::GetInstance()->GetKey("test_config_1"); sProcessQueueManager->CreateOrUpdateBoundedQueue(key1, 0, ctx); + sProcessQueueManager->EnablePop("test_config_1"); ctx.SetConfigName("test_config_2"); QueueKey key2 = QueueKeyManager::GetInstance()->GetKey("test_config_2"); sProcessQueueManager->CreateOrUpdateBoundedQueue(key2, 1, ctx); + sProcessQueueManager->EnablePop("test_config_2"); ctx.SetConfigName("test_config_3"); QueueKey key3 = QueueKeyManager::GetInstance()->GetKey("test_config_3"); sProcessQueueManager->CreateOrUpdateBoundedQueue(key3, 1, ctx); + sProcessQueueManager->EnablePop("test_config_3"); ctx.SetConfigName("test_config_4"); QueueKey key4 = QueueKeyManager::GetInstance()->GetKey("test_config_4"); sProcessQueueManager->CreateOrUpdateBoundedQueue(key4, 1, ctx); + sProcessQueueManager->EnablePop("test_config_4"); ctx.SetConfigName("test_config_5"); ExactlyOnceQueueManager::GetInstance()->CreateOrUpdateQueue(5, 0, ctx, vector(5)); + ExactlyOnceQueueManager::GetInstance()->EnablePopProcessQueue("test_config_5"); sProcessQueueManager->PushQueue(key2, GenerateItem()); sProcessQueueManager->PushQueue(key3, GenerateItem()); @@ -318,10 +324,26 @@ void ProcessQueueManagerUnittest::TestPopItem() { } void ProcessQueueManagerUnittest::TestIsAllQueueEmpty() { - sProcessQueueManager->CreateOrUpdateBoundedQueue(0, 0, sCtx); - sProcessQueueManager->CreateOrUpdateBoundedQueue(1, 1, sCtx); - ExactlyOnceQueueManager::GetInstance()->CreateOrUpdateQueue(2, 0, sCtx, vector(5)); - ExactlyOnceQueueManager::GetInstance()->CreateOrUpdateQueue(3, 2, sCtx, vector(5)); + PipelineContext ctx; + ctx.SetConfigName("test_config_1"); + QueueKey key1 = QueueKeyManager::GetInstance()->GetKey("test_config_1"); + sProcessQueueManager->CreateOrUpdateBoundedQueue(key1, 0, ctx); + sProcessQueueManager->EnablePop("test_config_1"); + + ctx.SetConfigName("test_config_2"); + QueueKey key2 = QueueKeyManager::GetInstance()->GetKey("test_config_2"); + 
sProcessQueueManager->CreateOrUpdateBoundedQueue(key2, 1, ctx); + sProcessQueueManager->EnablePop("test_config_2"); + + ctx.SetConfigName("test_config_3"); + QueueKey key3 = QueueKeyManager::GetInstance()->GetKey("test_config_3"); + ExactlyOnceQueueManager::GetInstance()->CreateOrUpdateQueue(key3, 0, ctx, vector(5)); + ExactlyOnceQueueManager::GetInstance()->EnablePopProcessQueue("test_config_3"); + + ctx.SetConfigName("test_config_4"); + QueueKey key4 = QueueKeyManager::GetInstance()->GetKey("test_config_4"); + ExactlyOnceQueueManager::GetInstance()->CreateOrUpdateQueue(key4, 2, ctx, vector(5)); + ExactlyOnceQueueManager::GetInstance()->EnablePopProcessQueue("test_config_4"); APSARA_TEST_TRUE(sProcessQueueManager->IsAllQueueEmpty()); // non empty normal process queue @@ -334,7 +356,7 @@ void ProcessQueueManagerUnittest::TestIsAllQueueEmpty() { APSARA_TEST_TRUE(sProcessQueueManager->IsAllQueueEmpty()); // non empty exactly once process queue - sProcessQueueManager->PushQueue(2, GenerateItem()); + sProcessQueueManager->PushQueue(key3, GenerateItem()); APSARA_TEST_FALSE(sProcessQueueManager->IsAllQueueEmpty()); sProcessQueueManager->PopItem(0, item, configName); @@ -342,27 +364,109 @@ void ProcessQueueManagerUnittest::TestIsAllQueueEmpty() { } void ProcessQueueManagerUnittest::OnPipelineUpdate() { - PipelineContext ctx; - ctx.SetConfigName("test_config_1"); + PipelineContext ctx1, ctx2; + ctx1.SetConfigName("test_config_1"); + ctx2.SetConfigName("test_config_2"); QueueKey key = QueueKeyManager::GetInstance()->GetKey("test_config_1"); - sProcessQueueManager->CreateOrUpdateBoundedQueue(key, 0, ctx); - ctx.SetConfigName("test_config_2"); - ExactlyOnceQueueManager::GetInstance()->CreateOrUpdateQueue(1, 0, ctx, vector(5)); - ExactlyOnceQueueManager::GetInstance()->CreateOrUpdateQueue(2, 0, ctx, vector(5)); - - sProcessQueueManager->InvalidatePop("test_config_1"); - APSARA_TEST_FALSE((*sProcessQueueManager->mQueues[key].first)->mValidToPop); - - 
sProcessQueueManager->InvalidatePop("test_config_2"); - APSARA_TEST_FALSE(ExactlyOnceQueueManager::GetInstance()->mProcessQueues[1]->mValidToPop); - APSARA_TEST_FALSE(ExactlyOnceQueueManager::GetInstance()->mProcessQueues[2]->mValidToPop); - - sProcessQueueManager->ValidatePop("test_config_1"); - APSARA_TEST_TRUE((*sProcessQueueManager->mQueues[key].first)->mValidToPop); - - sProcessQueueManager->ValidatePop("test_config_2"); - APSARA_TEST_TRUE(ExactlyOnceQueueManager::GetInstance()->mProcessQueues[1]->mValidToPop); - APSARA_TEST_TRUE(ExactlyOnceQueueManager::GetInstance()->mProcessQueues[2]->mValidToPop); + sProcessQueueManager->CreateOrUpdateBoundedQueue(key, 0, ctx1); + ExactlyOnceQueueManager::GetInstance()->CreateOrUpdateQueue(1, 0, ctx2, vector(5)); + ExactlyOnceQueueManager::GetInstance()->CreateOrUpdateQueue(2, 0, ctx2, vector(5)); + + auto pipeline1 = make_shared(); + auto pipeline2 = make_shared(); + PipelineManager::GetInstance()->mPipelineNameEntityMap["test_config_1"] = pipeline1; + PipelineManager::GetInstance()->mPipelineNameEntityMap["test_config_2"] = pipeline2; + + { + auto item1 = GenerateItem(); + auto p1 = item1.get(); + sProcessQueueManager->PushQueue(key, std::move(item1)); + + sProcessQueueManager->DisablePop("test_config_1", false); + APSARA_TEST_FALSE((*sProcessQueueManager->mQueues[key].first)->mValidToPop); + APSARA_TEST_EQUAL(pipeline1, p1->mPipeline); + + auto item2 = GenerateItem(); + auto p2 = item2.get(); + sProcessQueueManager->PushQueue(key, std::move(item2)); + + auto pipeline3 = make_shared(); + PipelineManager::GetInstance()->mPipelineNameEntityMap["test_config_1"] = pipeline3; + + sProcessQueueManager->DisablePop("test_config_1", false); + APSARA_TEST_FALSE((*sProcessQueueManager->mQueues[key].first)->mValidToPop); + APSARA_TEST_EQUAL(pipeline1, p1->mPipeline); + APSARA_TEST_EQUAL(pipeline3, p2->mPipeline); + + auto item3 = GenerateItem(); + auto p3 = item3.get(); + sProcessQueueManager->PushQueue(key, std::move(item3)); + + 
sProcessQueueManager->DisablePop("test_config_1", true); + APSARA_TEST_FALSE((*sProcessQueueManager->mQueues[key].first)->mValidToPop); + APSARA_TEST_EQUAL(pipeline1, p1->mPipeline); + APSARA_TEST_EQUAL(pipeline3, p2->mPipeline); + APSARA_TEST_EQUAL(nullptr, p3->mPipeline); + + sProcessQueueManager->EnablePop("test_config_1"); + APSARA_TEST_TRUE((*sProcessQueueManager->mQueues[key].first)->mValidToPop); + } + { + auto item1 = GenerateItem(); + auto p1 = item1.get(); + sProcessQueueManager->PushQueue(1, std::move(item1)); + + auto item2 = GenerateItem(); + auto p2 = item2.get(); + sProcessQueueManager->PushQueue(2, std::move(item2)); + + sProcessQueueManager->DisablePop("test_config_2", false); + APSARA_TEST_FALSE(ExactlyOnceQueueManager::GetInstance()->mProcessQueues[1]->mValidToPop); + APSARA_TEST_FALSE(ExactlyOnceQueueManager::GetInstance()->mProcessQueues[2]->mValidToPop); + APSARA_TEST_EQUAL(pipeline2, p1->mPipeline); + APSARA_TEST_EQUAL(pipeline2, p2->mPipeline); + + auto item3 = GenerateItem(); + auto p3 = item3.get(); + sProcessQueueManager->PushQueue(1, std::move(item3)); + + auto item4 = GenerateItem(); + auto p4 = item4.get(); + sProcessQueueManager->PushQueue(2, std::move(item4)); + + auto pipeline3 = make_shared(); + PipelineManager::GetInstance()->mPipelineNameEntityMap["test_config_2"] = pipeline3; + + sProcessQueueManager->DisablePop("test_config_2", false); + APSARA_TEST_FALSE(ExactlyOnceQueueManager::GetInstance()->mProcessQueues[1]->mValidToPop); + APSARA_TEST_FALSE(ExactlyOnceQueueManager::GetInstance()->mProcessQueues[2]->mValidToPop); + APSARA_TEST_EQUAL(pipeline2, p1->mPipeline); + APSARA_TEST_EQUAL(pipeline2, p2->mPipeline); + APSARA_TEST_EQUAL(pipeline3, p3->mPipeline); + APSARA_TEST_EQUAL(pipeline3, p4->mPipeline); + + auto item5 = GenerateItem(); + auto p5 = item5.get(); + sProcessQueueManager->PushQueue(1, std::move(item5)); + + auto item6 = GenerateItem(); + auto p6 = item6.get(); + sProcessQueueManager->PushQueue(2, std::move(item6)); + 
+ sProcessQueueManager->DisablePop("test_config_2", true); + APSARA_TEST_FALSE(ExactlyOnceQueueManager::GetInstance()->mProcessQueues[1]->mValidToPop); + APSARA_TEST_FALSE(ExactlyOnceQueueManager::GetInstance()->mProcessQueues[2]->mValidToPop); + APSARA_TEST_EQUAL(pipeline2, p1->mPipeline); + APSARA_TEST_EQUAL(pipeline2, p2->mPipeline); + APSARA_TEST_EQUAL(pipeline3, p3->mPipeline); + APSARA_TEST_EQUAL(pipeline3, p4->mPipeline); + APSARA_TEST_EQUAL(nullptr, p5->mPipeline); + APSARA_TEST_EQUAL(nullptr, p6->mPipeline); + + sProcessQueueManager->EnablePop("test_config_2"); + APSARA_TEST_TRUE(ExactlyOnceQueueManager::GetInstance()->mProcessQueues[1]->mValidToPop); + APSARA_TEST_TRUE(ExactlyOnceQueueManager::GetInstance()->mProcessQueues[2]->mValidToPop); + } } UNIT_TEST_CASE(ProcessQueueManagerUnittest, TestUpdateSameTypeQueue) diff --git a/core/unittest/queue/SenderQueueManagerUnittest.cpp b/core/unittest/queue/SenderQueueManagerUnittest.cpp index 25b8d9ee74..e50cff7cb1 100644 --- a/core/unittest/queue/SenderQueueManagerUnittest.cpp +++ b/core/unittest/queue/SenderQueueManagerUnittest.cpp @@ -32,14 +32,14 @@ class SenderQueueManagerUnittest : public testing::Test { void TestDeleteQueue(); void TestGetQueue(); void TestPushQueue(); - void TestGetAllAvailableItems(); + void TestGetAvailableItems(); void TestRemoveItem(); void TestIsAllQueueEmpty(); protected: static void SetUpTestCase() { sManager = SenderQueueManager::GetInstance(); - sConcurrencyLimiter = make_shared(); + sConcurrencyLimiter = make_shared(80); sManager->mQueueParam.mCapacity = 2; sManager->mQueueParam.mLowWatermark = 1; sManager->mQueueParam.mHighWatermark = 3; @@ -62,7 +62,7 @@ class SenderQueueManagerUnittest : public testing::Test { sManager->Clear(); ExactlyOnceQueueManager::GetInstance()->Clear(); QueueKeyManager::GetInstance()->Clear(); - sConcurrencyLimiter->Reset(); + sConcurrencyLimiter = make_shared(80); } private: @@ -93,27 +93,27 @@ void SenderQueueManagerUnittest::TestCreateQueue() { // 
new queue uint32_t maxRate = 100U; APSARA_TEST_TRUE(sManager->CreateQueue( - 0, sFlusherId, sCtx, vector>{sConcurrencyLimiter}, maxRate)); + 0, sFlusherId, sCtx, {{"region", sConcurrencyLimiter}}, maxRate)); APSARA_TEST_EQUAL(1U, sManager->mQueues.size()); auto& queue = sManager->mQueues.at(0); APSARA_TEST_EQUAL(sManager->mQueueParam.GetCapacity(), queue.mCapacity); APSARA_TEST_EQUAL(sManager->mQueueParam.GetLowWatermark(), queue.mLowWatermark); APSARA_TEST_EQUAL(sManager->mQueueParam.GetHighWatermark(), queue.mHighWatermark); APSARA_TEST_EQUAL(1U, queue.mConcurrencyLimiters.size()); - APSARA_TEST_EQUAL(sConcurrencyLimiter, queue.mConcurrencyLimiters[0]); + //APSARA_TEST_EQUAL(sConcurrencyLimiter, queue.mConcurrencyLimiters[0]); APSARA_TEST_TRUE(queue.mRateLimiter.has_value()); APSARA_TEST_EQUAL(maxRate, queue.mRateLimiter->mMaxSendBytesPerSecond); } { // resued queue - shared_ptr newLimiter = make_shared(); + shared_ptr newLimiter = make_shared(80); uint32_t maxRate = 10U; APSARA_TEST_TRUE( - sManager->CreateQueue(0, sFlusherId, sCtx, vector>{newLimiter}, maxRate)); + sManager->CreateQueue(0, sFlusherId, sCtx, {{"region", newLimiter}}, maxRate)); APSARA_TEST_EQUAL(1U, sManager->mQueues.size()); auto& queue = sManager->mQueues.at(0); APSARA_TEST_EQUAL(1U, queue.mConcurrencyLimiters.size()); - APSARA_TEST_EQUAL(newLimiter, queue.mConcurrencyLimiters[0]); + //APSARA_TEST_EQUAL(newLimiter, queue.mConcurrencyLimiters[0]); APSARA_TEST_TRUE(queue.mRateLimiter.has_value()); APSARA_TEST_EQUAL(maxRate, queue.mRateLimiter->mMaxSendBytesPerSecond); } @@ -128,9 +128,9 @@ void SenderQueueManagerUnittest::TestDeleteQueue() { QueueKey key1 = QueueKeyManager::GetInstance()->GetKey("name_1"); QueueKey key2 = QueueKeyManager::GetInstance()->GetKey("name_2"); sManager->CreateQueue( - key1, sFlusherId, sCtx, vector>{sConcurrencyLimiter}, sMaxRate); + key1, sFlusherId, sCtx, {{"region", sConcurrencyLimiter}}, sMaxRate); sManager->CreateQueue( - key2, sFlusherId, sCtx, 
vector>{sConcurrencyLimiter}, sMaxRate); + key2, sFlusherId, sCtx, {{"region", sConcurrencyLimiter}}, sMaxRate); sManager->PushQueue(key2, GenerateItem()); // queue exists and not marked deleted @@ -157,12 +157,12 @@ void SenderQueueManagerUnittest::TestGetQueue() { APSARA_TEST_EQUAL(nullptr, sManager->GetQueue(0)); // queue existed - sManager->CreateQueue(0, sFlusherId, sCtx, vector>{sConcurrencyLimiter}, sMaxRate); + sManager->CreateQueue(0, sFlusherId, sCtx, {{"region", sConcurrencyLimiter}}, sMaxRate); APSARA_TEST_NOT_EQUAL(nullptr, sManager->GetQueue(0)); } void SenderQueueManagerUnittest::TestPushQueue() { - sManager->CreateQueue(0, sFlusherId, sCtx, vector>{sConcurrencyLimiter}, sMaxRate); + sManager->CreateQueue(0, sFlusherId, sCtx, {{"region", sConcurrencyLimiter}}, sMaxRate); ExactlyOnceQueueManager::GetInstance()->CreateOrUpdateQueue(1, 0, sCtx, sCheckpoints); // queue belongs to normal queue @@ -182,13 +182,13 @@ void SenderQueueManagerUnittest::TestPushQueue() { APSARA_TEST_EQUAL(0, sManager->PushQueue(1, GenerateItem(true))); } -void SenderQueueManagerUnittest::TestGetAllAvailableItems() { +void SenderQueueManagerUnittest::TestGetAvailableItems() { // prepare nomal queue sManager->CreateQueue( 0, sFlusherId, sCtx, - vector>{FlusherSLS::GetRegionConcurrencyLimiter(mFlusher.mRegion)}, + {{"region", FlusherSLS::GetRegionConcurrencyLimiter(mFlusher.mRegion)}}, sMaxRate); for (size_t i = 0; i <= sManager->mQueueParam.GetCapacity(); ++i) { sManager->PushQueue(0, GenerateItem()); @@ -211,25 +211,26 @@ void SenderQueueManagerUnittest::TestGetAllAvailableItems() { { // no limits vector items; - sManager->GetAllAvailableItems(items, false); + sManager->GetAvailableItems(items, -1); APSARA_TEST_EQUAL(4U, items.size()); for (auto& item : items) { - item->mStatus = SendingStatus::IDLE; + item->mStatus.Set(SendingStatus::IDLE); } } auto regionConcurrencyLimiter = FlusherSLS::GetRegionConcurrencyLimiter(mFlusher.mRegion); { // with limits, limited by concurrency 
limiter - regionConcurrencyLimiter->SetLimit(3); + regionConcurrencyLimiter->SetCurrentLimit(3); + regionConcurrencyLimiter->SetInSendingCount(2); vector items; - sManager->GetAllAvailableItems(items); - APSARA_TEST_EQUAL(3U, items.size()); - APSARA_TEST_EQUAL(0, regionConcurrencyLimiter->GetLimit()); + sManager->GetAvailableItems(items, 80); + APSARA_TEST_EQUAL(1U, items.size()); + APSARA_TEST_EQUAL(3U, regionConcurrencyLimiter->GetInSendingCount()); } } void SenderQueueManagerUnittest::TestRemoveItem() { - sManager->CreateQueue(0, sFlusherId, sCtx, vector>{sConcurrencyLimiter}, sMaxRate); + sManager->CreateQueue(0, sFlusherId, sCtx, {{"region", sConcurrencyLimiter}}, sMaxRate); ExactlyOnceQueueManager::GetInstance()->CreateOrUpdateQueue(1, 0, sCtx, sCheckpoints); { // normal queue @@ -256,8 +257,8 @@ void SenderQueueManagerUnittest::TestRemoveItem() { } void SenderQueueManagerUnittest::TestIsAllQueueEmpty() { - sManager->CreateQueue(0, sFlusherId, sCtx, vector>{sConcurrencyLimiter}, sMaxRate); - sManager->CreateQueue(1, sFlusherId, sCtx, vector>{sConcurrencyLimiter}, sMaxRate); + sManager->CreateQueue(0, sFlusherId, sCtx, {{"region", sConcurrencyLimiter}}, sMaxRate); + sManager->CreateQueue(1, sFlusherId, sCtx, {{"region", sConcurrencyLimiter}}, sMaxRate); ExactlyOnceQueueManager::GetInstance()->CreateOrUpdateQueue(2, 0, sCtx, sCheckpoints); ExactlyOnceQueueManager::GetInstance()->CreateOrUpdateQueue(3, 2, sCtx, sCheckpoints); APSARA_TEST_TRUE(sManager->IsAllQueueEmpty()); @@ -296,7 +297,7 @@ UNIT_TEST_CASE(SenderQueueManagerUnittest, TestCreateQueue) UNIT_TEST_CASE(SenderQueueManagerUnittest, TestDeleteQueue) UNIT_TEST_CASE(SenderQueueManagerUnittest, TestGetQueue) UNIT_TEST_CASE(SenderQueueManagerUnittest, TestPushQueue) -UNIT_TEST_CASE(SenderQueueManagerUnittest, TestGetAllAvailableItems) +UNIT_TEST_CASE(SenderQueueManagerUnittest, TestGetAvailableItems) UNIT_TEST_CASE(SenderQueueManagerUnittest, TestRemoveItem) UNIT_TEST_CASE(SenderQueueManagerUnittest, 
TestIsAllQueueEmpty) diff --git a/core/unittest/queue/SenderQueueUnittest.cpp b/core/unittest/queue/SenderQueueUnittest.cpp index 92a4dffea4..62e0045c7e 100644 --- a/core/unittest/queue/SenderQueueUnittest.cpp +++ b/core/unittest/queue/SenderQueueUnittest.cpp @@ -24,25 +24,25 @@ class SenderQueueUnittest : public testing::Test { public: void TestPush(); void TestRemove(); - void TestGetAllAvailableItems(); + void TestGetAvailableItems(); void TestMetric(); protected: static void SetUpTestCase() { - sConcurrencyLimiter = make_shared(); + sConcurrencyLimiter = make_shared(80); sCtx.SetConfigName("test_config"); } void SetUp() override { mQueue.reset(new SenderQueue(sCap, sLowWatermark, sHighWatermark, sKey, sFlusherId, sCtx)); - mQueue->SetConcurrencyLimiters(vector>{sConcurrencyLimiter}); + mQueue->SetConcurrencyLimiters({{"region", sConcurrencyLimiter}}); mQueue->mRateLimiter = RateLimiter(100); mQueue->SetFeedback(&sFeedback); } void TearDown() override { sFeedback.Clear(); - sConcurrencyLimiter->Reset(); + sConcurrencyLimiter = make_shared(80); } private: @@ -117,7 +117,7 @@ void SenderQueueUnittest::TestRemove() { APSARA_TEST_FALSE(mQueue->Remove(items[0])); } -void SenderQueueUnittest::TestGetAllAvailableItems() { +void SenderQueueUnittest::TestGetAvailableItems() { vector items; for (size_t i = 0; i <= sCap; ++i) { auto item = GenerateItem(); @@ -127,65 +127,68 @@ void SenderQueueUnittest::TestGetAllAvailableItems() { { // no limits vector items; - mQueue->GetAllAvailableItems(items, false); + mQueue->GetAvailableItems(items, -1); APSARA_TEST_EQUAL(2U, items.size()); for (auto& item : items) { - item->mStatus = SendingStatus::IDLE; + item->mStatus.Set(SendingStatus::IDLE); } } { // with limits, limited by concurrency limiter mQueue->mRateLimiter->mMaxSendBytesPerSecond = 100; - sConcurrencyLimiter->SetLimit(1); + sConcurrencyLimiter->SetCurrentLimit(1); + sConcurrencyLimiter->SetInSendingCount(0); vector items; - mQueue->GetAllAvailableItems(items); + 
mQueue->GetAvailableItems(items, 80); APSARA_TEST_EQUAL(1U, items.size()); APSARA_TEST_EQUAL(sDataSize, mQueue->mRateLimiter->mLastSecondTotalBytes); - APSARA_TEST_EQUAL(0, sConcurrencyLimiter->GetLimit()); + APSARA_TEST_EQUAL(1, sConcurrencyLimiter->GetInSendingCount()); for (auto& item : items) { - item->mStatus = SendingStatus::IDLE; + item->mStatus.Set(SendingStatus::IDLE); } mQueue->mRateLimiter->mLastSecondTotalBytes = 0; } { // with limits, limited by rate limiter mQueue->mRateLimiter->mMaxSendBytesPerSecond = 5; - sConcurrencyLimiter->SetLimit(3); + sConcurrencyLimiter->SetCurrentLimit(3); + sConcurrencyLimiter->SetInSendingCount(0); vector items; - mQueue->GetAllAvailableItems(items); + mQueue->GetAvailableItems(items, 80); APSARA_TEST_EQUAL(1U, items.size()); APSARA_TEST_EQUAL(sDataSize, mQueue->mRateLimiter->mLastSecondTotalBytes); - APSARA_TEST_EQUAL(2, sConcurrencyLimiter->GetLimit()); + APSARA_TEST_EQUAL(1, sConcurrencyLimiter->GetInSendingCount()); mQueue->mRateLimiter->mLastSecondTotalBytes = 0; } { // with limits, does not work mQueue->mRateLimiter->mMaxSendBytesPerSecond = 100; - sConcurrencyLimiter->SetLimit(3); + sConcurrencyLimiter->SetCurrentLimit(3); + sConcurrencyLimiter->SetInSendingCount(0); vector items; - mQueue->GetAllAvailableItems(items); + mQueue->GetAvailableItems(items, 80); APSARA_TEST_EQUAL(1U, items.size()); APSARA_TEST_EQUAL(sDataSize, mQueue->mRateLimiter->mLastSecondTotalBytes); - APSARA_TEST_EQUAL(2, sConcurrencyLimiter->GetLimit()); + APSARA_TEST_EQUAL(1, sConcurrencyLimiter->GetInSendingCount()); } } void SenderQueueUnittest::TestMetric() { APSARA_TEST_EQUAL(5U, mQueue->mMetricsRecordRef->GetLabels()->size()); - APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_PROJECT, "")); - APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_CONFIG_NAME, "test_config")); - APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_COMPONENT_NAME, "sender_queue")); + 
APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PROJECT, "")); + APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PIPELINE_NAME, "test_config")); + APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_COMPONENT_NAME, METRIC_LABEL_VALUE_COMPONENT_NAME_SENDER_QUEUE)); APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_QUEUE_TYPE, "bounded")); - APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_FLUSHER_NODE_ID, sFlusherId)); + APSARA_TEST_TRUE(mQueue->mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_FLUSHER_PLUGIN_ID, sFlusherId)); auto item1 = GenerateItem(); auto dataSize = item1->mData.size(); auto ptr1 = item1.get(); mQueue->Push(std::move(item1)); - APSARA_TEST_EQUAL(1U, mQueue->mInItemsCnt->GetValue()); + APSARA_TEST_EQUAL(1U, mQueue->mInItemsTotal->GetValue()); APSARA_TEST_EQUAL(dataSize, mQueue->mInItemDataSizeBytes->GetValue()); - APSARA_TEST_EQUAL(1U, mQueue->mQueueSize->GetValue()); + APSARA_TEST_EQUAL(1U, mQueue->mQueueSizeTotal->GetValue()); APSARA_TEST_EQUAL(dataSize, mQueue->mQueueDataSizeByte->GetValue()); APSARA_TEST_EQUAL(1U, mQueue->mValidToPushFlag->GetValue()); @@ -195,25 +198,25 @@ void SenderQueueUnittest::TestMetric() { mQueue->Push(GenerateItem()); - APSARA_TEST_EQUAL(3U, mQueue->mInItemsCnt->GetValue()); + APSARA_TEST_EQUAL(3U, mQueue->mInItemsTotal->GetValue()); APSARA_TEST_EQUAL(dataSize * 3, mQueue->mInItemDataSizeBytes->GetValue()); - APSARA_TEST_EQUAL(2U, mQueue->mQueueSize->GetValue()); + APSARA_TEST_EQUAL(2U, mQueue->mQueueSizeTotal->GetValue()); APSARA_TEST_EQUAL(dataSize * 2, mQueue->mQueueDataSizeByte->GetValue()); APSARA_TEST_EQUAL(0U, mQueue->mValidToPushFlag->GetValue()); APSARA_TEST_EQUAL(1U, mQueue->mExtraBufferSize->GetValue()); APSARA_TEST_EQUAL(dataSize, mQueue->mExtraBufferDataSizeBytes->GetValue()); mQueue->Remove(ptr1); - APSARA_TEST_EQUAL(1U, mQueue->mOutItemsCnt->GetValue()); - APSARA_TEST_EQUAL(2U, mQueue->mQueueSize->GetValue()); 
+ APSARA_TEST_EQUAL(1U, mQueue->mOutItemsTotal->GetValue()); + APSARA_TEST_EQUAL(2U, mQueue->mQueueSizeTotal->GetValue()); APSARA_TEST_EQUAL(dataSize * 2, mQueue->mQueueDataSizeByte->GetValue()); APSARA_TEST_EQUAL(0U, mQueue->mValidToPushFlag->GetValue()); APSARA_TEST_EQUAL(0U, mQueue->mExtraBufferSize->GetValue()); APSARA_TEST_EQUAL(0U, mQueue->mExtraBufferDataSizeBytes->GetValue()); mQueue->Remove(ptr2); - APSARA_TEST_EQUAL(2U, mQueue->mOutItemsCnt->GetValue()); - APSARA_TEST_EQUAL(1U, mQueue->mQueueSize->GetValue()); + APSARA_TEST_EQUAL(2U, mQueue->mOutItemsTotal->GetValue()); + APSARA_TEST_EQUAL(1U, mQueue->mQueueSizeTotal->GetValue()); APSARA_TEST_EQUAL(dataSize, mQueue->mQueueDataSizeByte->GetValue()); APSARA_TEST_EQUAL(1U, mQueue->mValidToPushFlag->GetValue()); } @@ -224,7 +227,7 @@ unique_ptr SenderQueueUnittest::GenerateItem() { UNIT_TEST_CASE(SenderQueueUnittest, TestPush) UNIT_TEST_CASE(SenderQueueUnittest, TestRemove) -UNIT_TEST_CASE(SenderQueueUnittest, TestGetAllAvailableItems) +UNIT_TEST_CASE(SenderQueueUnittest, TestGetAvailableItems) UNIT_TEST_CASE(SenderQueueUnittest, TestMetric) } // namespace logtail diff --git a/core/unittest/reader/ForceReadUnittest.cpp b/core/unittest/reader/ForceReadUnittest.cpp index aac74460f1..fa99c10eec 100644 --- a/core/unittest/reader/ForceReadUnittest.cpp +++ b/core/unittest/reader/ForceReadUnittest.cpp @@ -119,6 +119,7 @@ class ForceReadUnittest : public testing::Test { FileServer::GetInstance()->AddFileReaderConfig(mConfigName, &readerOpts, &ctx); FileServer::GetInstance()->AddMultilineConfig(mConfigName, &multilineOpts, &ctx); ProcessQueueManager::GetInstance()->CreateOrUpdateBoundedQueue(0, 0, ctx); + ProcessQueueManager::GetInstance()->EnablePop(mConfigName); } void TearDown() override { remove(utf8File.c_str()); } diff --git a/core/unittest/route/RouterUnittest.cpp b/core/unittest/route/RouterUnittest.cpp index 3e42ff722a..3a82eb7d18 100644 --- a/core/unittest/route/RouterUnittest.cpp +++ 
b/core/unittest/route/RouterUnittest.cpp @@ -13,7 +13,7 @@ // limitations under the License. #include "common/JsonUtil.h" -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/Pipeline.h" #include "pipeline/route/Router.h" #include "unittest/Unittest.h" @@ -151,16 +151,16 @@ void RouterUnittest::TestMetric() { router.Init(configs, ctx); APSARA_TEST_EQUAL(3U, router.mMetricsRecordRef->GetLabels()->size()); - APSARA_TEST_TRUE(router.mMetricsRecordRef.HasLabel(METRIC_LABEL_PROJECT, "")); - APSARA_TEST_TRUE(router.mMetricsRecordRef.HasLabel(METRIC_LABEL_CONFIG_NAME, "test_config")); - APSARA_TEST_TRUE(router.mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_COMPONENT_NAME, "router")); + APSARA_TEST_TRUE(router.mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PROJECT, "")); + APSARA_TEST_TRUE(router.mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_PIPELINE_NAME, "test_config")); + APSARA_TEST_TRUE(router.mMetricsRecordRef.HasLabel(METRIC_LABEL_KEY_COMPONENT_NAME, METRIC_LABEL_VALUE_COMPONENT_NAME_ROUTER)); PipelineEventGroup g(make_shared()); g.AddLogEvent(); auto size = g.DataSize(); router.Route(g); - APSARA_TEST_EQUAL(1U, router.mInEventsCnt->GetValue()); + APSARA_TEST_EQUAL(1U, router.mInEventsTotal->GetValue()); APSARA_TEST_EQUAL(size, router.mInGroupDataSizeBytes->GetValue()); } diff --git a/core/unittest/sender/FlusherRunnerUnittest.cpp b/core/unittest/sender/FlusherRunnerUnittest.cpp index 2940cd9fad..20a931cfb0 100644 --- a/core/unittest/sender/FlusherRunnerUnittest.cpp +++ b/core/unittest/sender/FlusherRunnerUnittest.cpp @@ -35,7 +35,7 @@ void FlusherRunnerUnittest::TestDispatch() { Json::Value tmp; PipelineContext ctx; flusher->SetContext(ctx); - flusher->SetMetricsRecordRef("name", "pluginId", "nodeId", "childNodeId"); + flusher->SetMetricsRecordRef("name", "1"); flusher->Init(Json::Value(), tmp); auto item = make_unique("content", 10, flusher.get(), flusher->GetQueueKey()); @@ -54,7 +54,7 @@ void 
FlusherRunnerUnittest::TestDispatch() { Json::Value tmp; PipelineContext ctx; flusher->SetContext(ctx); - flusher->SetMetricsRecordRef("name", "pluginId", "nodeId", "childNodeId"); + flusher->SetMetricsRecordRef("name", "1"); flusher->Init(Json::Value(), tmp); auto item = make_unique("content", 10, flusher.get(), flusher->GetQueueKey()); diff --git a/core/unittest/sender/SenderUnittest.cpp b/core/unittest/sender/SenderUnittest.cpp index 6c83796018..fcc1313bdb 100644 --- a/core/unittest/sender/SenderUnittest.cpp +++ b/core/unittest/sender/SenderUnittest.cpp @@ -12,56 +12,59 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "unittest/Unittest.h" +#include #include -#include "file_server/EventDispatcher.h" -#include "file_server/ConfigManager.h" + #include "app_config/AppConfig.h" -#include "file_server/reader/LogFileReader.h" +#include "file_server/ConfigManager.h" +#include "file_server/EventDispatcher.h" +#include "file_server/event/Event.h" #include "file_server/event_handler/EventHandler.h" +#include "file_server/reader/LogFileReader.h" #include "monitor/Monitor.h" -#include "file_server/event/Event.h" #include "sender/Sender.h" -#include +#include "unittest/Unittest.h" #if defined(__linux__) #include #endif -#include -#include -#include -#include +#include #include -#include +#include #include +#include +#include +#include +#include + +#include +#include +#include +#include +#include #include -#include #include -#include -#include -#include -#include "protobuf/sls/metric.pb.h" -#include "protobuf/sls/sls_logs.pb.h" -#include "monitor/LogtailAlarm.h" -#include "monitor/LogIntegrity.h" -#include "file_server/event_handler/LogInput.h" +#include + +#include "checkpoint/CheckpointManagerV2.h" +#include "common/Constants.h" #include "common/FileEncryption.h" -#include "runner/LogProcess.h" -#include "common/WaitObject.h" +#include "common/FileSystemUtil.h" #include "common/Lock.h" +#include 
"common/LogFileCollectOffsetIndicator.h" #include "common/MemoryBarrier.h" #include "common/StringTools.h" #include "common/Thread.h" -#include "common/Constants.h" -#include "common/FileSystemUtil.h" -#include "checkpoint/CheckpointManagerV2.h" +#include "common/WaitObject.h" +#include "file_server/event_handler/LogInput.h" #include "logger/Logger.h" +#include "monitor/LogIntegrity.h" +#include "monitor/LogtailAlarm.h" +#include "protobuf/sls/metric.pb.h" +#include "protobuf/sls/sls_logs.pb.h" +#include "runner/ProcessorRunner.h" #include "sdk/Client.h" #include "sdk/Common.h" #include "sdk/Exception.h" -#include "common/LogFileCollectOffsetIndicator.h" -#include -#include -#include using namespace std; using namespace sls_logs; @@ -456,7 +459,7 @@ static decltype(ExactlyOnceQueueManager::GetInstance()) sQueueM = nullptr; static decltype(EventDispatcher::GetInstance()) sEventDispatcher = nullptr; class SenderUnittest : public ::testing::Test { - static decltype(LogProcess::GetInstance()->GetQueue().mLogstoreQueueMap)* sProcessQueueMap; + static decltype(ProcessorRunner::GetInstance()->GetQueue().mLogstoreQueueMap)* sProcessQueueMap; static decltype(Sender::Instance()->GetQueue().mLogstoreSenderQueueMap)* sSenderQueueMap; void clearGlobalResource() { sCptM->rebuild(); @@ -860,11 +863,11 @@ class SenderUnittest : public ::testing::Test { auto const sysConfDir = gRootDir + PATH_SEPARATOR + ".ilogtail" + PATH_SEPARATOR; bfs::create_directories(sysConfDir); - AppConfig::GetInstance()->SetLogtailSysConfDir(sysConfDir); + AppConfig::GetInstance()->SetLoongcollectorConfDir(sysConfDir); sCptM = CheckpointManagerV2::GetInstance(); sQueueM = ExactlyOnceQueueManager::GetInstance(); sEventDispatcher = EventDispatcher::GetInstance(); - sProcessQueueMap = &(LogProcess::GetInstance()->GetQueue().mLogstoreQueueMap); + sProcessQueueMap = &(ProcessorRunner::GetInstance()->GetQueue().mLogstoreQueueMap); sSenderQueueMap = &(Sender::Instance()->GetQueue().mLogstoreSenderQueueMap); new 
Thread(&SenderUnittest::MockAsyncSendThread); @@ -914,7 +917,7 @@ class SenderUnittest : public ::testing::Test { PTScopedLock lock(gBufferLogGroupsLock); gBufferLogGroups.clear(); } - bfs::remove("ilogtail_config.json"); + bfs::remove("loongcollector_config.json"); { fsutil::Dir dir(gRootDir); dir.Open(); @@ -928,7 +931,7 @@ class SenderUnittest : public ::testing::Test { } sCptM->close(); bfs::remove_all(gRootDir); - bfs::create_directories(AppConfig::GetInstance()->GetLogtailSysConfDir()); + bfs::create_directories(AppConfig::GetInstance()->GetLoongcollectorConfDir()); sCptM->open(); if (gEnableExactlyOnce) { clearGlobalResource(); @@ -1018,7 +1021,7 @@ class SenderUnittest : public ::testing::Test { LogInput::GetInstance()->CleanEnviroments(); sleep(1); EventDispatcher::GetInstance()->CleanEnviroments(); - LogProcess::GetInstance()->CleanEnviroments(); + ProcessorRunner::GetInstance()->CleanEnviroments(); Sender::Instance()->RemoveSender(); if (gRealIpSendThread) { Sender::Instance()->mStopRealIpThread = true; @@ -1937,7 +1940,7 @@ class SenderUnittest : public ::testing::Test { sleep(1); Sender* pSender = Sender::Instance(); Aggregator* pAgg = Aggregator::GetInstance(); - if (LogProcess::GetInstance()->mLogFeedbackQueue.IsEmpty() && pAgg->IsMergeMapEmpty() + if (ProcessorRunner::GetInstance()->mLogFeedbackQueue.IsEmpty() && pAgg->IsMergeMapEmpty() && pSender->IsBatchMapEmpty() && pSender->GetSendingCount() == 0 && pSender->IsSecondaryBufferEmpty()) { break; } @@ -2000,7 +2003,7 @@ class SenderUnittest : public ::testing::Test { sleep(1); Sender* pSender = Sender::Instance(); Aggregator* pAgg = Aggregator::GetInstance(); - if (LogProcess::GetInstance()->mLogFeedbackQueue.IsEmpty() && pAgg->IsMergeMapEmpty() + if (ProcessorRunner::GetInstance()->mLogFeedbackQueue.IsEmpty() && pAgg->IsMergeMapEmpty() && pSender->IsBatchMapEmpty() && pSender->GetSendingCount() == 0 && pSender->IsSecondaryBufferEmpty()) { break; } diff --git 
a/core/unittest/serializer/SLSSerializerUnittest.cpp b/core/unittest/serializer/SLSSerializerUnittest.cpp index 7d57cef290..caa379e08f 100644 --- a/core/unittest/serializer/SLSSerializerUnittest.cpp +++ b/core/unittest/serializer/SLSSerializerUnittest.cpp @@ -33,7 +33,7 @@ class SLSSerializerUnittest : public ::testing::Test { void SetUp() override { mCtx.SetConfigName("test_config"); sFlusher->SetContext(mCtx); - sFlusher->SetMetricsRecordRef(FlusherSLS::sName, "1", "1", "1"); + sFlusher->SetMetricsRecordRef(FlusherSLS::sName, "1"); } private: diff --git a/core/unittest/serializer/SerializerUnittest.cpp b/core/unittest/serializer/SerializerUnittest.cpp index ed632e02ae..9d08338794 100644 --- a/core/unittest/serializer/SerializerUnittest.cpp +++ b/core/unittest/serializer/SerializerUnittest.cpp @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "monitor/MetricConstants.h" +#include "monitor/metric_constants/MetricConstants.h" #include "pipeline/plugin/interface/Flusher.h" #include "pipeline/serializer/Serializer.h" #include "unittest/Unittest.h" @@ -46,7 +46,7 @@ class SerializerUnittest : public ::testing::Test { void SetUp() override { mCtx.SetConfigName("test_config"); sFlusher->SetContext(mCtx); - sFlusher->SetMetricsRecordRef(FlusherMock::sName, "1", "1", "1"); + sFlusher->SetMetricsRecordRef(FlusherMock::sName, "1"); } private: @@ -67,11 +67,11 @@ void SerializerUnittest::TestMetric() { string output; string errorMsg; serializer.DoSerialize(std::move(input), output, errorMsg); - APSARA_TEST_EQUAL(1U, serializer.mInItemsCnt->GetValue()); + APSARA_TEST_EQUAL(1U, serializer.mInItemsTotal->GetValue()); APSARA_TEST_EQUAL(inputSize, serializer.mInItemSizeBytes->GetValue()); - APSARA_TEST_EQUAL(1U, serializer.mOutItemsCnt->GetValue()); + APSARA_TEST_EQUAL(1U, serializer.mOutItemsTotal->GetValue()); APSARA_TEST_EQUAL(output.size(), serializer.mOutItemSizeBytes->GetValue()); - 
APSARA_TEST_EQUAL(0U, serializer.mDiscardedItemsCnt->GetValue()); + APSARA_TEST_EQUAL(0U, serializer.mDiscardedItemsTotal->GetValue()); APSARA_TEST_EQUAL(0U, serializer.mDiscardedItemSizeBytes->GetValue()); } { @@ -81,11 +81,11 @@ void SerializerUnittest::TestMetric() { string output; string errorMsg; serializer.DoSerialize(std::move(input), output, errorMsg); - APSARA_TEST_EQUAL(1U, serializer.mInItemsCnt->GetValue()); + APSARA_TEST_EQUAL(1U, serializer.mInItemsTotal->GetValue()); APSARA_TEST_EQUAL(inputSize, serializer.mInItemSizeBytes->GetValue()); - APSARA_TEST_EQUAL(0U, serializer.mOutItemsCnt->GetValue()); + APSARA_TEST_EQUAL(0U, serializer.mOutItemsTotal->GetValue()); APSARA_TEST_EQUAL(0U, serializer.mOutItemSizeBytes->GetValue()); - APSARA_TEST_EQUAL(1U, serializer.mDiscardedItemsCnt->GetValue()); + APSARA_TEST_EQUAL(1U, serializer.mDiscardedItemsTotal->GetValue()); APSARA_TEST_EQUAL(inputSize, serializer.mDiscardedItemSizeBytes->GetValue()); } } diff --git a/core/unittest/spl/SplBenchmark.cpp b/core/unittest/spl/SplBenchmark.cpp index 8309af3ce4..098a2189b2 100644 --- a/core/unittest/spl/SplBenchmark.cpp +++ b/core/unittest/spl/SplBenchmark.cpp @@ -30,7 +30,7 @@ using namespace logtail; PluginInstance::PluginMeta getPluginMeta(){ - PluginInstance::PluginMeta pluginMeta{"testgetPluginID", "testNodeID", "testNodeChildID"}; + PluginInstance::PluginMeta pluginMeta{"1"}; return pluginMeta; } diff --git a/core/unittest/spl/SplUnittest.cpp b/core/unittest/spl/SplUnittest.cpp index 94ce95c50d..92184ef310 100644 --- a/core/unittest/spl/SplUnittest.cpp +++ b/core/unittest/spl/SplUnittest.cpp @@ -57,7 +57,7 @@ APSARA_UNIT_TEST_CASE(SplUnittest, TestTag, 6); //APSARA_UNIT_TEST_CASE(SplUnittest, TestMultiParse, 7); PluginInstance::PluginMeta getPluginMeta(){ - PluginInstance::PluginMeta pluginMeta{"testgetPluginID", "testNodeID", "testNodeChildID"}; + PluginInstance::PluginMeta pluginMeta{"1"}; return pluginMeta; } diff --git a/docker/Dockerfile.e2e-test 
b/docker/Dockerfile.e2e-test index 6cfda33fb1..785df4caa3 100644 --- a/docker/Dockerfile.e2e-test +++ b/docker/Dockerfile.e2e-test @@ -1,12 +1,12 @@ FROM golang:1.19 RUN go env -w GOPROXY="https://goproxy.cn,direct" -RUN mkdir -p /tmp/ilogtail +RUN mkdir -p /tmp/loongcollector WORKDIR /root -COPY . ./ilogtail +COPY . ./loongcollector -WORKDIR /root/ilogtail/test +WORKDIR /root/loongcollector/test RUN go mod download CMD ["sh", "-c", "while true; do sleep 3600; done"] \ No newline at end of file diff --git a/docker/Dockerfile_build b/docker/Dockerfile_build index 0f58b57e6f..eb1a7545ba 100644 --- a/docker/Dockerfile_build +++ b/docker/Dockerfile_build @@ -19,7 +19,7 @@ WORKDIR /src COPY . . ARG HOST_OS=Linux -ARG VERSION=2.0.0 +ARG VERSION=0.0.1 USER root diff --git a/docker/Dockerfile_coverage b/docker/Dockerfile_coverage index e0fedd193d..a445ba1b98 100644 --- a/docker/Dockerfile_coverage +++ b/docker/Dockerfile_coverage @@ -30,4 +30,4 @@ RUN python3 -m pip install --upgrade pip RUN cp /usr/local/python3/bin/pip3 /usr/bin/pip3 && pip3 install gcovr==7.0 RUN cp /usr/local/python3/bin/gcovr /usr/bin/gcovr -CMD ["bash", "-c", "gcovr --root . --lcov coverage.lcov --txt coverage.txt -e \".*sdk.*\" -e \".*observer.*\" -e \".*lo.*\" -e \".*unittest.*\" -e \".*config_server.*\" -e \".*fuse.*\" -e \".*go_pipeline.*\""] +CMD ["bash", "-c", "gcovr --root . 
--json coverage.json --json-summary-pretty --json-summary summary.json -e \".*sdk.*\" -e \".*observer.*\" -e \".*logger.*\" -e \".*unittest.*\" -e \".*config_server.*\" -e \".*go_pipeline.*\" -e \".*application.*\" -e \".*protobuf.*\" -e \".*runner.*\""] diff --git a/docker/Dockerfile_development_part b/docker/Dockerfile_development_part index b9f58f4546..b5282bb50a 100644 --- a/docker/Dockerfile_development_part +++ b/docker/Dockerfile_development_part @@ -15,27 +15,32 @@ FROM sls-opensource-registry.cn-shanghai.cr.aliyuncs.com/ilogtail-community-edition/ilogtail-build-linux:2.0.3 ARG HOST_OS=Linux -ARG VERSION=2.0.0 +ARG VERSION=0.0.1 USER root -WORKDIR /ilogtail +WORKDIR /loongcollector -COPY --from=build /src/core/build/ilogtail /ilogtail/ +RUN mkdir -p /loongcollector/conf +RUN mkdir -p /loongcollector/log +RUN mkdir -p /loongcollector/data +RUN mkdir -p /loongcollector/run + +COPY --from=build /src/core/build/loongcollector /loongcollector/ COPY ./scripts/download_ebpflib.sh /tmp/ -RUN chown -R $(whoami) /ilogtail && \ - chmod 755 /ilogtail/ilogtail && \ - mkdir /ilogtail/checkpoint && \ - if [ `uname -m` = "x86_64" ]; then /tmp/download_ebpflib.sh /ilogtail; fi && \ +RUN chown -R $(whoami) /loongcollector && \ + chmod 755 /loongcollector/loongcollector && \ + mkdir /loongcollector/data/checkpoint && \ + if [ `uname -m` = "x86_64" ]; then /tmp/download_ebpflib.sh /loongcollector; fi && \ rm /tmp/download_ebpflib.sh -COPY --from=build /src/output/libPluginBase.so /ilogtail/ -COPY --from=build /src/example_config/quick_start/ilogtail_config.json /ilogtail/ -COPY --from=build /src/core/build/go_pipeline/libPluginAdapter.so /ilogtail/ +COPY --from=build /src/output/libGoPluginBase.so /loongcollector/ +COPY --from=build /src/example_config/quick_start/loongcollector_config.json /loongcollector/ +COPY --from=build /src/core/build/go_pipeline/libGoPluginAdapter.so /loongcollector/ ENV HOST_OS=$HOST_OS ENV LOGTAIL_VERSION=$VERSION EXPOSE 18689 -ENTRYPOINT 
["/ilogtail/ilogtail"] +ENTRYPOINT ["/loongcollector/loongcollector"] diff --git a/docker/Dockerfile_e2e b/docker/Dockerfile_e2e index 90ff3d9d01..1321a35ec7 100644 --- a/docker/Dockerfile_e2e +++ b/docker/Dockerfile_e2e @@ -15,27 +15,32 @@ FROM sls-opensource-registry.cn-shanghai.cr.aliyuncs.com/ilogtail-community-edition/ilogtail-build-linux:2.0.3 ARG HOST_OS=Linux -ARG VERSION=2.0.0 +ARG VERSION=0.0.1 USER root -WORKDIR /ilogtail +WORKDIR /loongcollector -COPY ./output/ilogtail /ilogtail/ +RUN mkdir -p /loongcollector/conf +RUN mkdir -p /loongcollector/log +RUN mkdir -p /loongcollector/data +RUN mkdir -p /loongcollector/run + +COPY ./output/loongcollector /loongcollector/loongcollector/ COPY ./scripts/download_ebpflib.sh /tmp/ -RUN chown -R $(whoami) /ilogtail && \ - chmod 755 /ilogtail/ilogtail && \ - mkdir /ilogtail/checkpoint && \ - if [ `uname -m` = "x86_64" ]; then /tmp/download_ebpflib.sh /ilogtail; fi && \ +RUN chown -R $(whoami) /loongcollector && \ + chmod 755 /loongcollector/loongcollector && \ + mkdir /loongcollector/data/checkpoint && \ + if [ `uname -m` = "x86_64" ]; then /tmp/download_ebpflib.sh /loongcollector; fi && \ rm /tmp/download_ebpflib.sh -COPY ./output/libPluginBase.so /ilogtail/ -COPY ./example_config/quick_start/ilogtail_config.json /ilogtail/ -COPY ./output/libPluginAdapter.so /ilogtail/ +COPY ./output/libGoPluginBase.so /loongcollector/ +COPY ./example_config/quick_start/loongcollector_config.json /loongcollector/ +COPY ./output/libGoPluginAdapter.so /loongcollector/ ENV HOST_OS=$HOST_OS ENV LOGTAIL_VERSION=$VERSION EXPOSE 18689 -ENTRYPOINT ["/ilogtail/ilogtail"] +ENTRYPOINT ["/loongcollector/loongcollector"] diff --git a/docker/Dockerfile_production b/docker/Dockerfile_production index aa1fdd3cf9..95b618b840 100644 --- a/docker/Dockerfile_production +++ b/docker/Dockerfile_production @@ -13,11 +13,11 @@ # limitations under the License. 
FROM --platform=$TARGETPLATFORM centos:centos7.9.2009 as build -ARG VERSION=2.0.0 +ARG VERSION=0.0.1 ARG TARGETPLATFORM WORKDIR /usr/local -COPY dist/ilogtail-${VERSION}.linux-*.tar.gz . -RUN tar -xzf ilogtail-${VERSION}.linux-${TARGETPLATFORM##*/}.tar.gz +COPY dist/loongcollector-${VERSION}.linux-*.tar.gz . +RUN tar -xzf loongcollector-${VERSION}.linux-${TARGETPLATFORM##*/}.tar.gz FROM centos:centos7.9.2009 @@ -30,19 +30,24 @@ RUN curl -L -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/ RUN yum update -y && yum upgrade -y && yum -y clean all && rm -fr /var/cache && rm -rf /core.* ARG HOST_OS=Linux -ARG VERSION=2.0.0 +ARG VERSION=0.0.1 ARG TARGETPLATFORM -COPY --from=build /usr/local/ilogtail-${VERSION} /usr/local/ilogtail -RUN chown -R $(whoami) /usr/local/ilogtail/ && \ - chmod 755 /usr/local/ilogtail/ilogtail && \ - mkdir /usr/local/ilogtail/checkpoint +COPY --from=build /usr/local/loongcollector-${VERSION} /usr/local/loongcollector +RUN chown -R $(whoami) /usr/local/loongcollector/ && \ + chmod 755 /usr/local/loongcollector/loongcollector && \ + mkdir -p /usr/local/loongcollector/data/checkpoint -WORKDIR /usr/local/ilogtail -COPY example_config/start_with_docker/ilogtail_config.json /usr/local/ilogtail/ -COPY scripts/ilogtail_control.sh /usr/local/ilogtail/ +WORKDIR /usr/local/loongcollector +RUN mkdir -p /usr/local/loongcollector/conf +RUN mkdir -p /usr/local/loongcollector/log +RUN mkdir -p /usr/local/loongcollector/data +RUN mkdir -p /usr/local/loongcollector/run + +COPY example_config/start_with_docker/loongcollector_config.json /usr/local/loongcollector/ +COPY scripts/loongcollector_control.sh /usr/local/loongcollector/ ENV HTTP_PROBE_PORT=7953 \ ALIYUN_LOGTAIL_USER_DEFINED_ID=default \ docker_file_cache_path=checkpoint/docker_path_config.json -CMD ["/usr/local/ilogtail/ilogtail_control.sh", "start_and_block"] +CMD ["/usr/local/loongcollector/loongcollector_control.sh", "start_and_block"] diff --git 
a/docker/Dockerfile_production_minimal b/docker/Dockerfile_production_minimal index 09a6c1fa6d..71071a4416 100644 --- a/docker/Dockerfile_production_minimal +++ b/docker/Dockerfile_production_minimal @@ -13,16 +13,16 @@ # limitations under the License. FROM --platform=$TARGETPLATFORM debian:bookworm as build -ARG VERSION=2.0.0 +ARG VERSION=0.0.1 ARG TARGETPLATFORM ARG UID=65532 WORKDIR /usr/local -COPY dist/ilogtail-${VERSION}.linux-*.tar.gz . -RUN tar -xzf ilogtail-${VERSION}.linux-${TARGETPLATFORM##*/}.tar.gz -RUN mv /usr/local/ilogtail-${VERSION} /usr/local/ilogtail && \ - mkdir /usr/local/ilogtail/checkpoint && \ - cp example_config/start_with_docker/ilogtail_config.json /usr/local/ilogtail/ && \ - chmod 755 /usr/local/ilogtail/ilogtail +COPY dist/loongcollector-${VERSION}.linux-*.tar.gz . +RUN tar -xzf loongcollector-${VERSION}.linux-${TARGETPLATFORM##*/}.tar.gz +RUN mv /usr/local/loongcollector-${VERSION} /usr/local/loongcollector && \ + mkdir -p /usr/local/loongcollector/data/checkpoint && \ + cp example_config/start_with_docker/loongcollector_config.json /usr/local/loongcollector/ && \ + chmod 755 /usr/local/loongcollector/loongcollector FROM gcr.lank8s.cn/distroless/cc-debian12:latest LABEL org.opencontainers.image.authors="yyuuttaaoo@gmail.com, jiangdeyan@gmail.com" @@ -30,14 +30,18 @@ LABEL org.opencontainers.image.authors="yyuuttaaoo@gmail.com, jiangdeyan@gmail.c COPY --from=build /usr/lib/x86_64-linux-gnu/libuuid.so.1.3.0 /lib/x86_64-linux-gnu/libuuid.so.1 ARG HOST_OS=Linux -ARG VERSION=2.0.0 +ARG VERSION=0.0.1 ARG TARGETPLATFORM -COPY --from=build --chown=$UID:$UID /usr/local/ilogtail /usr/local/ilogtail +COPY --from=build --chown=$UID:$UID /usr/local/loongcollector /usr/local/loongcollector -WORKDIR /usr/local/ilogtail +WORKDIR /usr/local/loongcollector +RUN mkdir -p /usr/local/loongcollector/conf +RUN mkdir -p /usr/local/loongcollector/log +RUN mkdir -p /usr/local/loongcollector/data +RUN mkdir -p /usr/local/loongcollector/run ENV 
HTTP_PROBE_PORT=7953 \ ALIYUN_LOGTAIL_USER_DEFINED_ID=default \ - docker_file_cache_path=checkpoint/docker_path_config.json -CMD ["/usr/local/ilogtail/ilogtail"] \ No newline at end of file + docker_file_cache_path=data/checkpoint/docker_path_config.json +CMD ["/usr/local/loongcollector/loongcollector"] \ No newline at end of file diff --git a/example_config/quick_start/ilogtail_config.json b/example_config/quick_start/loongcollector_config.json similarity index 100% rename from example_config/quick_start/ilogtail_config.json rename to example_config/quick_start/loongcollector_config.json diff --git a/example_config/start_with_docker/ilogtail_config.json b/example_config/start_with_docker/loongcollector_config.json similarity index 100% rename from example_config/start_with_docker/ilogtail_config.json rename to example_config/start_with_docker/loongcollector_config.json diff --git a/go.mod b/go.mod index f161a80a34..0e6b81e4d2 100644 --- a/go.mod +++ b/go.mod @@ -234,7 +234,7 @@ require ( github.com/valyala/fastjson v1.6.3 // indirect github.com/valyala/fastrand v1.1.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect - github.com/valyala/gozstd v1.21.1 // indirect + github.com/valyala/gozstd v1.17.0 // indirect github.com/valyala/histogram v1.2.0 // indirect github.com/valyala/quicktemplate v1.7.0 // indirect github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 // indirect diff --git a/go.sum b/go.sum index 8a55a481b4..ab5dcedea4 100644 --- a/go.sum +++ b/go.sum @@ -1592,8 +1592,8 @@ github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ= github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/valyala/gozstd v1.21.1 h1:TQFZVTk5zo7iJcX3o4XYBJujPdO31LFb4fVImwK873A= -github.com/valyala/gozstd 
v1.21.1/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ= +github.com/valyala/gozstd v1.17.0 h1:M4Ds4MIrw+pD+s6vYtuFZ8D3iEw9htzfdytOV3C3iQU= +github.com/valyala/gozstd v1.17.0/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ= github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ= github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY= github.com/valyala/quicktemplate v1.7.0 h1:LUPTJmlVcb46OOUY3IeD9DojFpAVbsG+5WFTcjMJzCM= diff --git a/pkg/config/global_config.go b/pkg/config/global_config.go index 445de39779..f5d3d13708 100644 --- a/pkg/config/global_config.go +++ b/pkg/config/global_config.go @@ -27,9 +27,17 @@ type GlobalConfig struct { DefaultLogQueueSize int DefaultLogGroupQueueSize int Tags map[string]string - // Directory to store logtail data, such as checkpoint, etc. - LogtailSysConfDir string - // Network identification from logtail. + // Directory to store loongcollector data, such as checkpoint, etc. + LoongcollectorConfDir string + // Directory to store loongcollector log. + LoongcollectorLogDir string + // Directory to store loongcollector data. + LoongcollectorDataDir string + // Directory to store loongcollector debug data. + LoongcollectorDebugDir string + // Directory to store loongcollector third party data. + LoongcollectorThirdPartyDir string + // Network identification from loongcollector. HostIP string Hostname string AlwaysOnline bool @@ -41,8 +49,8 @@ type GlobalConfig struct { EnableSlsMetricsFormat bool } -// LogtailGlobalConfig is the singleton instance of GlobalConfig. -var LogtailGlobalConfig = newGlobalConfig() +// LoongcollectorGlobalConfig is the singleton instance of GlobalConfig. 
+var LoongcollectorGlobalConfig = newGlobalConfig() // StatisticsConfigJson, AlarmConfigJson var BaseVersion = "0.1.0" // will be overwritten through ldflags at compile time @@ -50,13 +58,17 @@ var UserAgent = fmt.Sprintf("ilogtail/%v (%v)", BaseVersion, runtime.GOOS) // se func newGlobalConfig() (cfg GlobalConfig) { cfg = GlobalConfig{ - InputIntervalMs: 1000, - AggregatIntervalMs: 3000, - FlushIntervalMs: 3000, - DefaultLogQueueSize: 1000, - DefaultLogGroupQueueSize: 4, - LogtailSysConfDir: ".", - DelayStopSec: 300, + InputIntervalMs: 1000, + AggregatIntervalMs: 3000, + FlushIntervalMs: 3000, + DefaultLogQueueSize: 1000, + DefaultLogGroupQueueSize: 4, + LoongcollectorConfDir: "./conf/", + LoongcollectorLogDir: "./log/", + LoongcollectorDataDir: "./data/", + LoongcollectorDebugDir: "./debug/", + LoongcollectorThirdPartyDir: "./thirdparty/", + DelayStopSec: 300, } return } diff --git a/pkg/flags/flags.go b/pkg/flags/flags.go index 7de12b80e6..e8b133e684 100644 --- a/pkg/flags/flags.go +++ b/pkg/flags/flags.go @@ -120,6 +120,7 @@ var ( DeployMode = flag.String("DEPLOY_MODE", DeployDaemonset, "alibaba log deploy mode, daemonset or statefulset or singleton") EnableKubernetesMeta = flag.Bool("ENABLE_KUBERNETES_META", false, "enable kubernetes meta") ClusterID = flag.String("GLOBAL_CLUSTER_ID", "", "cluster id") + ClusterType = flag.String("GLOBAL_CLUSTER_TYPE", "", "cluster type, supporting ack, one, asi and k8s") ) func init() { @@ -148,6 +149,7 @@ func init() { _ = util.InitFromEnvString("DEPLOY_MODE", DeployMode, *DeployMode) _ = util.InitFromEnvBool("ENABLE_KUBERNETES_META", EnableKubernetesMeta, *EnableKubernetesMeta) _ = util.InitFromEnvString("GLOBAL_CLUSTER_ID", ClusterID, *ClusterID) + _ = util.InitFromEnvString("GLOBAL_CLUSTER_TYPE", ClusterType, *ClusterType) if len(*DefaultRegion) == 0 { *DefaultRegion = util.GuessRegionByEndpoint(*LogServiceEndpoint, "cn-hangzhou") diff --git a/pkg/helper/docker_center.go b/pkg/helper/docker_center.go index 
cb202c5c0e..6fe0efcf6c 100644 --- a/pkg/helper/docker_center.go +++ b/pkg/helper/docker_center.go @@ -631,7 +631,7 @@ func (dc *DockerCenter) CreateInfoDetail(info types.ContainerJSON, envConfigPref func getDockerCenterInstance() *DockerCenter { onceDocker.Do(func() { - logger.Init() + logger.InitLogger() // load EnvTags first LoadEnvTags() dockerCenterInstance = &DockerCenter{} diff --git a/pkg/helper/docker_cri_adapter.go b/pkg/helper/docker_cri_adapter.go index 7b902997dd..dcaef6c644 100644 --- a/pkg/helper/docker_cri_adapter.go +++ b/pkg/helper/docker_cri_adapter.go @@ -174,7 +174,7 @@ func NewCRIRuntimeWrapper(dockerCenter *DockerCenter) (*CRIRuntimeWrapper, error } var containerdClient *containerd.Client - if config.LogtailGlobalConfig.EnableContainerdUpperDirDetect { + if config.LoongcollectorGlobalConfig.EnableContainerdUpperDirDetect { containerdClient, err = containerd.New(containerdUnixSocket, containerd.WithDefaultNamespace("k8s.io")) if err == nil { _, err = containerdClient.Version(context.Background()) diff --git a/pkg/helper/dumper.go b/pkg/helper/dumper.go index 12fb81be6a..dc9d5d3cf0 100644 --- a/pkg/helper/dumper.go +++ b/pkg/helper/dumper.go @@ -25,9 +25,9 @@ import ( "sync" "time" + "github.com/alibaba/ilogtail/pkg/config" "github.com/alibaba/ilogtail/pkg/helper/async" "github.com/alibaba/ilogtail/pkg/logger" - "github.com/alibaba/ilogtail/pkg/util" ) type DumpDataReq struct { @@ -58,10 +58,11 @@ type Dumper struct { } func (d *Dumper) Init() { - _ = os.MkdirAll(path.Join(util.GetCurrentBinaryPath(), "dump"), 0750) + // 只有 service_http_server 插件会使用这个模块 + _ = os.MkdirAll(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorDebugDir, "dump"), 0750) d.input = make(chan *DumpData, 10) d.stop = make(chan struct{}) - files, err := GetFileListByPrefix(path.Join(util.GetCurrentBinaryPath(), "dump"), d.prefix, true, 0) + files, err := GetFileListByPrefix(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorDebugDir, "dump"), d.prefix, true, 0) 
if err != nil { logger.Warning(context.Background(), "LIST_HISTORY_DUMP_ALARM", "err", err) } else { @@ -97,7 +98,7 @@ func (d *Dumper) doDumpFile() { } } cutFile := func() (f *os.File, err error) { - nFile := path.Join(path.Join(util.GetCurrentBinaryPath(), "dump"), fileName+"_"+time.Now().Format("2006-01-02_15")) + nFile := path.Join(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorDebugDir, "dump"), fileName+"_"+time.Now().Format("2006-01-02_15")) if len(d.dumpDataKeepFiles) == 0 || d.dumpDataKeepFiles[len(d.dumpDataKeepFiles)-1] != nFile { d.dumpDataKeepFiles = append(d.dumpDataKeepFiles, nFile) } diff --git a/pkg/helper/dumper_test.go b/pkg/helper/dumper_test.go index 3c48168dd1..0b11a9491e 100644 --- a/pkg/helper/dumper_test.go +++ b/pkg/helper/dumper_test.go @@ -25,15 +25,15 @@ import ( "testing" "time" - "github.com/alibaba/ilogtail/pkg/util" + "github.com/alibaba/ilogtail/pkg/config" "github.com/stretchr/testify/require" ) func TestServiceHTTP_doDumpFile(t *testing.T) { - _, err := os.Stat(path.Join(util.GetCurrentBinaryPath(), "dump")) + _, err := os.Stat(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorDebugDir, "dump")) if err == nil { - files, findErr := GetFileListByPrefix(path.Join(util.GetCurrentBinaryPath(), "dump"), "custom", true, 0) + files, findErr := GetFileListByPrefix(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorDebugDir, "dump"), "custom", true, 0) require.NoError(t, findErr) for _, file := range files { _ = os.Remove(file) diff --git a/pkg/helper/k8smeta/k8s_meta_cache.go b/pkg/helper/k8smeta/k8s_meta_cache.go index 7fcdda4c6b..a8022c40c8 100644 --- a/pkg/helper/k8smeta/k8s_meta_cache.go +++ b/pkg/helper/k8smeta/k8s_meta_cache.go @@ -57,6 +57,14 @@ func (m *k8sMetaCache) Get(key []string) map[string][]*ObjectWrapper { return m.metaStore.Get(key) } +func (m *k8sMetaCache) GetSize() int { + return len(m.metaStore.Items) +} + +func (m *k8sMetaCache) GetQueueSize() int { + return len(m.eventCh) +} + func (m 
*k8sMetaCache) List() []*ObjectWrapper { return m.metaStore.List() } @@ -86,6 +94,7 @@ func (m *k8sMetaCache) watch(stopCh <-chan struct{}) { LastObservedTime: nowTime, }, } + metaManager.addEventCount.Add(1) }, UpdateFunc: func(oldObj interface{}, obj interface{}) { nowTime := time.Now().Unix() @@ -98,6 +107,7 @@ func (m *k8sMetaCache) watch(stopCh <-chan struct{}) { LastObservedTime: nowTime, }, } + metaManager.updateEventCount.Add(1) }, DeleteFunc: func(obj interface{}) { m.eventCh <- &K8sMetaEvent{ @@ -108,6 +118,7 @@ func (m *k8sMetaCache) watch(stopCh <-chan struct{}) { LastObservedTime: time.Now().Unix(), }, } + metaManager.deleteEventCount.Add(1) }, }) go factory.Start(stopCh) diff --git a/pkg/helper/k8smeta/k8s_meta_http_server.go b/pkg/helper/k8smeta/k8s_meta_http_server.go index 2aace0ce67..50fa757bab 100644 --- a/pkg/helper/k8smeta/k8s_meta_http_server.go +++ b/pkg/helper/k8smeta/k8s_meta_http_server.go @@ -23,9 +23,9 @@ type metadataHandler struct { metaManager *MetaManager } -func newMetadataHandler() *metadataHandler { +func newMetadataHandler(metaManager *MetaManager) *metadataHandler { metadataHandler := &metadataHandler{ - metaManager: GetMetaManagerInstance(), + metaManager: metaManager, } return metadataHandler } @@ -46,17 +46,10 @@ func (m *metadataHandler) K8sServerRun(stopCh <-chan struct{}) error { mux := http.NewServeMux() // TODO: add port in ip endpoint - mux.HandleFunc("/metadata/ip", m.handlePodMetaByUniqueID) - mux.HandleFunc("/metadata/containerid", m.handlePodMetaByUniqueID) - mux.HandleFunc("/metadata/host", m.handlePodMetaByHostIP) + mux.HandleFunc("/metadata/ip", m.handler(m.handlePodMetaByUniqueID)) + mux.HandleFunc("/metadata/containerid", m.handler(m.handlePodMetaByUniqueID)) + mux.HandleFunc("/metadata/host", m.handler(m.handlePodMetaByHostIP)) server.Handler = mux - for { - if m.metaManager.IsReady() { - break - } - time.Sleep(1 * time.Second) - logger.Warning(context.Background(), "K8S_META_SERVER_WAIT", "waiting for k8s 
meta manager to be ready") - } logger.Info(context.Background(), "k8s meta server", "started", "port", port) go func() { defer panicRecover() @@ -66,6 +59,21 @@ func (m *metadataHandler) K8sServerRun(stopCh <-chan struct{}) error { return nil } +func (m *metadataHandler) handler(handleFunc func(w http.ResponseWriter, r *http.Request)) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if !m.metaManager.IsReady() { + w.WriteHeader(http.StatusServiceUnavailable) + return + } + startTime := time.Now() + m.metaManager.httpRequestCount.Add(1) + handleFunc(w, r) + latency := time.Since(startTime).Milliseconds() + m.metaManager.httpAvgDelayMs.Add(latency) + m.metaManager.httpMaxDelayMs.Set(float64(latency)) + } +} + func (m *metadataHandler) handlePodMetaByUniqueID(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() var rBody requestBody diff --git a/pkg/helper/k8smeta/k8s_meta_manager.go b/pkg/helper/k8smeta/k8s_meta_manager.go index dc4f3211e5..842fdd0c75 100644 --- a/pkg/helper/k8smeta/k8s_meta_manager.go +++ b/pkg/helper/k8smeta/k8s_meta_manager.go @@ -13,6 +13,7 @@ import ( "k8s.io/client-go/tools/clientcmd" controllerConfig "sigs.k8s.io/controller-runtime/pkg/client/config" + "github.com/alibaba/ilogtail/pkg/flags" "github.com/alibaba/ilogtail/pkg/helper" "github.com/alibaba/ilogtail/pkg/logger" "github.com/alibaba/ilogtail/pkg/pipeline" @@ -24,6 +25,8 @@ var onceManager sync.Once type MetaCache interface { Get(key []string) map[string][]*ObjectWrapper + GetSize() int + GetQueueSize() int List() []*ObjectWrapper RegisterSendFunc(key string, sendFunc SendFunc, interval int) UnRegisterSendFunc(key string) @@ -40,29 +43,40 @@ type MetaManager struct { clientset *kubernetes.Clientset stopCh chan struct{} - eventCh chan *K8sMetaEvent - ready atomic.Bool - - cacheMap map[string]MetaCache - linkGenerator *LinkGenerator - linkRegisterMap map[string][]string - linkRegisterLock sync.RWMutex - - metricContext pipeline.Context + ready 
atomic.Bool + + metadataHandler *metadataHandler + cacheMap map[string]MetaCache + linkGenerator *LinkGenerator + linkRegisterMap map[string][]string + registerLock sync.RWMutex + + // self metrics + projectNames map[string]int + metricRecord pipeline.MetricsRecord + addEventCount pipeline.CounterMetric + updateEventCount pipeline.CounterMetric + deleteEventCount pipeline.CounterMetric + cacheResourceGauge pipeline.GaugeMetric + queueSizeGauge pipeline.GaugeMetric + httpRequestCount pipeline.CounterMetric + httpAvgDelayMs pipeline.CounterMetric + httpMaxDelayMs pipeline.GaugeMetric } func GetMetaManagerInstance() *MetaManager { onceManager.Do(func() { metaManager = &MetaManager{ - stopCh: make(chan struct{}), - eventCh: make(chan *K8sMetaEvent, 1000), + stopCh: make(chan struct{}), } + metaManager.metadataHandler = newMetadataHandler(metaManager) metaManager.cacheMap = make(map[string]MetaCache) for _, resource := range AllResources { metaManager.cacheMap[resource] = newK8sMetaCache(metaManager.stopCh, resource) } metaManager.linkGenerator = NewK8sMetaLinkGenerator(metaManager.cacheMap) metaManager.linkRegisterMap = make(map[string][]string) + metaManager.projectNames = make(map[string]int) }) return metaManager } @@ -84,7 +98,16 @@ func (m *MetaManager) Init(configPath string) (err error) { return err } m.clientset = clientset - m.metricContext = &helper.LocalContext{} + + m.metricRecord = pipeline.MetricsRecord{} + m.addEventCount = helper.NewCounterMetricAndRegister(&m.metricRecord, helper.MetricRunnerK8sMetaAddEventTotal) + m.updateEventCount = helper.NewCounterMetricAndRegister(&m.metricRecord, helper.MetricRunnerK8sMetaUpdateEventTotal) + m.deleteEventCount = helper.NewCounterMetricAndRegister(&m.metricRecord, helper.MetricRunnerK8sMetaDeleteEventTotal) + m.cacheResourceGauge = helper.NewGaugeMetricAndRegister(&m.metricRecord, helper.MetricRunnerK8sMetaCacheSize) + m.queueSizeGauge = helper.NewGaugeMetricAndRegister(&m.metricRecord, 
helper.MetricRunnerK8sMetaQueueSize) + m.httpRequestCount = helper.NewCounterMetricAndRegister(&m.metricRecord, helper.MetricRunnerK8sMetaHTTPRequestTotal) + m.httpAvgDelayMs = helper.NewAverageMetricAndRegister(&m.metricRecord, helper.MetricRunnerK8sMetaHTTPAvgDelayMs) + m.httpMaxDelayMs = helper.NewMaxMetricAndRegister(&m.metricRecord, helper.MetricRunnerK8sMetaHTTPMaxDelayMs) go func() { startTime := time.Now() @@ -106,16 +129,16 @@ func (m *MetaManager) IsReady() bool { return m.ready.Load() } -func (m *MetaManager) RegisterSendFunc(configName string, resourceType string, sendFunc SendFunc, interval int) { +func (m *MetaManager) RegisterSendFunc(projectName, configName, resourceType string, sendFunc SendFunc, interval int) { if cache, ok := m.cacheMap[resourceType]; ok { cache.RegisterSendFunc(configName, func(events []*K8sMetaEvent) { sendFunc(events) linkTypeList := make([]string, 0) - m.linkRegisterLock.RLock() + m.registerLock.RLock() if m.linkRegisterMap[configName] != nil { linkTypeList = append(linkTypeList, m.linkRegisterMap[configName]...) 
} - m.linkRegisterLock.RUnlock() + m.registerLock.RUnlock() for _, linkType := range linkTypeList { linkEvents := m.linkGenerator.GenerateLinks(events, linkType) if linkEvents != nil { @@ -123,35 +146,105 @@ func (m *MetaManager) RegisterSendFunc(configName string, resourceType string, s } } }, interval) + m.registerLock.Lock() + if cnt, ok := m.projectNames[projectName]; ok { + m.projectNames[projectName] = cnt + 1 + } else { + m.projectNames[projectName] = 1 + } + m.registerLock.Unlock() return } + // register link if !isEntity(resourceType) { - m.linkRegisterLock.Lock() + m.registerLock.Lock() if _, ok := m.linkRegisterMap[configName]; !ok { m.linkRegisterMap[configName] = make([]string, 0) } m.linkRegisterMap[configName] = append(m.linkRegisterMap[configName], resourceType) - m.linkRegisterLock.Unlock() + m.registerLock.Unlock() } else { logger.Error(context.Background(), "ENTITY_PIPELINE_REGISTER_ERROR", "resourceType not support", resourceType) } } -func (m *MetaManager) UnRegisterSendFunc(configName string, resourceType string) { +func (m *MetaManager) UnRegisterSendFunc(projectName, configName, resourceType string) { if cache, ok := m.cacheMap[resourceType]; ok { cache.UnRegisterSendFunc(configName) + m.registerLock.Lock() + if cnt, ok := m.projectNames[projectName]; ok { + if cnt == 1 { + delete(m.projectNames, projectName) + } else { + m.projectNames[projectName] = cnt - 1 + } + } + // unregister link + if !isEntity(resourceType) { + if registeredLink, ok := m.linkRegisterMap[configName]; ok { + idx := -1 + for i, v := range registeredLink { + if resourceType == v { + idx = i + break + } + } + if idx != -1 { + m.linkRegisterMap[configName] = append(registeredLink[:idx], registeredLink[idx+1:]...) 
+ } + } + } + m.registerLock.Unlock() } else { logger.Error(context.Background(), "ENTITY_PIPELINE_UNREGISTER_ERROR", "resourceType not support", resourceType) } } -func (m *MetaManager) GetMetricContext() pipeline.Context { - return m.metricContext +func GetMetaManagerMetrics() []map[string]string { + manager := GetMetaManagerInstance() + if manager == nil || !manager.IsReady() { + return nil + } + // cache + queueSize := 0 + cacheSize := 0 + for _, cache := range manager.cacheMap { + queueSize += cache.GetQueueSize() + cacheSize += cache.GetSize() + + } + manager.queueSizeGauge.Set(float64(queueSize)) + manager.cacheResourceGauge.Set(float64(cacheSize)) + // set labels + manager.registerLock.RLock() + projectName := make([]string, 0) + projectName = append(projectName, *flags.DefaultLogProject) + for name := range manager.projectNames { + projectName = append(projectName, name) + } + manager.registerLock.RUnlock() + manager.metricRecord.Labels = []pipeline.Label{ + { + Key: "cluster_id", + Value: *flags.ClusterID, + }, + { + Key: "runner_name", + Value: "k8s_meta_manager", + }, + { + Key: "project", + Value: strings.Join(projectName, " "), + }, + } + + return []map[string]string{ + manager.metricRecord.ExportMetricRecords(), + } } func (m *MetaManager) runServer() { - metadataHandler := newMetadataHandler() - go metadataHandler.K8sServerRun(m.stopCh) + go m.metadataHandler.K8sServerRun(m.stopCh) } func isEntity(resourceType string) bool { diff --git a/pkg/helper/log_helper.go b/pkg/helper/log_helper.go index 2edca93577..173ac58b17 100644 --- a/pkg/helper/log_helper.go +++ b/pkg/helper/log_helper.go @@ -290,7 +290,7 @@ func NewMetricLogStringVal(name string, t int64, value string, labels *MetricLab } func formatLabelKey(key string) string { - if !config.LogtailGlobalConfig.EnableSlsMetricsFormat { + if !config.LoongcollectorGlobalConfig.EnableSlsMetricsFormat { return key } var newKey []byte @@ -315,7 +315,7 @@ func formatLabelKey(key string) string { } func 
formatLabelValue(value string) string { - if !config.LogtailGlobalConfig.EnableSlsMetricsFormat { + if !config.LoongcollectorGlobalConfig.EnableSlsMetricsFormat { return value } var newValue []byte @@ -337,7 +337,7 @@ func formatLabelValue(value string) string { } func formatNewMetricName(name string) string { - if !config.LogtailGlobalConfig.EnableSlsMetricsFormat { + if !config.LoongcollectorGlobalConfig.EnableSlsMetricsFormat { return name } var newName []byte diff --git a/pkg/helper/log_helper_test.go b/pkg/helper/log_helper_test.go index b17b526a93..730d4561e0 100644 --- a/pkg/helper/log_helper_test.go +++ b/pkg/helper/log_helper_test.go @@ -56,7 +56,7 @@ func TestMetricLabels_Append(t *testing.T) { require.Equal(t, `Time:1691646109 Contents: Contents: Contents: Contents: Time_ns:945000000 `, log.String()) var ml2 MetricLabels - config.LogtailGlobalConfig.EnableSlsMetricsFormat = true + config.LoongcollectorGlobalConfig.EnableSlsMetricsFormat = true ml2.Append("key@", "val|") log = NewMetricLog("name@", 1691646109945, 1, &ml2) diff --git a/pkg/helper/self_metrics_agent_constants.go b/pkg/helper/self_metrics_agent_constants.go new file mode 100644 index 0000000000..fbff3634b9 --- /dev/null +++ b/pkg/helper/self_metrics_agent_constants.go @@ -0,0 +1,44 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package helper + +import ( + "github.com/alibaba/ilogtail/pkg/pipeline" +) + +////////////////////////////////////////////////////////////////////////// +// agent +////////////////////////////////////////////////////////////////////////// + +// metric keys +const ( + MetricAgentMemoryGo = "agent_go_memory_used_mb" + MetricAgentGoRoutinesTotal = "agent_go_routines_total" +) + +func GetCommonLabels(context pipeline.Context, pluginMeta *pipeline.PluginMeta) []pipeline.LabelPair { + labels := make([]pipeline.LabelPair, 0) + labels = append(labels, pipeline.LabelPair{Key: MetricLabelKeyProject, Value: context.GetProject()}) + labels = append(labels, pipeline.LabelPair{Key: MetricLabelKeyLogstore, Value: context.GetLogstore()}) + labels = append(labels, pipeline.LabelPair{Key: MetricLabelKeyPipelineName, Value: context.GetConfigName()}) + + if len(pluginMeta.PluginID) > 0 { + labels = append(labels, pipeline.LabelPair{Key: MetricLabelKeyPluginID, Value: pluginMeta.PluginID}) + } + if len(pluginMeta.PluginType) > 0 { + labels = append(labels, pipeline.LabelPair{Key: MetricLabelKeyPluginType, Value: pluginMeta.PluginType}) + } + return labels +} diff --git a/pkg/helper/self_metrics_plugin_constants.go b/pkg/helper/self_metrics_plugin_constants.go new file mode 100644 index 0000000000..dc481b92cb --- /dev/null +++ b/pkg/helper/self_metrics_plugin_constants.go @@ -0,0 +1,101 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package helper + +////////////////////////////////////////////////////////////////////////// +// plugin +////////////////////////////////////////////////////////////////////////// + +// label keys +const ( + MetricLabelKeyProject = "project" + MetricLabelKeyLogstore = "logstore" + MetricLabelKeyPipelineName = "pipeline_name" + MetricLabelKeyPluginType = "plugin_type" + MetricLabelKeyPluginID = "plugin_id" +) + +// metric keys +const ( + MetricPluginInEventsTotal = "plugin_in_events_total" + MetricPluginInEventGroupsTotal = "plugin_in_event_groups_total" + MetricPluginInSizeBytes = "plugin_in_size_bytes" + MetricPluginOutEventsTotal = "plugin_out_events_total" + MetricPluginOutEventGroupsTotal = "plugin_out_event_groups_total" + MetricPluginOutSizeBytes = "plugin_out_size_bytes" + MetricPluginTotalDelayMs = "plugin_total_delay_ms" + MetricPluginTotalProcessTimeMs = "plugin_total_process_time_ms" +) + +/********************************************************** +* input_canal +**********************************************************/ +const ( + MetricPluginBinlogRotate = "plugin_binlog_rotate" + MetricPluginBinlogSync = "plugin_binlog_sync" + MetricPluginBinlogDdl = "plugin_binlog_ddl" + MetricPluginBinlogRow = "plugin_binlog_row" + MetricPluginBinlogXgid = "plugin_binlog_xgid" + MetricPluginBinlogCheckpoint = "plugin_binlog_checkpoint" + MetricPluginBinlogFilename = "plugin_binlog_filename" + MetricPluginBinlogGtid = "plugin_binlog_gtid" +) + +/********************************************************** +* metric_container_info +* service_docker_stdout_v2 +**********************************************************/ +const ( + MetricPluginContainerTotal = "plugin_container_total" + MetricPluginAddContainerTotal = "plugin_add_container_total" + MetricPluginRemoveContainerTotal = "plugin_remove_container_total" + MetricPluginUpdateContainerTotal = "plugin_update_container_total" +) + +/********************************************************** +* service_mysql +* 
service_rdb +**********************************************************/ +const ( + MetricPluginCollectAvgCostTimeMs = "plugin_collect_avg_cost_time_ms" + MetricPluginCollectTotal = "plugin_collect_total" +) + +/********************************************************** +* service_k8s_meta +**********************************************************/ +const ( + MetricCollectEntityTotal = "plugin_collect_entity_total" + MetricCollectLinkTotal = "plugin_collect_link_total" +) + +/********************************************************** +* all processor(所有解析类的处理插件通用指标。Todo:目前统计还不全、不准确) +**********************************************************/ +const ( + MetricPluginDiscardedEventsTotal = "plugin_discarded_events_total" + MetricPluginOutFailedEventsTotal = "plugin_out_failed_events_total" + MetricPluginOutKeyNotFoundEventsTotal = "plugin_out_key_not_found_events_total" + MetricPluginOutSuccessfulEventsTotal = "plugin_out_successful_events_total" +) + +/********************************************************** +* processor_anchor +* processor_regex +* processor_string_replace +**********************************************************/ +const ( + PluginPairsPerLogTotal = "plugin_pairs_per_log_total" +) diff --git a/pkg/helper/self_metrics_runner_constants.go b/pkg/helper/self_metrics_runner_constants.go new file mode 100644 index 0000000000..b3df4e06ba --- /dev/null +++ b/pkg/helper/self_metrics_runner_constants.go @@ -0,0 +1,33 @@ +// Copyright 2024 iLogtail Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package helper + +////////////////////////////////////////////////////////////////////////// +// runner +////////////////////////////////////////////////////////////////////////// + +/********************************************************** +* k8s meta +**********************************************************/ +const ( + MetricRunnerK8sMetaAddEventTotal = "runner_k8s_meta_add_event_total" + MetricRunnerK8sMetaUpdateEventTotal = "runner_k8s_meta_update_event_total" + MetricRunnerK8sMetaDeleteEventTotal = "runner_k8s_meta_delete_event_total" + MetricRunnerK8sMetaCacheSize = "runner_k8s_meta_cache_size" + MetricRunnerK8sMetaQueueSize = "runner_k8s_meta_queue_size" + MetricRunnerK8sMetaHTTPRequestTotal = "runner_k8s_meta_http_request_total" + MetricRunnerK8sMetaHTTPAvgDelayMs = "runner_k8s_meta_avg_delay_ms" + MetricRunnerK8sMetaHTTPMaxDelayMs = "runner_k8s_meta_max_delay_ms" +) diff --git a/pkg/helper/self_metrics_v2_imp.go b/pkg/helper/self_metrics_v2_imp.go index 03299709b2..b9803f61ab 100644 --- a/pkg/helper/self_metrics_v2_imp.go +++ b/pkg/helper/self_metrics_v2_imp.go @@ -47,6 +47,8 @@ func newMetric(metricType pipeline.SelfMetricType, metricSet pipeline.MetricSet, return newCumulativeCounter(metricSet, labelValues) case pipeline.AverageType: return newAverage(metricSet, labelValues) + case pipeline.MaxType: + return newMax(metricSet, labelValues) case pipeline.CounterType: return newDeltaCounter(metricSet, labelValues) case pipeline.GaugeType: @@ -232,6 +234,42 @@ func (a *averageImp) Export() map[string]string { return a.Series.Export(metricValue.Name, strconv.FormatFloat(metricValue.Value, 'f', 4, 64)) } +// maxImp is a metric to compute the max value of a series of values in the last window. +// if there is no value added in the last window, zero will be returned. 
+type maxImp struct { + sync.RWMutex + value float64 + Series +} + +func newMax(ms pipeline.MetricSet, labelValues []string) pipeline.GaugeMetric { + m := &maxImp{ + Series: newSeries(ms, labelValues), + } + return m +} + +func (m *maxImp) Set(f float64) { + m.Lock() + defer m.Unlock() + if f > m.value { + m.value = f + } +} + +func (m *maxImp) Collect() pipeline.MetricValue[float64] { + m.RLock() + defer m.RUnlock() + metric := pipeline.MetricValue[float64]{Name: m.Name(), Value: m.value} + m.value = 0 + return metric +} + +func (m *maxImp) Export() map[string]string { + metricValue := m.Collect() + return m.Series.Export(metricValue.Name, strconv.FormatFloat(metricValue.Value, 'f', 4, 64)) +} + // latencyImp is a metric to compute the average latency of a series of values in the last window. type latencyImp struct { sync.Mutex diff --git a/pkg/helper/self_metrics_vector_imp.go b/pkg/helper/self_metrics_vector_imp.go index f2f8571d9e..41d49a4592 100644 --- a/pkg/helper/self_metrics_vector_imp.go +++ b/pkg/helper/self_metrics_vector_imp.go @@ -39,6 +39,7 @@ func SetMetricVectorCacheFactory(factory func(pipeline.MetricSet) MetricVectorCa type ( CumulativeCounterMetricVector = pipeline.MetricVector[pipeline.CounterMetric] AverageMetricVector = pipeline.MetricVector[pipeline.CounterMetric] + MaxMetricVector = pipeline.MetricVector[pipeline.GaugeMetric] CounterMetricVector = pipeline.MetricVector[pipeline.CounterMetric] GaugeMetricVector = pipeline.MetricVector[pipeline.GaugeMetric] LatencyMetricVector = pipeline.MetricVector[pipeline.LatencyMetric] @@ -65,6 +66,12 @@ func NewAverageMetricVector(metricName string, constLabels map[string]string, la return NewMetricVector[pipeline.CounterMetric](metricName, pipeline.AverageType, constLabels, labelNames) } +// NewMaxMetricVector creates a new MaxMetricVector. +// Note that MetricVector doesn't expose Collect API by default. Plugins Developers should be careful to collect metrics manually. 
+func NewMaxMetricVector(metricName string, constLabels map[string]string, labelNames []string) MaxMetricVector { + return NewMetricVector[pipeline.GaugeMetric](metricName, pipeline.MaxType, constLabels, labelNames) +} + // NewGaugeMetricVector creates a new GaugeMetricVector. // Note that MetricVector doesn't expose Collect API by default. Plugins Developers should be careful to collect metrics manually. func NewGaugeMetricVector(metricName string, constLabels map[string]string, labelNames []string) GaugeMetricVector { @@ -176,6 +183,13 @@ func NewAverageMetricAndRegister(c *pipeline.MetricsRecord, n string, lables ... return mv.WithLabels() } +// NewMaxMetricAndRegister creates a new MaxMetric and register it's metricVector to the MetricsRecord. +func NewMaxMetricAndRegister(c *pipeline.MetricsRecord, n string, lables ...*protocol.Log_Content) pipeline.GaugeMetric { + mv := NewMaxMetricVector(n, convertLabels(lables), nil) + c.RegisterMetricCollector(mv.(pipeline.MetricCollector)) + return mv.WithLabels() +} + // NewGaugeMetricAndRegister creates a new GaugeMetric and register it's metricVector to the MetricsRecord. 
func NewGaugeMetricAndRegister(c *pipeline.MetricsRecord, n string, lables ...*protocol.Log_Content) pipeline.GaugeMetric { mv := NewGaugeMetricVector(n, convertLabels(lables), nil) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 4d4e13ed69..802502f41b 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -20,6 +20,7 @@ import ( "flag" "fmt" "io" + "path" "os" "path/filepath" @@ -30,6 +31,7 @@ import ( "time" "github.com/alibaba/ilogtail/pkg" + "github.com/alibaba/ilogtail/pkg/config" "github.com/alibaba/ilogtail/pkg/util" "github.com/cihub/seelog" @@ -40,7 +42,7 @@ const ( asyncPattern = ` - + %s %s @@ -52,7 +54,7 @@ const ( syncPattern = ` - + %s %s @@ -98,7 +100,7 @@ var ( closedCatchStdout bool ) -func Init() { +func InitLogger() { once.Do(func() { initNormalLogger() catchStandardOutput() @@ -107,6 +109,8 @@ func Init() { func InitTestLogger(options ...ConfigOption) { once.Do(func() { + config.LoongcollectorGlobalConfig.LoongcollectorLogDir = "./" + config.LoongcollectorGlobalConfig.LoongcollectorConfDir = "./" initTestLogger(options...) catchStandardOutput() }) @@ -119,7 +123,7 @@ func initNormalLogger() { for _, option := range defaultProductionOptions { option() } - setLogConf(util.GetCurrentBinaryPath() + "plugin_logger.xml") + setLogConf(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorConfDir, "plugin_logger.xml")) } // initTestLogger extracted from Init method for unit test. 
@@ -132,7 +136,7 @@ func initTestLogger(options ...ConfigOption) { for _, option := range options { option() } - setLogConf(util.GetCurrentBinaryPath() + "plugin_logger.xml") + setLogConf(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorConfDir, "plugin_logger.xml")) } func Debug(ctx context.Context, kvPairs ...interface{}) { @@ -262,7 +266,7 @@ func Flush() { func setLogConf(logConfig string) { if !retainFlag { - _ = os.Remove(util.GetCurrentBinaryPath() + "plugin_logger.xml") + _ = os.Remove(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorConfDir, "plugin_logger.xml")) } debugFlag = 0 logtailLogger = seelog.Disabled @@ -321,7 +325,7 @@ func generateDefaultConfig() string { if memoryReceiverFlag { memoryReceiverFlagStr = "" } - return fmt.Sprintf(template, levelFlag, util.GetCurrentBinaryPath(), consoleStr, memoryReceiverFlagStr) + return fmt.Sprintf(template, levelFlag, config.LoongcollectorGlobalConfig.LoongcollectorLogDir, consoleStr, memoryReceiverFlagStr) } // Close the logger and recover the stdout and stderr diff --git a/pkg/logger/logger_test.go b/pkg/logger/logger_test.go index 539b7f1b82..f3bcc9a0d9 100644 --- a/pkg/logger/logger_test.go +++ b/pkg/logger/logger_test.go @@ -19,6 +19,7 @@ import ( "flag" "fmt" "os" + "path" "regexp" "strings" "sync" @@ -29,6 +30,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/alibaba/ilogtail/pkg" + "github.com/alibaba/ilogtail/pkg/config" "github.com/alibaba/ilogtail/pkg/util" ) @@ -46,12 +48,12 @@ func init() { } func clean() { - _ = os.Remove(util.GetCurrentBinaryPath() + "plugin_logger.xml") - _ = os.Remove(util.GetCurrentBinaryPath() + "logtail_plugin.LOG") + _ = os.Remove(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorConfDir, "plugin_logger.xml")) + _ = os.Remove(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorLogDir, "go_plugin.LOG")) } func readLog(index int) string { - bytes, _ := os.ReadFile(util.GetCurrentBinaryPath() + "logtail_plugin.LOG") + bytes, _ := 
os.ReadFile(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorLogDir, "go_plugin.LOG")) logs := strings.Split(string(bytes), "\n") if index > len(logs)-1 { return "" @@ -106,26 +108,26 @@ func Test_generateDefaultConfig(t *testing.T) { }{ { name: "production", - want: fmt.Sprintf(template, "info", util.GetCurrentBinaryPath(), "", ""), + want: fmt.Sprintf(template, "info", config.LoongcollectorGlobalConfig.LoongcollectorLogDir, "", ""), flagSetter: func() {}, }, { name: "test-debug-level", - want: fmt.Sprintf(template, "debug", util.GetCurrentBinaryPath(), "", ""), + want: fmt.Sprintf(template, "debug", config.LoongcollectorGlobalConfig.LoongcollectorLogDir, "", ""), flagSetter: func() { flag.Set(FlagLevelName, "debug") }, }, { name: "test-wrong-level", - want: fmt.Sprintf(template, "info", util.GetCurrentBinaryPath(), "", ""), + want: fmt.Sprintf(template, "info", config.LoongcollectorGlobalConfig.LoongcollectorLogDir, "", ""), flagSetter: func() { flag.Set(FlagLevelName, "debug111") }, }, { name: "test-open-console", - want: fmt.Sprintf(template, "info", util.GetCurrentBinaryPath(), "", ""), + want: fmt.Sprintf(template, "info", config.LoongcollectorGlobalConfig.LoongcollectorLogDir, "", ""), flagSetter: func() { flag.Set(FlagConsoleName, "true") }, diff --git a/pkg/logtail/PluginAdapter.dll b/pkg/logtail/GoPluginAdapter.dll similarity index 100% rename from pkg/logtail/PluginAdapter.dll rename to pkg/logtail/GoPluginAdapter.dll diff --git a/pkg/logtail/libGoPluginAdapter.so b/pkg/logtail/libGoPluginAdapter.so new file mode 100755 index 0000000000000000000000000000000000000000..5c90a1d9aae716389fe8a173cd9be6067fe2e087 GIT binary patch literal 36000 zcmeHwdw5jUx%b{PnJ|zrGu#7$jug@LZI>1$&gGgl3Oz~T@|{UQL*sgx??@jF*J zpYdjmMvV!XCv!;pijC8hhKYi9`hl~v-?%^TTuw_rQI4hO6#Oavp$Qz9exh79qUx)8 zeEp3nK1ISkID`9A&H$34dWBrCkdwZh7~rID=A=S-q`y)TpX%oeKIs!Lxth0wO%xug zVB{G85;7c|^rhTt$cZ9whA;RP)u7ah^fKAyL6Lvyi{x3^rXmd^{#IPQR@wc7NA9V9 
z_K8m)`Q^dO?|)|P?Qc_AAE_CKKV{dh0_TRg6Z89R%AMP@&ZRiA!A1DH z1b++gN3IBe#Vj=XQ!*N4p@o9}`rh47+`<5ie?5bq$_)G)Gw?r<0e?0F9?gJnfdQzGq`J#8 zlLn-)l;FFbHW%iYvU;chi z@E=L#XX)lBPBAciTL?S>f3lLJ^j^S$TLpep;A~JqnA#t?)zC9ZnXc>-`sMid0V{6~ zAAG=R8X*9dURzf4NO@VbR0E;5jmkOX-m$rRMcJhK!Pp1jEKI)6`~t*cXMZ(HBgUKHpmT1Y_@ zs&W2&B9{=^!9y@{W7kISMqiK8ys4)x=vSJ1{Jtjo^(%qyp0>_lGt~nz!6scjupUgj zu~|g5_=En=jY@M@x4%>IZ$|wz2mJmG3h?eWsA=pBwnHAGon7Acu0~&wa@O3$_yhjn z`cN~N+PeaNrNh_R((d_CllH7Xtcj>hgSN^{WPPRdwj0$pemoyd55 zfVhKQ?c_nf;%jX52LejRhNiY2hPC!6zV%?2q8&bJGGv67#zt>|3{gTv_5bDfljdJ5ullftRM>c7Z!n@FRlOQt(}Zu1Ucg1TB4; zPrDcodsFz23tZ|;sBn6lH6;TeU$0qn?`6Mp+ohked!QYUAOa9&zyhiZv zO2H-nU<%$X^PhrCenr?r=JQU$Uz&nDoV?zhDY#wCM>|q+VXs|YE=Si6Sv<&oA56d# z*WJ4la9X2F_hz(a(~ zEdfHFX`o+zEt|)K?02Yv6U0B02ifnT22K!u4iB>5BMqD&oUXy7i#G5O;d3Mi|9Ar@ z2p3x;3^zZ%@F@3HbR5_}SoD3!JsUSqq%Cz*!5NwZK^moVCDN3!JsU zSqq%C!2fIuXnpUQwXo^CS1F1%Fc>sNSE0OG-&5xOyxb#;-$2yJytnXcJToC{jWsR7Duds(cdSUwl-oljQ%#!v;`G&GWwfD=Me2+^c_Ug)<(?E=&ut^ zTN*JNqkD*^EvcBo=$nY9t&Q06GXUnTCz`e{Vn-N#9nrL95gTH3Ezz`95gTOmN}_3t zBDR~+lBAT`sVhZT!eEc*QSv_GTXut5m zU�u&n2($jHVyg_P&3Ww)dn_Qx9k_oC!{af;mEgdE`j5^}Nba`QBy!K=Mkcc&*mA z?95UGHT~^iF4TViaz%+wq$0GU+76gr0H1oJOjr7`n-Q5$?1XB7vB^_D(8PXM@pb=R)2F##f24GF+FR=c;9D{wUPs?MLjdN7d|r{MSb17YO;^NcjTD z2lER^6$L3r4+&KTq-y)9D&8)jZ~Uk}S)K^7G3l_7Hj{McsC2wMeWq@P(hWBIn|BPH z2wt)_vV3HX7HL0d*7Q?kA=oYYPZ**!)u{FDH*00Xp%1uO_wP@vUbNG-kww2z;Plgv zK_OZY8*k?2wliJ@}{j|)} z_ekH(x#0?1c+Kp>lLIf_c3oLC;8brmLvCES?s2=m2JIS|zNOFp7@dsi6}BfS!v`za zR&n&aQz*&asKHoaLsVI`rx5z|lj;Zh&FGPpeRfeMYIkUbC4<+;`(xqL2P@cqaLJ&$ zVe0V9vOZkvTx(r*Yu9L~h&7&Yky}48@M3Urq+&6%?#VCpAa$n)A+3Kfc$u}ZGQY-J=*e%e7S`o= zTMKvQ_d>>6_(1*+=0BXji}}CG-_86d@(1C2!t>s_45}mj`Nu&XM1g2})u7vkTFbXr zhcC$on(Gi~UiHAS;1ZAifflLDN1u2e~Yn}CZvLPSK|Fi~nw1LMH3Fez&OEVgC zzRjbb@I>14bE%N7vP)~DCr(p&o?vG4=H9Pk{nBdvL$`hisiW-3%ALpx zDsMFn##^5v_0+Oyghc}r`EXOd52E~GQ^^uAyjXHVJLqI5H5kedI%m_U8hh;oCZog= z1QSuoAm#+tN$t(35`#t+giM~Q{BYv_}h zw+>b?>1KUu{$q6483rBVh<*g4=}(so4jkKjwOfCyWYD9(@g%Lg^bev35QmzY5WNwB 
zzS6AV>s0xR_hFe5{R<_o8%1TFS90QD#cWm!A0pyW3Zbvt+1!VI3T4m!9Rh&3k&-7w z43ObDHk3mPU!e_*1ZP)~utzM(wCh-)G<{cO>S@atHV;ah% z?WCqaug{_Z4Z~SHUK0$fz`#<}3l2K7NOO38bP4GWUvCx>(F;k7n3)rEg!Y0&c!q09 z29+=oQ#I1s&*wW@h;Ty8$HdR_KUeb8+)g}fd`jgPqif%Ld76GpzB&5b-;hdVvlG|yVMRA`6BP?+GD@shm{GYqN(P5tr*Q-2W=O*P z`Oret#jUvmBcY46@U2Q@(Z5hrDtR$l|1lN)RwZhsz}}Mvy~<1$MD^~7&c`S-{0A7H z+q>_K!Ft!7kaMlAjC7v%M7%avovZKdGZ=76Mv&U4?(qET@G7%@%B8>9chKO`54rk& zcSg!<;qH@Um59^qF8lDd`JS-zc;8Wjrgvjj!I?(UTbdNAtV)H?NbA83FkGR&k^)sTTx4w5Tlwj!dpnqBWZzYZT z$*PhO-kx0HnpRA(4eqeByYHx~MY=IWE`2#(UM*}3m*=rjJ6vmzXlN|u;K+Sucsy%g zTHhe9Cnvkh{_L@OKBWP(Y;Wi&E$YiY2~N=Z4w#3Z;N!T{T#bJHEsUT0aJ3Zu_#~< zq!=p&l=MEvg7w=$HG0p7l-OTr_(qZ#D+{78LGnOWo>Dzj#7G zzS|lw+M1d$E|H^owmVo|6<*g`1zS~?92*_7-QnfkT6k)d`q{obci$}s>`Ksr<(=8u zkK2XNy7ys$JctH=a(w8B*7vmPD(h?wJyRLJAzyQcJM*drUI{L;?pcfCM0-Ci4w(@~ zHB_l#%<{w23Ciw^sTz7E)L8e}2A&OO^}iT;2ZMgpf^vDnP5A}Y18-UT^RPs&=WeNSaY?jAXF=FBHAod3J6Z}h?i-_c+2gm1|&*c-Ebvah!n+?6l5Bc6PN$6EQi z{<2ef+1+>8SY7y;J90~wA=+>pCL%lUh5FzWH)ela>fnm)VD!7ppnH2y6OW@fpAVRddI5C0^@Fb7)7{qoj{zd|HS0hwLRBTt_TFr`+1mdN zY%5@G=F$I9rJtr{l1(d&YK6nD!l$Eeen9dEB9%r}f2BOKw4h2q5xosyyj?(h^fySw z1Iydok)`K(^p8C1#~%H8k9sQlBZ|?Fdh~Z__*)rS>WJQmMaLxZgUUvovoqyZfFtSD_y7 zdKpU(DUVgxlQe(#?H`YLExHtGxXKRS{;q6F)`2Zxk3Gy}oG?{8);BbcYp7{k^kFuW zTl;T93+IF8r!>e>ue%tg3se8w{Q zT9u#A`u$_H149e!RWxttrll0CAAsV}ThWU!W@5-V7i->!v6BK-e=HUgdJPY)LvTVvJT7u#Th=J(Sr=IfEW~4*>an;|C_Ls zw7}1hZRECvO_CS{sL_^UN%vJex{!WJ7tKKy(SSSy&xWj8xH7-K4|1xeR_1T!^Yq0? 
zlqIh{NhYT}X}ZG|{;Dl%rTn0HrN2elf2ik0e}LL^Vc3yNz=0P-)v*nzt@!wi6wL1M zx^AXh3tz$(tP?zCA;sGN6);90r=EHc`Votx?*{&wf_P{xvsvaZPD8xjI`NRs`e!5p!FEC8@`dRQqT(XqV?cnCDQZ4H~uZ^P9#r8F9)gT zmAo=+p?QE^ob{d3f}I#E&&62zK1;2Vre7Oz{>2maoAlk=(+_Ep>oIaWv5AAuiDl39;WF93X-z}}3Km)Ozkb%; zN0~ye@NWGlG;I`TppbPKljUOM0(&OtwTthid5?CiqDO`?Fx&$Ux?YZKHblR5R1E0* zW3;+_@))%Mr1c3HkyhwC(GCigP6~dL<_Q`DJh;-Q{&gn09#%rr{}C>OktGkd#UhJm z#?uN?S{JjlddVi9$Ohw+wAjD^7JV1#y7i}6SGyj22AEU`KF;91bj{gSJ@70hp=VHw zYOIV_vik31Kg05lTYou?(^P#EFv4#wh1wrl2hL!pgGXN^qpw6RD#K_a&szHzB222$ zY1x!t8r_XShYOs*Si$tgprb0hyfv)gqQlz14(h2lY2jJRkces>_#+rFjNORG5o9M6 zlb6#SS)NDNWDXjiX=JG3%D*R-KMnKp0rqi(`c4|H1EZ#Z)ZhrEyV?4^u_3?5f4zVcypx2%F<;!6Vcf+@mW}WU`V4` zmS@W#V=VEQ**qqSA&Xgq+5?xdm%Jidw?}Wvw+~N~`?qMT=uzXX{eMEK(T88NBav5w z?{ZcDB2_JhD*Z2e-AID+bi{k0*}7g;X>7pxM`CPb7pAj@DjeB#l??XUvbi1_CZjN3kS{7Y@T4~ z^^!pm8+t?Vqde&QdAv{q^D%YPg33-qx08yFA3Mu6A3bltHPfT$nk;iaLY`^b_$rO2 ztSW=~4&Ht)Sx9SN+P-n{mel`O%u8Bi+W(@d#zWIHpJZRbD1^S6h1!YTgL#vu9+`Lp z@zn{wDxDUXk!81mIr?8H42DVl9h&u?qr9M!os^-0HOT((_u2K&NXfCPlDA?%$7mEE z&-z{HQL#;6DjAHuj{Lx*EoFGeh&sW>5O?IZyy&lxALQ9u&OMoEpJUnD+ z9P$~x5H&_CR80C;)ADfRJ4CbHo-5gs9F0k%8fHuL0YA;B1Fu*I8WEs{FxJ4+*(gpO z1xyQWz?w2vh1j?t0tKWY#upqIUt_Z{JMso~p~zc*Gc_m$vzT>YM||uy%UG9)&9x5n z#AA%a&#fbgwsQW|;5we=}_o`4p8uC1Rb$|Gv%J`Ar}`6z#1 zsS@l7Z_c9&wqD+BZpX-kap*SktlH4qB(2NBc0f{54y;N|(7CiC}AK)Be8 z-GoYag*Ud+)y@^LTY~5m5^dwj^^yNm4QBKGi#5(6TvD9U!dLaZqejog9MrqYY#iPHvF=$s3G=}kk_pY!!plx!2N5#F zeH?Ri@5B!Q7`5=Uk7&;`Z2FiyT*YBtF3$t~CZh*~OEkU4hRvwo&m}gt8AZ{*BJzsT zU~Y8%Tc|$1ABBwuIe&v?E3xP`=3#7_DB;VnM7|vR*L>SPyrLC-;yk|eq;K#Hq6=VrEU6C;AC}{9c)=v3hgKzYUZ{oF;(~mlk@V=( za+&%UgISus5?egg=HY+l*XfaI-@^#?TUxM0|BTi#Fo?`NkFCkjld-R%4yYN?Sp~AP zpW4(}_dn7C*QY#|b)#KATiaI`IUXy`wGA;R`mlDviF*&aR-HJuT;>XZS*$ zjqE|YK&Yd`*Rw^bqDS|XYf$*+_O4A_b4_j4a(BIVORZeRy=Q$L{O2vquPJ1+fS4x;EP5 z{5+bH6pt{*OK4wUC%ZKU{Z00H0T`@kQJ~P?f-=Xc3H*V+^!jAU?evTza>TOmIq5Td zB{8FD@qAJag#{Y%v}Dk4N83Q=x_Y*d;?L20HPw=x9$>UL`GP(>JxnRh@p*Dxs5y~! 
zX2#8U>Xi)3vxM|X*`~H&E3`MYH87&}@cEQ()< zd=7uLzo*4NT06~MJzR_>^f_|S-2qWv1L`@(E9mGN%Lb+9FlH!(YhJ^=hXqsve7ZBI z8-5*p=U+!gx)Hw{K0QC~I5INwNBG_F-yniKOg=eR)o z3BLjUqwss-AAvsv-;4!`f-9c__zw6Q{3G!D;oD;)BM-puhCc+~hH1@=at~o;`W^TR zR=d05?|?r8nhCB&%8P^;v>Lqv28c3u;gtsUTv}WW!-Mcb6ITV7Fw4r!e2N3a$$eE z6jsVDwuoVsC2za2(qiA1wZc-+XIgD>7^*D=E{ok|$pZsKDUT20uMTo_t*flD*w#TK z&E13ey9M}F@PX%DW3gXNiBaAU<8K$@R*uG1S_)QJ9N-|GSY;|hDLUzdVJgah61aRW zkFWw@Q_-H-K|k@}Zz-_jun$Zfci0G1>VQ83dG_33mBqHrP-)5QGkPrcdS(rm#q8RD zfQyiw`@w$*{4RoEt!;*Dh#xk&N%Av0KMXzvZPgR!gNgc#%Pn^A1j)DpI}l{&L*UH^ z@4a!}3Zc_*Q=GXnrxM``iw!x_a+IaurT2Vnhn%0}VE(%d<(9mNvD{+cp5?L>Y%{qm zj=pR|^#n@+@`pHHMJ^=E>Z1noPe8t3$Zt1no?tOA&%vge(vCRlV`WqhR`-zKZbVgY z%UWqUt{V1Cu-IWLuCgM>3ASC3S&II37s5P0U=aC1Wo|Q7Qhrug1`P%n!BN3lPCQ#$ z4%OXT&~Ybp%q1OgRL+yY9su?R0Z4GW0ruHOHt)--v^WiqBP*~jZ@ZN_t1NcJi%1^$K_VDIBEymX>358 zUBqoOXrg|vmetGp<*$VPie$bO7JGM`Z#Lo%gKrS}E+W24i)}4(xFTmcOON6|Lp+UD zGJYMaDIQ;n_}N()TM++ql255Gw1qy{-f7s*TSC0Eq9u^-ddSvc+;fsFvq3wwSLBc- zXw2A*_#om}i1=*=*_MOx=2YatfY3clG+W+hham)Xo0Tv+YRlWQU6#_maVsoCs`1xZ6D*~`pcCyq z-iWHGZRg{UT=F;xslIXBvbUQeS$7$~g(gLJ1+n+|JoD$#ae?Zc?(kfM?QAN$vYOfp zjbMD7qOn^;99=sr8pRnpVXT#uF7+E4(;r3re^C4i8XuUgP~SJn`mWl4Ktt0tZlgS$ zb!RQ`|Jnlb{SWee5AyvEsqb^3seqhQ1SGvGg}yF@_KJos`RQF1f_I9+eN`8j_I8_8gs}U z5{j{b%A8E^%m~kap5Px5w6rs}kXZSCA~KGpB%|jDJ^#OX@;!E``Cek@$a3MY6MnPs zHwu57@b3}+kA=TS_y>glvhd##{t4k{pRX`CUHBIYe~Ivy3xA#Pn}xqo_}hekkMMsi z{5`@yApDnw|Bmoa2tRwSNMHDz8vQ-8o`>c8JQ@UT7ruP&$Sxn}YY@KFU~lB1mVs+q zEAM;v>sS8r@j*fi99HD}P6Ro|lkyJ+-dXjHm5+U#DlgxEGDc7UQvUVt{V_NAvC?$3 zZ|eezlKo1)59N#G_l)yTKWTY=cdER6Kgt)$zwqbL@F%g?N<@k4*6 zwfIr8vrKr=l`_t3Ita>eHxwGqx3(f|Gt}j!0giuG||x@%<>#%9e+yJ_L=QqFnj}e zR^%KCk2g=a1#yQ=C(M@s%^3&G^vO78iyW#=(`Vz!MCP0ih&as%Th86!HJxEZUe0~6 zxM_qD_FQ@|p-E90Rgl{V-6oY&j@EO z9|m=A?juAwIJGPHF``O1^o0Urq%JM_}7|>XzD3h;&=(uxceL|uOh~-?yGWlx4&tv#h3&QH; z$uy0OH_G=KPSv46osx3{Lyt7VgPVE-bf{A&K8o;o;~|N*kcMfKsDq9-4heMH2gLgo z*6CBa9P_#-pTSV!qp zR}l6S{5?PzZA+-t>qwT=oFMF4mg0KiqvF#}YTmJYP;fKVwZ@OtwKLT^cy%+?)$mr& 
zRBPeY&Q$B+A-U=0;8d5fJogI0l}sG)P7gB4^StbQN_Q9Ze;fp|lC5Bv z;cjKtCMtd~xpT^MtXo zFJ^ofqK(+`$IHHiVJLfEQ$7UXzC@LdmTnRZs}{1-$*qx6t_G#+q0-S+fm)bPQ{iT$ z+JnEmImt=kjZouJK=s`0-V3-e+Z86kol`N$&v=J|=ax`Udk*qX;cv$MR5#~cO#Q4B zEyzKw=po9D{SxYB{|b61VLw6f)$<+H$NmBOWBkqDCz9C-jaDUx1+u28?kOq|lS!e7?0iIAnPWr_#)+T>^XP>v zay&`lqI8OCwB+P)Z6upBF+q7LIC2GGqSOeOIGQ|0R8^#DJs)a|5pA0S1xs;pJ!uiw z#00OJNzh~gv7B>g%K0$E)MVU{hc}lMDOb%LEhd%SnfK4-P%^zpO&deGpJ7VKyC8?G%NPoVymz4u z7*U023F;}!MEp|K)2dtj{1fEHt{#8l;W2+1XiqEN1G+KT)zyyIn=U9T?&<0Z7KZ{o z^kUFryl}L5bLkTAl0^%8{7tRC-~t@7hBgGy+sHStTMer@txd@uFmzicT z|9;bS`0D+3dy%O$`xAtrrW9(HvERGd@2aJyQs`gBD*9ST{Dw(*4vAUOeNVK zlWB@+F2(%7RPq(bJ!#6%R*Ot(U%hG4?I!hlWa(bwIfwI{%X#Kxt2+{S3bKt8iipd? zxe7SfdD*IYsY&gJs@qKJ4W^Fljl?vIGj(vL*~!Vx&i<~+)Mrw&rx>!)z`WkB=4LNm zNbA1FORwAYb_aVDZ)2;kty2l$&BTZjbt&Hd>kYKx1-A6E-0t;j=+(tZ(Rdv&X_)x~&MGH9@S*V8Qr?OiPa98UXJ`L`&a-=x#t6p`^g$#~^*i=F+@)l>KS@(`Uo z(;J%c^4>0`vDM$WfnIMMFDBlXjGGrao7-Bh_I2Pz&CTt;mO%WC$Vsn1Cb>~oR%Tfy z(&B9l^>`b*+Cv>s9~f=0inpu1iL8sxr?l{1GWu3#(t!IRHXxJK-ti)0@fzbmtFNa? z^95R2LII?(u7q;H3K!}Ow6%2Nij_)9D(L-sv&7@u8Qi-o}_{9 zGvSf}-$p;LAyJ)47EtJY$s2@`P{SQv7#^f@%70ggXGhd~Tc=m)_VxHW0vDI@t(so&&>jzqLvlt+f46+MnhS$ zD6u25{z8gb-7wSPjnqvg;Dv`;An#J{E#lr{#T)7j1^kHRy^nW3yhuFM&1*Mluokzr z(3dFe74zqpT=s>VBlZOy!BD4vS&JW5Xln%N>uGFVhIwScl126fE%@KkwBEkpnuYcS z$iXta<{MfUpl#DlJpKjX4uA~eUXTU2^TgldYw?4waaW`mOZHXXZkQ<=`RmLvAHogN zya;z#_^Ps225IwvT$X}Mjr%2aa+Hxrr~Z!BSeH&?T^fz-G&faWx;(BuE%PuOQ*5!{;?V})b&`6tJ7&*oknAZ zx*n^sHl4=WG#WG1^;nJd=`_}-(U_sG$7;MboyKd^Xv|R8$9c^uxuaDmgR_$AT2gfH z6S<6a&2Q@>mY>oO99U)W7aRe!S_a>XLpRFcF#(NN;#+D08RK;eHB=Ocg1zo%HX?kXpIaesFhoV z>D7*+7sa)#mBGbvXtfMB#GxBx&?lfTQ44}I{*rFgf;8(*2!Q(TAyDF zG9|HjL=8y>Z;Y#5BZCQQzpS}r0@61kiBeqDraJnHMnhWM7(dTXlrds{l;{4*xJi-c z=gD}sBG28E@o|bgzfZ=^iah5}#&N1|Pegdwh`SB!Y4M!+_wEvEGn>cRf zp7K5Tq~AW5115oElP|3uXqF;9$3^|h`*?mYaCv`$#A!4pew#>FwtxxHPq-rmw+p-` z1uqqNcM47?Eu=@D%gb_Y08ZtS_f_SJa@{9zd4H8G@9&wO>B^DyoKcuni3)sI3Vuf5 z4Jr6^1LX((A_b>2Ou~=&lJ&acDsRnPQBdLCEAZYF{HFq! 
z_m@ejy}-$yvj3u(%ndPok|Mu1B>6uPdgOg;5;tOJQ^j3g@_R-S&y@eUjNhvKQ66|n z0r=_4>i|EM_xL58afRStD)8L`cM7~l@?Xq>Ndj*Mjt%%RfIz4Ere9Rjxt{?h{A zA<8AM1tw(i_O`oGFbZ!v!^h4G7XYXHH!S0f{~`E`1zsc4tq^!6(=$Uk66Ang;5CAO zC?)?*5?{(0BZ9wM;JXAa`&}>7KV{53MaRaIg>0ht49=_4;X5<%uS3J8@9CwU9MglR z_C&=Z{A30_FJ!>q1fK5ud+#&;vEMWNo8Xt{<8mCEhyf~Hxz5jk)3pct&S5^!xa>}g zGVo6p{HbTI^qfpOJ2U~Oa>*sA%;&}o`kg|5>KXh?$X>edF>cSG=LZ?^Ujk2;&qsuQ zd0^rLO^(h*WuN5;|BW*JIb*&Xc`8G?whZkpKLbu@uj%UX??|5Pka{Xir~T=^KY3jS zJ)IfwJ2K$k&453Y0Y8`le+4+T2YKxv+rxVdPqeTIk4tZdsTuGBhQmH$Jg4Jja+@;L z%Odboes(l*KAE580+&~s62C4(x*Zwt$rn}NTO_4hQ_7wij)MO_N}kfPY^WoHcu z0G^#~!qE(U_mQ1Yahg60>22=x(({Hq65Gl2u}K^};aia{9K&9H7e1d!vLd>uEzsjD zaui`BK3L@7QB7Umrcg(>0(Cexp@SnPA>bk$?%MwS7athqzI1@D@pv7g1_W* z_*vrKq9jf7gh6~Mvnb_&M~DbCrF34&on(tRnsPuTRAs20xb?jm>#wmV7zm>BiyD>D zkGXn-9bSAAl%0}!(HvUZyVm>Ky-hg640wH^%{UkA=x)b{N1KW+yL90a)JqyB_L*3( zucybi#fxWQ@eFh`j>i1dPJN)xKI%37(2o`CgxeND1?qk}P78P64ohrHuW;+o~l*HqPc>s;kH ze3Y#0GpdPsN=QDTGY5SwUL47`;LEClnSXNC>&1t2Oy=^m3^o+8G?A? zSAf~~Ws^B5m1IODNuRZ44IzHemDGUgyo-NgHd&f|%{Ce0-?L4|KJOdE^4r0@8&N{? zlex({Xh2Oy_@{4^F_w~iyFl`{j+4GI%zNTb|e@HkPOZ{$eaxDLBa56UL#GjwYCNuF51SezsT7n|IR{HN79O>JeUE>mr0DJm8HR;q5X%?7==#6-JqNN_ zO5s|RJGrkceQ{YMVrg$6*%g7r>w)NgGUggYK>8009GA_^$#tM0+4tYE+Ki`f6O7V7 zA`&DImo|!&m-|{yU?d{bm;2e$m(Q(%g*noZ=rj=Y44IUd``*%T70fdKl27_;5l_#g zNm}lYOWz^nQ}ZuiwtYu&QeN((OJ6<*C(}QcR{HUR2_d;pDgE10<>v}{;s1jgXo8b+66!}FG5y(B z@)+sUK60W<&HpzMN%9LNlT!qQPy0iOPTHSj$bB1s6qfZT&ljYBiDV>ZxYYD_0ZWvR zpF>m;H$7p(efcZXm-J5%u;5Q-Q$813U(GpE?Iq=qFs!tjl$Yf?1lh#&<$21kYglqh1NY*;QeOJ6 zA|_EjE>OcoY!sQ`q@48ML0qD|JfE?z=0bGWf1qZ?f$m7oMIeP#{u>bvJFNf! 
literal 0 HcmV?d00001 diff --git a/pkg/logtail/libPluginAdapter.so b/pkg/logtail/libPluginAdapter.so deleted file mode 100755 index 823180a8bdfae00a1ebbdcabc75a7a659a0fe6fb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 20768 zcmeHP3vgW3c|LbnS6XRTZ(A!_*j^JmwS%=Q$q!^ec=a-?Ml!}PObWSP?OsVcUhVGe z!`LtsHzoyDxV6&`F@vWykV!~LW=NY!UFgFEhF8-mkaqA;n(?Hh6@*F5qXGKx_WRFc zb??<`Af4$w&^Kr@# z2ZVx?jtQ74s_brp9o6&GCKdMGvb?3@a!l4=Q5jy|zGWzky6J|U``G02!N<)z541k{ zzI!w$J>tL|xH_uXIBKv*q)_4qBj`H!~z%ZER_ z?*0$%VUP8N-+1Ybwx|B$#Jdeoe*YhS{PvH2(eV5K_)X7yg}>^4``))7z2oU8{_@VZ zf4(1PWfRpiP)2ga5_sPd_-^1=mhtldSD zCGi@mKPdGR5??3r&Q<~3Eb;e1$$y>Hr}j{yer}-%s}AQVW1FO(tn}lRcpKOEv11aK zFpDD7%-UFASRiVg-y!j^1HS;A>T_JS>t12T&VrJkU10$zKle*K;=q|Dhu_IA(5KSiTFxk{V zJeq;^Xl_4%_}=s$GZo)aNRfRaX(amDrh5ZYmZta>Vrz9xLHCpFr6SW21vZMsKh;xP^t%nS3ZI+8c_S znOq3AqIL#kz{vZOsZj8SdqY>b+OaHD5>MM8ZS+9T-_&+#|nR0IymXT z6@PBK@JGi=>6!!QEqJ3);oBE1I32@Odf9@jI+N^z1(&BVZgSCrt7A3^<}J9~b-Df} z3(n7z9A)ey3vS&-+rvVEj;Gdk1uVF=-1QdxS2=HpQYza@1S%1zM4%FZN(3qq_;rpz z;_ooG$u__Lm8M5~HhegaW(-J94pt_~wd{4@#Qx>!V7_d4-3 z<#_95&c8}LO_km{4?c3D|3v_i6Z#K`Z2Ks`9L4RVJc_F=UTj=_47%(YNumEB*WdCL zA{#D8W-mpa{`vOE(;vAb+H;XtF6WoQ!Aj}CQ@l9YxO#ZO{#gGl5V1mNU*vfI0QW1SSxfz3ZdMY~fO5cHcY-Jr8%0 zIs6yEjbDn)?fYmHwL7up&l{+i=g(0?=Ktd|T0N)7xbi;OH-R$+2T#PM1vd}w3$9)$7~bcF?4qq2O}x=_ z`lDP(y;P_2&$w?1S%1zM4%FZN(3qqs6?O=fl34_5vWAq|0Dvx zWqY!7TR=TB3rrSLfpr=2U&n0l;og!2nL4I*y049(cJvJ9jb9XtanM&T6^rytZ~ME& z;(k!R-LnkMdIl3^X+Tu4|~i+LeGOA*cGy zLC&8^mgHY5$?t*uJ;?bpR3*=GG57y9$m`G!{!CWMUGb9rvyktBJOjE={-KimO~8*r z&Y#_u?01*sRcP<)kn?AcS z74vLZ0Md*_q+DG`())0f{DQC-B%tueM9*?+Oist{)gaPx4^S&P2)83ZV%lV;OY+{qw@_n zT*&d&A~p09J5wyxvgUF#d4<;Y9ea^JM7;#2ruP+_jkFmd%X z)`Pe`trP+(eXHG(y{61XyPEqI{~)w#JT)^!G|viDsm4`9%OGm1YO)-xzJi1LAQ6v# z8g{&Us_yTg!-nhYz^hJlQohzx_XuPc^j~^50`>j~y84GTJQiN+P5r-WDMTLcQ^fo? 
zXPUfEKv%!anRags5!Z{H3HVOHsm?Uct@q8sx2_4U+c!ZjT!QQKxk%G3xUg>nY3hQD z_%@QuYQgRD{WXkmk!NS0D9avMqXBGjb8?LpYJNN`>fV@ zH|R;<$7C1MYUjY6_R*l$y9IaFS3_Kn;2!fe5Z5cXCw$9^+oaV|kDv2>flAmS>UrLG z1<9_~Jah2=#syg`Yw3h(P3MXa$#jfVR&kY<6@;(m__Eib zuC>r>9jw;f7Y0FEb`u;N5X^wrNs~0@@CpWYu&rT z9AwL{BM&$8o!+@BLOav;0UDx>bE$Lfg?6S6r!)Tx@S}~drelQb)U^M7&W5Oku2XS8 z4H|8HNU#U}f56%OLivFIZti_TNcXa~K{D>=r8!rOXlK4xDy@Hwt+eYqRB3dY)wWCN z0sl9s!C@&q=zo~J4|LKF>^il}e=V_tQaa{;h?IvU%lr>eO^2l`kAELYN2IigwS9(+ zyLnBWYgM!}VVn&`T)ORX>G9iY+AF29rkkWx)^u}+t)^R~RMvE>WEpF_np(Yq*TuOG zMmv+Gy3jdV+pv~S5a=V;zMdMls@l0qMLY9%q)o+Yt9odpQIE!`>8n3SL(FyRpudT1 z0~={83%2pt6oTfouM1b?!=mrKFt^Zv7$U_HnAUe@MCIe*CuzKL;Rc z*48z&dfg434b56hO%rA^t~TawxW;>}cbj(%*!)X%eB!~4T!EY5>&5+;Cinc z2kaV|ZNM@6vs_q9R=APfs<|7yUg3>wz4aF7ebDepK&R3Os9z`^d#@(TY+M7kA&8q= z;ov$<7@Ic>Puy^O2SF_o;x!jg>l#F5nH_hvTT^m8|ty8&Y)!#TIlw4_E0VpjhVSnLN0kR zrV{z-!bB*R&YGbNUk)Ue#AK(j8s>05olfS=eAl*6Hl5Cg3b`yTj|pL=P3TD9R%7et zuB;iKj^?|PiB#c8Xj>?eiX{tilW|qageDUCoDIQ?W@5#S?Ey|OmdQ{>%gUxnf~7xk zb22zhr~drWjG03g0~u9Hq>;rShDIrSG-qJdQWEQ{gc{T1tk$K*w7X5yeV%@Om8+{& z_rIpMy{2og>3stL3jjBButVWJ*Xvh4rR#sJH{YVKJ*;qHm523& ztG9*l_3pZ$-cXXPKCGu)+geF7)2*-YoRSt-9M*4gt>qS1Y|?$7*R@+*F7%jTq$ej0 ztP~r)(a4(7xRJ?cnGu_gCQ>Yibzcz47|AtNMs7Nt%}*32S!QAnEex}XvB-)%^%^}` ziAD>#uo4YC)QyQ~&Sc5-R4$MH|Gas0sij@eNlgX#hUTT2iNV6;q=`yRCX!|<&0^DL z><}#}D^(c_&sZ#-noLaXjLu+5+hj62l`E}Fvn>!Ky9HHVe^o#7V#EqrBbH7UX5c=z z(5cKwC*u?;3{W;DM$*DkHu5kN%^pG}X-pt+)6r}^63tEX0&?)YzlTcZjVh#aiK!H> z0jPCkx?^-~#K8WLqK8e3)eFU%Z}}!o`4-+yv|tosb?%;)f9O*?qUs>#T(hl zvUpQz-Kf=@8!M32eJVTN!qhrZJKoC5-6wJ5Z3VTS)UJ=K&#)B{c{kQQcbCQ0-6^|` zn_>P^Cb=0VL1mJ=bQjJpbhA!7AUQXGo53!^3;}b1cZ+rulRyA2tmi)H=wHkyIeyZC zuaUUAuc`c9jf`fA1C>W<2YyaXLq2TWYPX5DTA*6dnlpd6RoY&5DWm@h# z0Z#)Da7+8PoQ_1v1G-<>!rOX-8!V%2vW->`L zZ^naHZ|dDzUWtDc!H8zF(W8c$%4d(VNlZdaBVL%9ISLaC$G@UrQ(baWVHi7h4~&l( zBRhxb^9u%MWX4EDf|23f%(!9f#=(Ix8GjOl! 
zbvU$Po~J=cX~yXTkW<6f6bBB@~w&mLHX{G6rN>GZ`fVnad>e zEXcpt5zNzUFUWf{m`%r{`6vsT({e~o$6>2DVIjt*uuyrjq9WjcS0_4?h@n1dIL0Q( zOPk2$SP(nQ40Zr0nG=O6m`77n*s~;?NKKNx5KT;E&BKbL3lo(`Y!>hA{|ki9y!5OI zhXbC}yid`!5?91IoeM(nOFTSMz^G$CRR^&hpO!x7w@g1Vz`9(2|_x(~~2b z>?3KU2M~23p zvRCs;^*ytI46KTqDp!qz3$V48uda8_g+vJvK}uU?ujoHJ>{(cRE?@gJm0}y)%9nt`YV})xeRJQ1(`; z_FZc|56JHrwaWwJDhTOZr}|Hwf9Uw4`rc}v84=tOhjUe~(rsSCe*OlbLDxgp 0 { - labels = append(labels, LabelPair{Key: "plugin_id", Value: pluginMeta.PluginID}) - } - if len(pluginMeta.NodeID) > 0 { - labels = append(labels, LabelPair{Key: "node_id", Value: pluginMeta.NodeID}) - } - if len(pluginMeta.ChildNodeID) > 0 { - labels = append(labels, LabelPair{Key: "child_node_id", Value: pluginMeta.ChildNodeID}) - } - if len(pluginMeta.PluginType) > 0 { - labels = append(labels, LabelPair{Key: "plugin_name", Value: pluginMeta.PluginType}) - } - return labels -} - // Context for plugin type Context interface { InitContext(project, logstore, configName string) diff --git a/pkg/pipeline/plugin.go b/pkg/pipeline/plugin.go index ef0509a750..a2ddc4e721 100644 --- a/pkg/pipeline/plugin.go +++ b/pkg/pipeline/plugin.go @@ -31,8 +31,6 @@ type PluginContext struct { type PluginMeta struct { PluginID string - NodeID string - ChildNodeID string PluginType string PluginTypeWithID string } diff --git a/pkg/pipeline/self_metrics.go b/pkg/pipeline/self_metrics.go index b63d49f86e..ef84b046d5 100644 --- a/pkg/pipeline/self_metrics.go +++ b/pkg/pipeline/self_metrics.go @@ -21,6 +21,7 @@ const ( CounterType // counter in the last window. CumulativeCounterType // cumulative counter. AverageType // average value in the last window. + MaxType // max value in the last window. LatencyType // average latency in the last window. StringType // string value. GaugeType // gauge value in the last window. 
diff --git a/pkg/protocol/decoder/influxdb/decoder_test.go b/pkg/protocol/decoder/influxdb/decoder_test.go index 777a072d4b..ccf910a5d9 100644 --- a/pkg/protocol/decoder/influxdb/decoder_test.go +++ b/pkg/protocol/decoder/influxdb/decoder_test.go @@ -141,7 +141,7 @@ func TestFieldsExtend(t *testing.T) { for _, testCase := range cases { decoder := &Decoder{FieldsExtend: testCase.enableFieldsExtend} - config.LogtailGlobalConfig.EnableSlsMetricsFormat = testCase.enableSlsMetricsFormat + config.LoongcollectorGlobalConfig.EnableSlsMetricsFormat = testCase.enableSlsMetricsFormat logs, err := decoder.Decode([]byte(txtWithDotNames), &http.Request{}, nil) if testCase.wantErr { assert.NotNil(t, err) diff --git a/pkg/protocol/decoder/opentelemetry/decoder_test.go b/pkg/protocol/decoder/opentelemetry/decoder_test.go index 292795386c..ad74ca012f 100644 --- a/pkg/protocol/decoder/opentelemetry/decoder_test.go +++ b/pkg/protocol/decoder/opentelemetry/decoder_test.go @@ -208,7 +208,7 @@ func TestDecoder_Decode_MetricsUntyped(t *testing.T) { } func TestDecoder_Decode_MetricsAll(t *testing.T) { - config.LogtailGlobalConfig.EnableSlsMetricsFormat = true + config.LoongcollectorGlobalConfig.EnableSlsMetricsFormat = true type args struct { md func() pmetric.Metrics } diff --git a/pkg/util/util.go b/pkg/util/util.go index dbdeeb1184..d3484703c1 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -230,15 +230,6 @@ func CutString(val string, maxLen int) string { return val[0:maxLen] } -func GetCurrentBinaryPath() string { - ex, err := os.Executable() - if err != nil { - return "./" - } - exPath := filepath.Dir(ex) - return exPath + "/" -} - func PathExists(path string) (bool, error) { _, err := os.Stat(path) if err == nil { diff --git a/plugin_main/plugin_export.go b/plugin_main/plugin_export.go index 66350d8973..9edb4b7eb8 100644 --- a/plugin_main/plugin_export.go +++ b/plugin_main/plugin_export.go @@ -94,7 +94,6 @@ import "C" //nolint:typecheck var initOnce sync.Once var loadOnce 
sync.Once -var started bool //export InitPluginBase func InitPluginBase() int { @@ -111,33 +110,30 @@ func LoadGlobalConfig(jsonStr string) int { // Only the first call will return non-zero. retcode := 0 loadOnce.Do(func() { - logger.Info(context.Background(), "load global config", jsonStr) if len(jsonStr) >= 2 { // For invalid JSON, use default value and return 0 - if err := json.Unmarshal([]byte(jsonStr), &config.LogtailGlobalConfig); err != nil { - logger.Error(context.Background(), "LOAD_PLUGIN_ALARM", "load global config error", err) + if err := json.Unmarshal([]byte(jsonStr), &config.LoongcollectorGlobalConfig); err != nil { + fmt.Println("load global config error", "GlobalConfig", jsonStr, "err", err) retcode = 1 } - config.UserAgent = fmt.Sprintf("ilogtail/%v (%v) ip/%v", config.BaseVersion, runtime.GOOS, config.LogtailGlobalConfig.HostIP) + logger.InitLogger() + logger.Info(context.Background(), "load global config", jsonStr) + config.UserAgent = fmt.Sprintf("ilogtail/%v (%v) ip/%v", config.BaseVersion, runtime.GOOS, config.LoongcollectorGlobalConfig.HostIP) } }) if retcode == 0 { // Update when both of them are not empty. 
logger.Debugf(context.Background(), "host IP: %v, hostname: %v", - config.LogtailGlobalConfig.HostIP, config.LogtailGlobalConfig.Hostname) - if len(config.LogtailGlobalConfig.Hostname) > 0 && len(config.LogtailGlobalConfig.HostIP) > 0 { - util.SetNetworkIdentification(config.LogtailGlobalConfig.HostIP, config.LogtailGlobalConfig.Hostname) + config.LoongcollectorGlobalConfig.HostIP, config.LoongcollectorGlobalConfig.Hostname) + if len(config.LoongcollectorGlobalConfig.Hostname) > 0 && len(config.LoongcollectorGlobalConfig.HostIP) > 0 { + util.SetNetworkIdentification(config.LoongcollectorGlobalConfig.HostIP, config.LoongcollectorGlobalConfig.Hostname) } } return retcode } -//export LoadConfig -func LoadConfig(project string, logstore string, configName string, logstoreKey int64, jsonStr string) int { +//export LoadPipeline +func LoadPipeline(project string, logstore string, configName string, logstoreKey int64, jsonStr string) int { logger.Debug(context.Background(), "load config", configName, logstoreKey, "\n"+jsonStr) - if started { - logger.Error(context.Background(), "CONFIG_LOAD_ALARM", "cannot load config before hold on the running configs") - return 1 - } defer func() { if err := recover(); err != nil { trace := make([]byte, 2048) @@ -158,81 +154,77 @@ func LoadConfig(project string, logstore string, configName string, logstoreKey return 0 } -//export UnloadConfig -func UnloadConfig(project string, logstore string, configName string) int { +//export UnloadPipeline +func UnloadPipeline(configName string) int { logger.Debug(context.Background(), "unload config", configName) - return 0 -} - -//export ProcessRawLog -func ProcessRawLog(configName string, rawLog []byte, packID string, topic string) int { - plugin, flag := pluginmanager.LogtailConfig[configName] - if !flag { - return -1 - } - - // rawLog will be copied when it is converted to string, packID and topic - // are unused now, so deep copy is unnecessary. 
- return plugin.ProcessRawLog(rawLog, util.StringDeepCopy(packID), topic) -} - -//export ProcessRawLogV2 -func ProcessRawLogV2(configName string, rawLog []byte, packID string, topic string, tags []byte) int { - config, exists := pluginmanager.LogtailConfig[configName] - if !exists { - return -1 + err := pluginmanager.UnloadPartiallyLoadedConfig(util.StringDeepCopy(configName)) + if err != nil { + return 1 } - return config.ProcessRawLogV2(rawLog, util.StringDeepCopy(packID), util.StringDeepCopy(topic), tags) + return 0 } //export ProcessLog func ProcessLog(configName string, logBytes []byte, packID string, topic string, tags []byte) int { - config, exists := pluginmanager.LogtailConfig[configName] - if !exists { - logger.Debug(context.Background(), "config not found", configName) + pluginmanager.LogtailConfigLock.RLock() + config, flag := pluginmanager.LogtailConfig[configName] + if !flag { return -1 } + pluginmanager.LogtailConfigLock.RUnlock() return config.ProcessLog(logBytes, util.StringDeepCopy(packID), util.StringDeepCopy(topic), tags) } //export ProcessLogGroup func ProcessLogGroup(configName string, logBytes []byte, packID string) int { - config, exists := pluginmanager.LogtailConfig[configName] - if !exists { - logger.Debug(context.Background(), "config not found", configName) + pluginmanager.LogtailConfigLock.RLock() + config, flag := pluginmanager.LogtailConfig[configName] + pluginmanager.LogtailConfigLock.RUnlock() + if !flag { + logger.Error(context.Background(), "PLUGIN_ALARM", "config not found", configName) return -1 } return config.ProcessLogGroup(logBytes, util.StringDeepCopy(packID)) } -//export HoldOn -func HoldOn(exitFlag int) { - logger.Info(context.Background(), "Hold on", "start", "flag", exitFlag) - if started { - err := pluginmanager.HoldOn(exitFlag != 0) - if err != nil { - logger.Error(context.Background(), "PLUGIN_ALARM", "hold on error", err) - } +//export StopAllPipelines +func StopAllPipelines(withInputFlag int) { + 
logger.Info(context.Background(), "Stop all", "start", "with input", withInputFlag) + err := pluginmanager.StopAllPipelines(withInputFlag != 0) + if err != nil { + logger.Error(context.Background(), "PLUGIN_ALARM", "stop all error", err) } - started = false - logger.Info(context.Background(), "Hold on", "success") - if exitFlag != 0 { + logger.Info(context.Background(), "Stop all", "success", "with input", withInputFlag) + // Stop with input first, without input last. + if withInputFlag == 0 { logger.Info(context.Background(), "logger", "close and recover") + logger.Flush() logger.Close() } } -//export Resume -func Resume() { - logger.Info(context.Background(), "Resume", "start") - if !started { - err := pluginmanager.Resume() - if err != nil { - logger.Error(context.Background(), "PLUGIN_ALARM", "resume error", err) - } +//export Stop +func Stop(configName string, removedFlag int) { + logger.Info(context.Background(), "Stop", "start", "config", configName, "removed", removedFlag) + err := pluginmanager.Stop(configName, removedFlag != 0) + if err != nil { + logger.Error(context.Background(), "PLUGIN_ALARM", "stop error", err) + } +} + +//export StopBuiltInModules +func StopBuiltInModules() { + pluginmanager.StopBuiltInModulesConfig() +} + +//export Start +func Start(configName string) { + logger.Info(context.Background(), "Start", "start", "config", configName) + err := pluginmanager.Start(configName) + if err != nil { + logger.Error(context.Background(), "PLUGIN_ALARM", "start error", err) } - started = true - logger.Info(context.Background(), "Resume", "success") + logger.Info(context.Background(), "Start", "success", "config", configName) } //export CtlCmd @@ -242,7 +234,7 @@ func CtlCmd(configName string, cmdID int, cmdDetail string) { //export GetContainerMeta func GetContainerMeta(containerID string) *C.struct_containerMeta { - logger.Init() + logger.InitLogger() meta := helper.GetContainerMeta(containerID) if meta == nil { logger.Debug(context.Background(), 
"get meta", "") @@ -332,11 +324,10 @@ func initPluginBase(cfgStr string) int { // Only the first call will return non-zero. rst := 0 initOnce.Do(func() { - logger.Init() + LoadGlobalConfig(cfgStr) InitHTTPServer() setGCPercentForSlowStart() logger.Info(context.Background(), "init plugin base, version", config.BaseVersion) - LoadGlobalConfig(cfgStr) if *flags.DeployMode == flags.DeploySingleton && *flags.EnableKubernetesMeta { instance := k8smeta.GetMetaManagerInstance() err := instance.Init("") @@ -351,6 +342,20 @@ func initPluginBase(cfgStr string) int { logger.Error(context.Background(), "PLUGIN_ALARM", "init plugin error", err) rst = 1 } + if pluginmanager.StatisticsConfig != nil { + pluginmanager.StatisticsConfig.Start() + } + if pluginmanager.AlarmConfig != nil { + pluginmanager.AlarmConfig.Start() + } + if pluginmanager.ContainerConfig != nil { + pluginmanager.ContainerConfig.Start() + } + err := pluginmanager.CheckPointManager.Init() + if err != nil { + logger.Error(context.Background(), "CHECKPOINT_INIT_ALARM", "init checkpoint manager error", err) + } + pluginmanager.CheckPointManager.Start() }) return rst } diff --git a/plugin_main/plugin_http.go b/plugin_main/plugin_http.go index 88c1b4fa04..3f34312881 100644 --- a/plugin_main/plugin_http.go +++ b/plugin_main/plugin_http.go @@ -112,20 +112,19 @@ func HandleLoadConfig(w http.ResponseWriter, r *http.Request) { _, _ = w.Write([]byte("parse body error")) return } - HoldOn(0) for _, cfg := range loadConfigs { - LoadConfig(cfg.Project, cfg.Logstore, cfg.ConfigName, cfg.LogstoreKey, cfg.JSONStr) + Stop(cfg.ConfigName, 0) + LoadPipeline(cfg.Project, cfg.Logstore, cfg.ConfigName, cfg.LogstoreKey, cfg.JSONStr) + Start(cfg.ConfigName) } - Resume() } // HandleHoldOn hold on the ilogtail process. func HandleHoldOn(w http.ResponseWriter, r *http.Request) { controlLock.Lock() defer controlLock.Unlock() - HoldOn(1) - // flush async logs when hold on with exit flag. 
- logger.Flush() + StopAllPipelines(1) + StopAllPipelines(0) w.WriteHeader(http.StatusOK) } diff --git a/plugin_main/plugin_main.go b/plugin_main/plugin_main.go index cb1e7e83e7..578d12c04e 100644 --- a/plugin_main/plugin_main.go +++ b/plugin_main/plugin_main.go @@ -166,21 +166,19 @@ func main() { p := fmt.Sprintf("PluginProject_%d", i) l := fmt.Sprintf("PluginLogstore_%d", i) c := fmt.Sprintf("1.0#PluginProject_%d##Config%d", i, i) - if LoadConfig(p, l, c, 123, cfg) != 0 { + if LoadPipeline(p, l, c, 123, cfg) != 0 { logger.Warningf(context.Background(), "START_PLUGIN_ALARM", "%s_%s_%s start fail, config is %s", p, l, c, cfg) return } + Start(c) } - Resume() - // handle the first shutdown signal gracefully, and exit directly if FileIOFlag is true if !*flags.FileIOFlag { <-signals.SetupSignalHandler() } - logger.Info(context.Background(), "########################## exit process begin ##########################") - HoldOn(1) - logger.Info(context.Background(), "########################## exit process done ##########################") + StopAllPipelines(1) + StopAllPipelines(0) } func generatePluginDoc() { diff --git a/plugin_main/plugin_main_test.go b/plugin_main/plugin_main_test.go index 120c977cbc..31d95adc9d 100644 --- a/plugin_main/plugin_main_test.go +++ b/plugin_main/plugin_main_test.go @@ -98,24 +98,29 @@ func TestHangConfigWhenStop(t *testing.T) { // Initialize plugin and run config. 
require.Equal(t, 0, InitPluginBase()) - require.Equal(t, 0, LoadConfig("project", "logstore", configName, 0, badConfigStr)) - config, exists := pluginmanager.LogtailConfig[configName] - require.True(t, exists) + require.Equal(t, 0, LoadPipeline("project", "logstore", configName, 0, badConfigStr)) + config := pluginmanager.ToStartPipelineConfigWithoutInput + require.NotNil(t, config) require.Equal(t, configName, config.ConfigName) flusher, _ := pluginmanager.GetConfigFlushers(config.PluginRunner)[0].(*BadFlusher) flusher.Shutdown = shutdown - Resume() + Start(configName) time.Sleep(time.Second * 2) + require.Nil(t, pluginmanager.ToStartPipelineConfigWithoutInput) + pluginmanager.LogtailConfigLock.RLock() + _, exists := pluginmanager.LogtailConfig[configName] + require.True(t, exists) + pluginmanager.LogtailConfigLock.RUnlock() // Stop config, it will hang. - HoldOn(0) + Stop(configName, 0) time.Sleep(time.Second * 2) config, exists = pluginmanager.DisabledLogtailConfig[configName] require.Equal(t, configName, config.ConfigName) require.True(t, exists) - // Load again, fail. + // Load again, succeed. Changed since independently reload time.Sleep(time.Second) - require.Equal(t, 1, LoadConfig("project", "logstore", configName, 0, badConfigStr)) + require.Equal(t, 0, LoadPipeline("project", "logstore", configName, 0, badConfigStr)) // Notify the config to quit so that it can be enabled again. close(shutdown) @@ -123,40 +128,45 @@ func TestHangConfigWhenStop(t *testing.T) { require.Empty(t, pluginmanager.DisabledLogtailConfig) // Load again, succeed. 
- require.Equal(t, 0, LoadConfig("project", "logstore", configName, 0, badConfigStr)) - config, exists = pluginmanager.LogtailConfig[configName] - require.True(t, exists) + require.Equal(t, 0, LoadPipeline("project", "logstore", configName, 0, badConfigStr)) + config = pluginmanager.ToStartPipelineConfigWithoutInput + require.NotNil(t, config) require.Equal(t, configName, config.ConfigName) + Start(configName) + time.Sleep(time.Second) + pluginmanager.LogtailConfigLock.RLock() + _, exists = pluginmanager.LogtailConfig[configName] + pluginmanager.LogtailConfigLock.RUnlock() + require.True(t, exists) flusher, _ = pluginmanager.GetConfigFlushers(config.PluginRunner)[0].(*BadFlusher) shutdown = make(chan int) flusher.Shutdown = shutdown - Resume() - time.Sleep(time.Second) // Stop config, hang again. - HoldOn(0) + Stop(config.ConfigNameWithSuffix, 0) time.Sleep(time.Second * 2) - config, exists = pluginmanager.DisabledLogtailConfig[configName] + config, exists = pluginmanager.DisabledLogtailConfig[config.ConfigNameWithSuffix] require.True(t, exists) require.Equal(t, configName, config.ConfigName) - // Load again, fail. - time.Sleep(time.Second) - require.Equal(t, 1, LoadConfig("project", "logstore", configName, 0, badConfigStr)) - // Change config detail so that it can be loaded again. + // Change config detail, load a new pipeline. validConfigStr := fmt.Sprintf(configTemplateJSONStr, 4) - require.Equal(t, 0, LoadConfig("project", "logstore", configName, 0, validConfigStr)) - Resume() + require.Equal(t, 0, LoadPipeline("project", "logstore", configName, 0, validConfigStr)) + Start(configName) time.Sleep(time.Second * 2) + pluginmanager.LogtailConfigLock.RLock() config, exists = pluginmanager.LogtailConfig[configName] + pluginmanager.LogtailConfigLock.RUnlock() require.True(t, exists) require.Equal(t, configName, config.ConfigName) - require.Empty(t, pluginmanager.DisabledLogtailConfig) // Quit. 
time.Sleep(time.Second) - HoldOn(1) - require.Empty(t, pluginmanager.DisabledLogtailConfig) + StopAllPipelines(1) + StopAllPipelines(0) + pluginmanager.LogtailConfigLock.RLock() + require.Empty(t, pluginmanager.LogtailConfig) + pluginmanager.LogtailConfigLock.RUnlock() // Close hanged goroutine. close(shutdown) @@ -169,36 +179,54 @@ func TestSlowConfigWhenStop(t *testing.T) { // Initialize plugin and run config. require.Equal(t, 0, InitPluginBase()) - require.Equal(t, 0, LoadConfig("project", "logstore", configName, 0, badConfigStr)) + require.Equal(t, 0, LoadPipeline("project", "logstore", configName, 0, badConfigStr)) + config := pluginmanager.ToStartPipelineConfigWithoutInput + require.NotNil(t, config) + require.Equal(t, configName, config.ConfigName) + Start(configName) config, exists := pluginmanager.LogtailConfig[configName] require.True(t, exists) require.Equal(t, configName, config.ConfigName) - Resume() time.Sleep(time.Second * 2) + pluginmanager.LogtailConfigLock.RLock() + config, ok := pluginmanager.LogtailConfig[configName] + pluginmanager.LogtailConfigLock.RUnlock() + require.True(t, ok) + require.Equal(t, configName, config.ConfigName) // Stop config, it will hang. - HoldOn(0) + Stop(configName, 0) config, exists = pluginmanager.DisabledLogtailConfig[configName] require.True(t, exists) require.Equal(t, configName, config.ConfigName) - // Load again, fail. + // Load again, success. Changed since independently reload time.Sleep(time.Second) - require.Equal(t, 1, LoadConfig("project", "logstore", configName, 0, badConfigStr)) + require.Equal(t, 0, LoadPipeline("project", "logstore", configName, 0, badConfigStr)) require.Empty(t, pluginmanager.LogtailConfig) + require.Nil(t, pluginmanager.ToStartPipelineConfigWithInput) // Wait more time, so that the config can finish stopping. time.Sleep(time.Second * 5) // Load again, succeed. 
- require.Equal(t, 0, LoadConfig("project", "logstore", configName, 0, badConfigStr)) + require.Equal(t, 0, LoadPipeline("project", "logstore", configName, 0, badConfigStr)) + config = pluginmanager.ToStartPipelineConfigWithoutInput + require.NotNil(t, config) + require.Equal(t, configName, config.ConfigName) + Start(configName) config, exists = pluginmanager.LogtailConfig[configName] require.True(t, exists) require.Equal(t, configName, config.ConfigName) - Resume() time.Sleep(time.Second) + pluginmanager.LogtailConfigLock.RLock() + config, ok = pluginmanager.LogtailConfig[configName] + pluginmanager.LogtailConfigLock.RUnlock() + require.True(t, ok) + require.Equal(t, configName, config.ConfigName) // Quit. time.Sleep(time.Second) - HoldOn(1) + StopAllPipelines(1) + StopAllPipelines(0) time.Sleep(time.Second * 6) require.Empty(t, pluginmanager.DisabledLogtailConfig) } diff --git a/pluginmanager/always_online_manager.go b/pluginmanager/always_online_manager.go deleted file mode 100644 index 37af0c5620..0000000000 --- a/pluginmanager/always_online_manager.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2021 iLogtail Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package pluginmanager - -import ( - "sync" - "time" - - "github.com/alibaba/ilogtail/pkg/logger" -) - -const alwaysOnlineTimeoutCheckInterval = time.Second * time.Duration(3) - -var instanceAlwaysOnlineManager *AlwaysOnlineManager -var onceAlwaysOnlineManager sync.Once - -type alwaysOnlineItem struct { - config *LogstoreConfig - addedTime time.Time - timeout time.Duration -} - -// AlwaysOnlineManager is used to manage the plugins that do not want to stop when config reloading -type AlwaysOnlineManager struct { - configMap map[string]*alwaysOnlineItem - lock sync.Mutex -} - -// GetAlwaysOnlineManager get a AlwaysOnlineManager instance -func GetAlwaysOnlineManager() *AlwaysOnlineManager { - onceAlwaysOnlineManager.Do( - func() { - instanceAlwaysOnlineManager = &AlwaysOnlineManager{ - configMap: make(map[string]*alwaysOnlineItem), - } - go instanceAlwaysOnlineManager.run() - }, - ) - return instanceAlwaysOnlineManager -} - -// AddCachedConfig add cached config into manager, manager will stop and delete this config when timeout -func (aom *AlwaysOnlineManager) AddCachedConfig(config *LogstoreConfig, timeout time.Duration) { - aom.lock.Lock() - defer aom.lock.Unlock() - alwaysOnlineItem := &alwaysOnlineItem{ - config: config, - addedTime: time.Now(), - timeout: timeout, - } - aom.configMap[config.ConfigNameWithSuffix] = alwaysOnlineItem -} - -// GetCachedConfig get cached config from manager and delete this item, so manager will not close this config -func (aom *AlwaysOnlineManager) GetCachedConfig(configName string) (config *LogstoreConfig, ok bool) { - aom.lock.Lock() - defer aom.lock.Unlock() - if item, ok := aom.configMap[configName]; ok { - delete(aom.configMap, configName) - return item.config, true - } - return nil, false -} - -// GetDeletedConfigs returns cached configs not in @existConfigs. 
-func (aom *AlwaysOnlineManager) GetDeletedConfigs( - existConfigs map[string]*LogstoreConfig) map[string]*LogstoreConfig { - aom.lock.Lock() - defer aom.lock.Unlock() - ret := make(map[string]*LogstoreConfig) - for name, cfg := range aom.configMap { - if _, exists := existConfigs[name]; !exists { - ret[name] = cfg.config - delete(aom.configMap, name) - } - } - return ret -} - -func (aom *AlwaysOnlineManager) run() { - for { - time.Sleep(alwaysOnlineTimeoutCheckInterval) - - var toDeleteItems []*alwaysOnlineItem - nowTime := time.Now() - - aom.lock.Lock() - for key, item := range aom.configMap { - if nowTime.After(item.addedTime.Add(item.timeout)) { - toDeleteItems = append(toDeleteItems, item) - delete(aom.configMap, key) - } - } - aom.lock.Unlock() - - if len(toDeleteItems) == 0 { - continue - } - for _, item := range toDeleteItems { - go func(config *LogstoreConfig, addTime time.Time) { - defer panicRecover(config.ConfigName) - logger.Info(config.Context.GetRuntimeContext(), "delete timeout config, add time", addTime, "config", config.ConfigName) - err := config.Stop(true) - logger.Info(config.Context.GetRuntimeContext(), "delete timeout config done", config.ConfigName, "error", err) - }(item.config, item.addedTime) - } - } -} diff --git a/pluginmanager/always_online_manager_test.go b/pluginmanager/always_online_manager_test.go deleted file mode 100644 index e14629fce4..0000000000 --- a/pluginmanager/always_online_manager_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2021 iLogtail Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package pluginmanager - -import ( - "strconv" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/alibaba/ilogtail/pkg/config" - _ "github.com/alibaba/ilogtail/pkg/logger/test" - "github.com/alibaba/ilogtail/pkg/protocol" - "github.com/alibaba/ilogtail/plugins/test/mock" -) - -// init change the logtail config dir to avoid change the config on the production when testing. -func init() { - config.LogtailGlobalConfig.LogtailSysConfDir = "." -} - -func TestAlwaysOnlineManager(t *testing.T) { - aom := GetAlwaysOnlineManager() - newLogstoreConfig := func(name string, hash string) *LogstoreConfig { - conf := &LogstoreConfig{} - conf.ConfigName = config.GetRealConfigName(name) - conf.ConfigNameWithSuffix = name - conf.configDetailHash = hash - conf.Context = mock.NewEmptyContext("p", "l", "c") - conf.PluginRunner = &pluginv1Runner{LogstoreConfig: conf, FlushOutStore: NewFlushOutStore[protocol.LogGroup]()} - conf.pauseChan = make(chan struct{}) - conf.resumeChan = make(chan struct{}) - conf.PluginRunner.Init(1, 1) - return conf - } - for i := 0; i < 1000; i++ { - aom.AddCachedConfig(newLogstoreConfig(strconv.Itoa(i)+"/1", strconv.Itoa(i)), time.Minute) - } - - config := newLogstoreConfig("x/1", "x") - aom.AddCachedConfig(config, time.Second*time.Duration(5)) - require.Equal(t, len(aom.configMap), 1001) - - time.Sleep(time.Second) - for i := 0; i < 500; i++ { - var ok bool - config, ok = aom.GetCachedConfig(strconv.Itoa(i) + "/1") - require.Equal(t, ok, true) - require.Equal(t, config.ConfigName, config.configDetailHash) - require.Equal(t, config.ConfigName, strconv.Itoa(i)) - } - require.Equal(t, len(aom.configMap), 501) - - time.Sleep(time.Second * time.Duration(8)) - config, ok := aom.GetCachedConfig("x/1") - require.True(t, config == nil) - require.Equal(t, ok, false) -} diff --git a/pluginmanager/checkpoint_manager.go 
b/pluginmanager/checkpoint_manager.go index 456883da23..b2d1019e49 100644 --- a/pluginmanager/checkpoint_manager.go +++ b/pluginmanager/checkpoint_manager.go @@ -23,22 +23,26 @@ import ( "sync" "time" + "github.com/syndtr/goleveldb/leveldb" + "github.com/alibaba/ilogtail/pkg/config" "github.com/alibaba/ilogtail/pkg/logger" "github.com/alibaba/ilogtail/pkg/util" - - "github.com/syndtr/goleveldb/leveldb" ) -var CheckPointFile = flag.String("CheckPointFile", "checkpoint", "checkpoint file name, base dir(binary dir)") +var CheckPointFile = flag.String("CheckPointFile", "go_plugin_checkpoint", "checkpoint file name, base dir(binary dir)") var CheckPointCleanInterval = flag.Int("CheckPointCleanInterval", 600, "checkpoint clean interval, second") var MaxCleanItemPerInterval = flag.Int("MaxCleanItemPerInterval", 1000, "max clean items per interval") +const DefaultCleanThreshold = 6 // one hour + type checkPointManager struct { - db *leveldb.DB - shutdown chan struct{} - waitgroup sync.WaitGroup - initFlag bool + db *leveldb.DB + shutdown chan struct{} + waitgroup sync.WaitGroup + initFlag bool + configCounter map[string]int + cleanThreshold int } var CheckPointManager checkPointManager @@ -79,13 +83,17 @@ func (p *checkPointManager) Init() error { return nil } p.shutdown = make(chan struct{}, 1) - logtailConfigDir := config.LogtailGlobalConfig.LogtailSysConfDir - pathExist, err := util.PathExists(logtailConfigDir) + p.configCounter = make(map[string]int) + p.cleanThreshold = DefaultCleanThreshold + logtailDataDir := config.LoongcollectorGlobalConfig.LoongcollectorDataDir + pathExist, err := util.PathExists(logtailDataDir) var dbPath string if err == nil && pathExist { - dbPath = filepath.Join(logtailConfigDir, *CheckPointFile) + dbPath = filepath.Join(logtailDataDir, *CheckPointFile) } else { - dbPath = util.GetCurrentBinaryPath() + *CheckPointFile + // c++程序如果这个目录创建失败会直接exit,所以这里一般应该不会走进来 + logger.Error(context.Background(), "CHECKPOINT_ALARM", "logtailDataDir not exist", 
logtailDataDir, "err", err) + return err } p.db, err = leveldb.OpenFile(dbPath, nil) @@ -103,8 +111,8 @@ func (p *checkPointManager) Init() error { return nil } -func (p *checkPointManager) HoldOn() { - logger.Info(context.Background(), "checkpoint", "HoldOn") +func (p *checkPointManager) Stop() { + logger.Info(context.Background(), "checkpoint", "Stop") if p.db == nil { return } @@ -112,8 +120,8 @@ func (p *checkPointManager) HoldOn() { p.waitgroup.Wait() } -func (p *checkPointManager) Resume() { - logger.Info(context.Background(), "checkpoint", "Resume") +func (p *checkPointManager) Start() { + logger.Info(context.Background(), "checkpoint", "Start") if p.db == nil { return } @@ -124,7 +132,7 @@ func (p *checkPointManager) Resume() { func (p *checkPointManager) run() { for { if util.RandomSleep(time.Second*time.Duration(*CheckPointCleanInterval), 0.1, p.shutdown) { - logger.Info(context.Background(), "checkpoint", "HoldOn success") + logger.Info(context.Background(), "checkpoint", "Stop success") p.waitgroup.Done() return } @@ -143,13 +151,9 @@ func (p *checkPointManager) keyMatch(key []byte) bool { // configName in checkpoint is real config Name, while configName in LogtailConfig has suffix '/1' or '/2' // since checkpoint is only used in input, so configName can only be 'realConfigName/1', meaning go pipeline with input configName += "/1" + LogtailConfigLock.RLock() _, existFlag := LogtailConfig[configName] - if existFlag { - return true - } - DisabledLogtailConfigLock.Lock() - defer DisabledLogtailConfigLock.Unlock() - _, existFlag = DisabledLogtailConfig[configName] + LogtailConfigLock.RUnlock() return existFlag } @@ -167,6 +171,8 @@ func (p *checkPointManager) check() { if len(cleanItems) >= *MaxCleanItemPerInterval { break } + } else { + delete(p.configCounter, string(iter.Key())) } } iter.Release() @@ -175,7 +181,11 @@ func (p *checkPointManager) check() { logger.Warning(context.Background(), "CHECKPOINT_ALARM", "iterate checkpoint error", err) } for _, 
key := range cleanItems { - _ = p.db.Delete([]byte(key), nil) - logger.Info(context.Background(), "no config, delete checkpoint", key) + p.configCounter[key]++ + if p.configCounter[key] > p.cleanThreshold { + _ = p.db.Delete([]byte(key), nil) + logger.Info(context.Background(), "no config, delete checkpoint", key) + delete(p.configCounter, key) + } } } diff --git a/pluginmanager/checkpoint_manager_test.go b/pluginmanager/checkpoint_manager_test.go index f1d0504eb5..ac7cbbf296 100644 --- a/pluginmanager/checkpoint_manager_test.go +++ b/pluginmanager/checkpoint_manager_test.go @@ -16,11 +16,19 @@ package pluginmanager import ( "context" + "os" "testing" "time" + + "github.com/alibaba/ilogtail/pkg/config" ) +func MkdirDataDir() { + os.MkdirAll(config.LoongcollectorGlobalConfig.LoongcollectorDataDir, 0750) +} + func Test_checkPointManager_SaveGetCheckpoint(t *testing.T) { + MkdirDataDir() CheckPointManager.Init() tests := []string{"xx", "xx", "213##13143", "~/.."} for _, tt := range tests { @@ -39,6 +47,7 @@ func Test_checkPointManager_SaveGetCheckpoint(t *testing.T) { } func Test_checkPointManager_HoldOn(t *testing.T) { + MkdirDataDir() CheckPointManager.Init() t.Run("hold on resume", func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(10)) @@ -46,8 +55,8 @@ func Test_checkPointManager_HoldOn(t *testing.T) { shutdown := make(chan struct{}, 1) go func() { for i := 0; i < 100; i++ { - CheckPointManager.Resume() - CheckPointManager.HoldOn() + CheckPointManager.Start() + CheckPointManager.Stop() } shutdown <- struct{}{} }() @@ -61,11 +70,22 @@ func Test_checkPointManager_HoldOn(t *testing.T) { } func Test_checkPointManager_run(t *testing.T) { + MkdirDataDir() CheckPointManager.Init() t.Run("hold on resume", func(t *testing.T) { CheckPointManager.SaveCheckpoint("1", "xx", []byte("xxxxx")) CheckPointManager.SaveCheckpoint("2", "yy", []byte("yyyyyy")) *CheckPointCleanInterval = 1 + CheckPointManager.cleanThreshold = 3 + if 
data, err := CheckPointManager.GetCheckpoint("1", "xx"); err != nil || string(data) != "xxxxx" { + t.Errorf("checkPointManager.GetCheckpoint() error, %v %v", err, string(data)) + } + + if data, err := CheckPointManager.GetCheckpoint("2", "yy"); err != nil || string(data) != "yyyyyy" { + t.Errorf("checkPointManager.GetCheckpoint() error, %v %v", err, string(data)) + } + CheckPointManager.Start() + time.Sleep(time.Second * time.Duration(1)) if data, err := CheckPointManager.GetCheckpoint("1", "xx"); err != nil || string(data) != "xxxxx" { t.Errorf("checkPointManager.GetCheckpoint() error, %v %v", err, string(data)) } @@ -73,7 +93,6 @@ func Test_checkPointManager_run(t *testing.T) { if data, err := CheckPointManager.GetCheckpoint("2", "yy"); err != nil || string(data) != "yyyyyy" { t.Errorf("checkPointManager.GetCheckpoint() error, %v %v", err, string(data)) } - CheckPointManager.Resume() time.Sleep(time.Second * time.Duration(5)) if data, err := CheckPointManager.GetCheckpoint("1", "xx"); err == nil { t.Errorf("checkPointManager.GetCheckpoint() error, %v %v", err, string(data)) @@ -85,14 +104,17 @@ func Test_checkPointManager_run(t *testing.T) { }) *CheckPointCleanInterval = 3600 - CheckPointManager.HoldOn() + CheckPointManager.Stop() } func Test_checkPointManager_keyMatch(t *testing.T) { + MkdirDataDir() CheckPointManager.Init() t.Run("key match", func(t *testing.T) { + LogtailConfigLock.Lock() LogtailConfig["test_1/1"] = nil LogtailConfig["test_2/1"] = nil + LogtailConfigLock.Unlock() if got := CheckPointManager.keyMatch([]byte("test_1")); got { t.Errorf("checkPointManager.Test_checkPointManager_keyMatch()") } @@ -112,7 +134,9 @@ func Test_checkPointManager_keyMatch(t *testing.T) { if got := CheckPointManager.keyMatch([]byte("texst_1^xxx")); got { t.Errorf("checkPointManager.Test_checkPointManager_keyMatch()") } + LogtailConfigLock.Lock() delete(LogtailConfig, "test_1/1") delete(LogtailConfig, "test_2/1") + LogtailConfigLock.Unlock() }) } diff --git 
a/pluginmanager/config_update_test.go b/pluginmanager/config_update_test.go index a9f19b2b55..e046164a6d 100644 --- a/pluginmanager/config_update_test.go +++ b/pluginmanager/config_update_test.go @@ -19,11 +19,11 @@ package pluginmanager import ( "context" - "strings" "testing" "time" "github.com/alibaba/ilogtail/pkg/logger" + _ "github.com/alibaba/ilogtail/plugins/aggregator/baseagg" "github.com/alibaba/ilogtail/plugins/flusher/checker" "github.com/stretchr/testify/suite" @@ -43,73 +43,82 @@ type configUpdateTestSuite struct { func (s *configUpdateTestSuite) BeforeTest(suiteName, testName string) { logger.Infof(context.Background(), "========== %s %s test start ========================", suiteName, testName) logger.Info(context.Background(), "load logstore config", updateConfigName) - s.NoError(LoadMockConfig(updateConfigName, updateConfigName, updateConfigName, GetTestConfig(updateConfigName))) - s.NoError(Resume()) + s.NoError(LoadAndStartMockConfig(updateConfigName, updateConfigName, updateConfigName, GetTestConfig(updateConfigName))) time.Sleep(time.Second * time.Duration(1)) } func (s *configUpdateTestSuite) AfterTest(suiteName, testName string) { logger.Infof(context.Background(), "========== %s %s test end ========================", suiteName, testName) - s.NoError(HoldOn(false)) + s.NoError(StopAllPipelines(false)) + s.NoError(StopAllPipelines(true)) + LogtailConfigLock.Lock() LogtailConfig = make(map[string]*LogstoreConfig) - DisabledLogtailConfig = make(map[string]*LogstoreConfig) + LogtailConfigLock.Unlock() } func (s *configUpdateTestSuite) TestConfigUpdate() { // block config + LogtailConfigLock.RLock() config := LogtailConfig[updateConfigName] + LogtailConfigLock.RUnlock() s.NotNil(config, "%s logstrore config should exist", updateConfigName) checkFlusher, ok := GetConfigFlushers(config.PluginRunner)[0].(*checker.FlusherChecker) s.True(ok) s.Equal(0, checkFlusher.GetLogCount(), "the block flusher checker doesn't have any logs") // update same hang 
config - s.NoError(HoldOn(false)) + s.NoError(Stop(updateConfigName, false)) s.Equal(0, checkFlusher.GetLogCount(), "the hold on block flusher checker doesn't have any logs") - err := LoadMockConfig(updateConfigName, updateConfigName, updateConfigName, GetTestConfig(updateConfigName)) - s.True(strings.Contains(err.Error(), "failed to create config because timeout stop has happened on it")) - s.NoError(LoadMockConfig(noblockUpdateConfigName, noblockUpdateConfigName, noblockUpdateConfigName, GetTestConfig(noblockUpdateConfigName))) - s.NoError(Resume()) - s.Nil(LogtailConfig[updateConfigName], "the stopping config only allow to load same config when stopped") + _ = LoadAndStartMockConfig(updateConfigName, updateConfigName, updateConfigName, GetTestConfig(updateConfigName)) + // Since independently load config, reload block config will be allowed + s.NoError(LoadAndStartMockConfig(noblockUpdateConfigName, noblockUpdateConfigName, noblockUpdateConfigName, GetTestConfig(noblockUpdateConfigName))) + LogtailConfigLock.RLock() + s.NotNil(LogtailConfig[updateConfigName]) s.NotNil(LogtailConfig[noblockUpdateConfigName]) + LogtailConfigLock.RUnlock() // unblock old config checkFlusher.Block = false time.Sleep(time.Second * time.Duration(5)) s.Equal(10000, checkFlusher.GetLogCount()) // this magic number(10000) must exceed number of logs can be hold in processor channel(LogsChan) + aggregator buffer(defaultLogGroup) + flusher channel(LogGroupsChan) + LogtailConfigLock.RLock() s.Equal(20000, GetConfigFlushers(LogtailConfig[noblockUpdateConfigName].PluginRunner)[0].(*checker.FlusherChecker).GetLogCount()) + LogtailConfigLock.RUnlock() } func (s *configUpdateTestSuite) TestConfigUpdateMany() { + LogtailConfigLock.RLock() config := LogtailConfig[updateConfigName] + LogtailConfigLock.RUnlock() s.NotNil(config, "%s logstrore config should exist", updateConfigName) checkFlusher, ok := GetConfigFlushers(config.PluginRunner)[0].(*checker.FlusherChecker) s.True(ok) s.Equal(0, 
checkFlusher.GetLogCount(), "the hold on block flusher checker doesn't have any logs") // load block config - for i := 0; i < 5; i++ { - s.NoError(HoldOn(false)) - err := LoadMockConfig(updateConfigName, updateConfigName, updateConfigName, GetTestConfig(updateConfigName)) - s.True(strings.Contains(err.Error(), "failed to create config because timeout stop has happened on it")) - s.NoError(Resume()) - s.Nil(LogtailConfig[updateConfigName], "the stopping config only allow to load same config when stopped") - } + Stop(updateConfigName, false) + err := LoadAndStartMockConfig(updateConfigName, updateConfigName, updateConfigName, GetTestConfig(updateConfigName)) + s.Nil(err) + s.NotNil(LogtailConfig[updateConfigName]) s.Equal(0, checkFlusher.GetLogCount(), "the hold on block flusher checker doesn't have any logs") checkFlusher.Block = false time.Sleep(time.Second * time.Duration(5)) s.Equal(checkFlusher.GetLogCount(), 10000) // load normal config - for i := 0; i < 5; i++ { - s.NoError(HoldOn(false)) - s.NoError(LoadMockConfig(noblockUpdateConfigName, noblockUpdateConfigName, noblockUpdateConfigName, GetTestConfig(noblockUpdateConfigName))) - s.NoError(Resume()) + for i := 0; i < 3; i++ { + s.NoError(StopAllPipelines(true)) + s.NoError(StopAllPipelines(false)) + s.NoError(LoadAndStartMockConfig(noblockUpdateConfigName, noblockUpdateConfigName, noblockUpdateConfigName, GetTestConfig(noblockUpdateConfigName))) + LogtailConfigLock.RLock() s.NotNil(LogtailConfig[noblockUpdateConfigName]) + LogtailConfigLock.RUnlock() time.Sleep(time.Millisecond) } + LogtailConfigLock.RLock() checkFlusher, ok = GetConfigFlushers(LogtailConfig[noblockUpdateConfigName].PluginRunner)[0].(*checker.FlusherChecker) + LogtailConfigLock.RUnlock() s.True(ok) time.Sleep(time.Second * time.Duration(5)) s.Equal(checkFlusher.GetLogCount(), 20000) @@ -117,25 +126,26 @@ func (s *configUpdateTestSuite) TestConfigUpdateMany() { func (s *configUpdateTestSuite) TestConfigUpdateName() { time.Sleep(time.Second * 
time.Duration(1)) + LogtailConfigLock.RLock() config := LogtailConfig[updateConfigName] + LogtailConfigLock.RUnlock() s.NotNil(config) checkFlusher, ok := GetConfigFlushers(config.PluginRunner)[0].(*checker.FlusherChecker) defer func() { checkFlusher.Block = false time.Sleep(time.Second * 5) - s.Equal(checkFlusher.GetLogCount(), 10000) + s.Equal(checkFlusher.GetLogCount(), 20000) }() s.True(ok) - s.NoError(HoldOn(false)) s.Equal(0, checkFlusher.GetLogCount(), "the hold on blocking flusher checker doesn't have any logs") - s.NoError(LoadMockConfig(updateConfigName+"_", updateConfigName+"_", updateConfigName+"_", GetTestConfig(updateConfigName))) - s.NoError(Resume()) - + s.NoError(LoadAndStartMockConfig(updateConfigName+"_", updateConfigName+"_", updateConfigName+"_", GetTestConfig(updateConfigName))) { - s.Nil(LogtailConfig[updateConfigName]) + LogtailConfigLock.RLock() + s.NotNil(LogtailConfig[updateConfigName]) s.NotNil(LogtailConfig[updateConfigName+"_"]) checkFlusher, ok := GetConfigFlushers(LogtailConfig[updateConfigName+"_"].PluginRunner)[0].(*checker.FlusherChecker) + LogtailConfigLock.RUnlock() s.True(ok) s.Equal(checkFlusher.GetLogCount(), 0) checkFlusher.Block = false @@ -144,31 +154,34 @@ func (s *configUpdateTestSuite) TestConfigUpdateName() { } } -func (s *configUpdateTestSuite) TestHoldOnExit() { +func (s *configUpdateTestSuite) TestStopAllExit() { + LogtailConfigLock.RLock() config := LogtailConfig[updateConfigName] + LogtailConfigLock.RUnlock() s.NotNil(config) checkFlusher, ok := GetConfigFlushers(config.PluginRunner)[0].(*checker.FlusherChecker) s.True(ok) checkFlusher.Block = false time.Sleep(time.Second * time.Duration(5)) - s.NoError(HoldOn(true)) + s.NoError(StopAllPipelines(true)) + s.NoError(StopAllPipelines(false)) s.Equal(20000, checkFlusher.GetLogCount()) - s.NoError(Resume()) } -func (s *configUpdateTestSuite) TestHoldOnExitTimeout() { +func (s *configUpdateTestSuite) TestStopAllExitTimeout() { time.Sleep(time.Second * time.Duration(1)) 
+ LogtailConfigLock.RLock() config := LogtailConfig[updateConfigName] + LogtailConfigLock.RUnlock() s.NotNil(config) checkFlusher, ok := GetConfigFlushers(config.PluginRunner)[0].(*checker.FlusherChecker) s.True(ok) s.Equal(0, checkFlusher.GetLogCount()) - s.NoError(HoldOn(true)) + s.NoError(StopAllPipelines(true)) + s.NoError(StopAllPipelines(false)) time.Sleep(time.Second) s.Equal(0, checkFlusher.GetLogCount()) checkFlusher.Block = false time.Sleep(time.Second * time.Duration(5)) s.Equal(10000, checkFlusher.GetLogCount()) - time.Sleep(time.Second * 10) - s.NoError(Resume()) } diff --git a/pluginmanager/container_config_manager.go b/pluginmanager/container_config_manager.go index 2b71dc462e..5ec7aaf047 100644 --- a/pluginmanager/container_config_manager.go +++ b/pluginmanager/container_config_manager.go @@ -103,9 +103,11 @@ func recordDeleteContainers(logGroup *protocol.LogGroup, containerIDs map[string projectSet := make(map[string]struct{}) // get project list + LogtailConfigLock.RLock() for _, logstoreConfig := range LogtailConfig { projectSet[logstoreConfig.ProjectName] = struct{}{} } + LogtailConfigLock.RUnlock() keys := make([]string, 0, len(projectSet)) for k := range projectSet { if len(k) > 0 { @@ -128,6 +130,8 @@ func refreshEnvAndLabel() { envSet = make(map[string]struct{}) containerLabelSet = make(map[string]struct{}) k8sLabelSet = make(map[string]struct{}) + + LogtailConfigLock.RLock() for _, logstoreConfig := range LogtailConfig { if logstoreConfig.CollectingContainersMeta { for key := range logstoreConfig.EnvSet { @@ -141,6 +145,7 @@ func refreshEnvAndLabel() { } } } + LogtailConfigLock.RUnlock() logger.Info(context.Background(), "envSet", envSet, "containerLabelSet", containerLabelSet, "k8sLabelSet", k8sLabelSet) } @@ -149,6 +154,7 @@ func compareEnvAndLabel() (diffEnvSet, diffContainerLabelSet, diffK8sLabelSet ma diffEnvSet = make(map[string]struct{}) diffContainerLabelSet = make(map[string]struct{}) diffK8sLabelSet = make(map[string]struct{}) + 
LogtailConfigLock.RLock() for _, logstoreConfig := range LogtailConfig { if logstoreConfig.CollectingContainersMeta { for key := range logstoreConfig.EnvSet { @@ -171,6 +177,7 @@ func compareEnvAndLabel() (diffEnvSet, diffContainerLabelSet, diffK8sLabelSet ma } } } + LogtailConfigLock.RUnlock() return diffEnvSet, diffContainerLabelSet, diffK8sLabelSet } @@ -178,9 +185,11 @@ func getContainersToRecord(containerIDs map[string]struct{}) (map[string]struct{ projectSet := make(map[string]struct{}) recordedContainerIds := make(map[string]struct{}) + LogtailConfigLock.RLock() for _, logstoreConfig := range LogtailConfig { projectSet[logstoreConfig.ProjectName] = struct{}{} } + LogtailConfigLock.RUnlock() keys := make([]string, 0, len(projectSet)) for k := range projectSet { if len(k) > 0 { @@ -227,9 +236,11 @@ func compareEnvAndLabelAndRecordContainer() []*helper.ContainerDetail { if len(diffEnvSet) != 0 || len(diffContainerLabelSet) != 0 || len(diffK8sLabelSet) != 0 { projectSet := make(map[string]struct{}) + LogtailConfigLock.RLock() for _, logstoreConfig := range LogtailConfig { projectSet[logstoreConfig.ProjectName] = struct{}{} } + LogtailConfigLock.RUnlock() keys := make([]string, 0, len(projectSet)) for k := range projectSet { if len(k) > 0 { @@ -267,10 +278,14 @@ func compareEnvAndLabelAndRecordContainer() []*helper.ContainerDetail { } func isCollectContainers() bool { + found := false + LogtailConfigLock.RLock() for _, logstoreConfig := range LogtailConfig { if logstoreConfig.CollectingContainersMeta { - return true + found = true + break } } - return false + LogtailConfigLock.RUnlock() + return found } diff --git a/pluginmanager/container_config_manager_test.go b/pluginmanager/container_config_manager_test.go index 8acdefc109..d5e6fad93b 100644 --- a/pluginmanager/container_config_manager_test.go +++ b/pluginmanager/container_config_manager_test.go @@ -155,7 +155,12 @@ func loadMockConfig() error { "type": "dockerStdout" } }` - return LoadLogstoreConfig(project, 
logstore, configName, 666, configStr) + err := LoadLogstoreConfig(project, logstore, configName, 666, configStr) + if err != nil { + return err + } + LogtailConfig[configName] = ToStartPipelineConfigWithInput + return nil } func (s *containerConfigTestSuite) TestLargeCountLog() { @@ -169,7 +174,7 @@ func (s *containerConfigTestSuite) TestLargeCountLog() { "DefaultLogGroupQueueSize": 4, "Tags" : { "base_version" : "0.1.0", - "logtail_version" : "0.16.19" + "loongcollector_version" : "0.16.19" } }, "inputs" : [ diff --git a/pluginmanager/context_imp.go b/pluginmanager/context_imp.go index 24eb046759..a58e698bbc 100644 --- a/pluginmanager/context_imp.go +++ b/pluginmanager/context_imp.go @@ -59,7 +59,7 @@ func (p *ContextImp) GetExtension(name string, cfg any) (pipeline.Extension, err } // create if not found - pluginMeta := p.logstoreC.genPluginMeta(name, false, false) + pluginMeta := p.logstoreC.genPluginMeta(name) err := loadExtension(pluginMeta, p.logstoreC, cfg) if err != nil { return nil, err diff --git a/pluginmanager/logstore_config.go b/pluginmanager/logstore_config.go index 3afbad7bf6..c31de03736 100644 --- a/pluginmanager/logstore_config.go +++ b/pluginmanager/logstore_config.go @@ -22,7 +22,6 @@ import ( "fmt" "strconv" "strings" - "sync" "sync/atomic" "github.com/alibaba/ilogtail/pkg/config" @@ -31,7 +30,6 @@ import ( "github.com/alibaba/ilogtail/pkg/models" "github.com/alibaba/ilogtail/pkg/pipeline" "github.com/alibaba/ilogtail/pkg/protocol" - "github.com/alibaba/ilogtail/pkg/util" "github.com/alibaba/ilogtail/plugins/input" ) @@ -93,7 +91,7 @@ type LogstoreConfig struct { ConfigName string ConfigNameWithSuffix string LogstoreKey int64 - FlushOutFlag bool + FlushOutFlag atomic.Bool // Each LogstoreConfig can have its independent GlobalConfig if the "global" field // is offered in configuration, see build-in StatisticsConfig and AlarmConfig. 
GlobalConfig *config.GlobalConfig @@ -103,26 +101,17 @@ type LogstoreConfig struct { Statistics LogstoreStatistics PluginRunner PluginRunner // private fields - alreadyStarted bool // if this flag is true, do not start it when config Resume configDetailHash string - // processShutdown chan struct{} - // flushShutdown chan struct{} - pauseChan chan struct{} - resumeChan chan struct{} - // processWaitSema sync.WaitGroup - // flushWaitSema sync.WaitGroup - pauseOrResumeWg sync.WaitGroup K8sLabelSet map[string]struct{} ContainerLabelSet map[string]struct{} EnvSet map[string]struct{} CollectingContainersMeta bool pluginID int32 - nodeID int32 } func (p *LogstoreStatistics) Init(context pipeline.Context) { - labels := pipeline.GetCommonLabels(context, &pipeline.PluginMeta{}) + labels := helper.GetCommonLabels(context, &pipeline.PluginMeta{}) metricsRecord := context.RegisterLogstoreConfigMetricRecord(labels) p.CollecLatencytMetric = helper.NewLatencyMetricAndRegister(metricsRecord, "collect_latency") p.RawLogMetric = helper.NewCounterMetricAndRegister(metricsRecord, "raw_log") @@ -142,60 +131,34 @@ func (p *LogstoreStatistics) Init(context pipeline.Context) { // 4. Start inputs (including metrics and services), just like aggregator, each input // has its own goroutine. func (lc *LogstoreConfig) Start() { - lc.FlushOutFlag = false + lc.FlushOutFlag.Store(false) logger.Info(lc.Context.GetRuntimeContext(), "config start", "begin") - lc.pauseChan = make(chan struct{}, 1) - lc.resumeChan = make(chan struct{}, 1) - lc.PluginRunner.Run() logger.Info(lc.Context.GetRuntimeContext(), "config start", "success") } // Stop stops plugin instances and corresponding goroutines of config. -// @exitFlag passed from Logtail, indicates that if Logtail will quit after this. +// @removedFlag passed from C++, indicates that if config will be removed after this. // Procedures: // 1. SetUrgent to all flushers to indicate them current state. // 2. Stop all input plugins, stop generating logs. 
// 3. Stop processor goroutine, pass all existing logs to aggregator. // 4. Stop all aggregator plugins, make all logs to LogGroups. // 5. Set stopping flag, stop flusher goroutine. -// 6. If Logtail is exiting and there are remaining data, try to flush once. +// 6. If config will be removed and there are remaining data, try to flush once. // 7. Stop flusher plugins. -func (lc *LogstoreConfig) Stop(exitFlag bool) error { - logger.Info(lc.Context.GetRuntimeContext(), "config stop", "begin", "exit", exitFlag) - if err := lc.PluginRunner.Stop(exitFlag); err != nil { +func (lc *LogstoreConfig) Stop(removedFlag bool) error { + logger.Info(lc.Context.GetRuntimeContext(), "config stop", "begin", "removing", removedFlag) + if err := lc.PluginRunner.Stop(removedFlag); err != nil { return err } logger.Info(lc.Context.GetRuntimeContext(), "Plugin Runner stop", "done") - close(lc.pauseChan) - close(lc.resumeChan) logger.Info(lc.Context.GetRuntimeContext(), "config stop", "success") return nil } -func (lc *LogstoreConfig) pause() { - lc.pauseOrResumeWg.Add(1) - lc.pauseChan <- struct{}{} - lc.pauseOrResumeWg.Wait() -} - -func (lc *LogstoreConfig) waitForResume() { - lc.pauseOrResumeWg.Done() - select { - case <-lc.resumeChan: - lc.pauseOrResumeWg.Done() - case <-GetFlushCancelToken(lc.PluginRunner): - } -} - -func (lc *LogstoreConfig) resume() { - lc.pauseOrResumeWg.Add(1) - lc.resumeChan <- struct{}{} - lc.pauseOrResumeWg.Wait() -} - const ( rawStringKey = "content" defaultTagPrefix = "__tag__:__prefix__" @@ -206,14 +169,6 @@ var ( tagSeparator = []byte("~=~") ) -func (lc *LogstoreConfig) ProcessRawLog(rawLog []byte, packID string, topic string) int { - log := &protocol.Log{} - log.Contents = append(log.Contents, &protocol.Log_Content{Key: rawStringKey, Value: string(rawLog)}) - logger.Debug(context.Background(), "Process raw log ", packID, topic, len(rawLog)) - lc.PluginRunner.ReceiveRawLog(&pipeline.LogWithContext{Log: log, Context: map[string]interface{}{"source": packID, 
"topic": topic}}) - return 0 -} - // extractTags extracts tags from rawTags and append them into log. // Rule: k1~=~v1^^^k2~=~v2 // rawTags @@ -372,8 +327,6 @@ func hasDockerStdoutInput(plugins map[string]interface{}) bool { return false } -var enableAlwaysOnlineForStdout = true - func createLogstoreConfig(project string, logstore string, configName string, logstoreKey int64, jsonStr string) (*LogstoreConfig, error) { var err error contextImp := &ContextImp{} @@ -389,21 +342,6 @@ func createLogstoreConfig(project string, logstore string, configName string, lo } contextImp.logstoreC = logstoreC - // Check if the config has been disabled (keep disabled if config detail is unchanged). - DisabledLogtailConfigLock.Lock() - if disabledConfig, hasDisabled := DisabledLogtailConfig[configName]; hasDisabled { - if disabledConfig.configDetailHash == logstoreC.configDetailHash { - DisabledLogtailConfigLock.Unlock() - return nil, fmt.Errorf("failed to create config because timeout "+ - "stop has happened on it: %v", configName) - } - delete(DisabledLogtailConfig, configName) - DisabledLogtailConfigLock.Unlock() - logger.Info(contextImp.GetRuntimeContext(), "retry timeout config because config detail has changed") - } else { - DisabledLogtailConfigLock.Unlock() - } - var plugins = make(map[string]interface{}) if err = json.Unmarshal([]byte(jsonStr), &plugins); err != nil { return nil, err @@ -414,26 +352,6 @@ func createLogstoreConfig(project string, logstore string, configName string, lo return nil, err } - // check AlwaysOnlineManager - if oldConfig, ok := GetAlwaysOnlineManager().GetCachedConfig(configName); ok { - logger.Info(contextImp.GetRuntimeContext(), "find alwaysOnline config", oldConfig.ConfigName, "config compare", oldConfig.configDetailHash == logstoreC.configDetailHash, - "new config hash", logstoreC.configDetailHash, "old config hash", oldConfig.configDetailHash) - if oldConfig.configDetailHash == logstoreC.configDetailHash { - logstoreC = oldConfig - 
logstoreC.alreadyStarted = true - logger.Info(contextImp.GetRuntimeContext(), "config is same after reload, use it again", GetFlushStoreLen(logstoreC.PluginRunner)) - return logstoreC, nil - } - oldConfig.resume() - _ = oldConfig.Stop(false) - logstoreC.PluginRunner.Merge(oldConfig.PluginRunner) - logger.Info(contextImp.GetRuntimeContext(), "config is changed after reload", "stop and create a new one") - } else if lastConfig, hasLastConfig := LastLogtailConfig[configName]; hasLastConfig { - // Move unsent LogGroups from last config to new config. - logstoreC.PluginRunner.Merge(lastConfig.PluginRunner) - } - - enableAlwaysOnline := enableAlwaysOnlineForStdout && hasDockerStdoutInput(plugins) logstoreC.ContainerLabelSet = make(map[string]struct{}) logstoreC.EnvSet = make(map[string]struct{}) logstoreC.K8sLabelSet = make(map[string]struct{}) @@ -506,11 +424,11 @@ func createLogstoreConfig(project string, logstore string, configName string, lo } } - logstoreC.GlobalConfig = &config.LogtailGlobalConfig + logstoreC.GlobalConfig = &config.LoongcollectorGlobalConfig // If plugins config has "global" field, then override the logstoreC.GlobalConfig - if pluginConfigInterface, flag := plugins["global"]; flag || enableAlwaysOnline { + if pluginConfigInterface, flag := plugins["global"]; flag { pluginConfig := &config.GlobalConfig{} - *pluginConfig = config.LogtailGlobalConfig + *pluginConfig = config.LoongcollectorGlobalConfig if flag { configJSONStr, err := json.Marshal(pluginConfigInterface) //nolint:govet if err != nil { @@ -521,9 +439,6 @@ func createLogstoreConfig(project string, logstore string, configName string, lo return nil, err } } - if enableAlwaysOnline { - pluginConfig.AlwaysOnline = true - } logstoreC.GlobalConfig = pluginConfig logger.Debug(contextImp.GetRuntimeContext(), "load plugin config", *logstoreC.GlobalConfig) } @@ -562,7 +477,7 @@ func createLogstoreConfig(project string, logstore string, configName string, lo } pluginType := 
getPluginType(pluginTypeWithIDStr) logger.Debug(contextImp.GetRuntimeContext(), "add extension", pluginType) - err = loadExtension(logstoreC.genPluginMeta(pluginTypeWithIDStr, false, false), logstoreC, extension["detail"]) + err = loadExtension(logstoreC.genPluginMeta(pluginTypeWithIDStr), logstoreC, extension["detail"]) if err != nil { return nil, err } @@ -584,10 +499,10 @@ func createLogstoreConfig(project string, logstore string, configName string, lo if _, isMetricInput := pipeline.MetricInputs[pluginType]; isMetricInput { // Load MetricInput plugin defined in pipeline.MetricInputs // pipeline.MetricInputs will be renamed in a future version - err = loadMetric(logstoreC.genPluginMeta(pluginTypeWithIDStr, true, false), logstoreC, input["detail"]) + err = loadMetric(logstoreC.genPluginMeta(pluginTypeWithIDStr), logstoreC, input["detail"]) } else if _, isServiceInput := pipeline.ServiceInputs[pluginType]; isServiceInput { // Load ServiceInput plugin defined in pipeline.ServiceInputs - err = loadService(logstoreC.genPluginMeta(pluginTypeWithIDStr, true, false), logstoreC, input["detail"]) + err = loadService(logstoreC.genPluginMeta(pluginTypeWithIDStr), logstoreC, input["detail"]) } if err != nil { return nil, err @@ -614,7 +529,7 @@ func createLogstoreConfig(project string, logstore string, configName string, lo if pluginTypeWithIDStr, ok := pluginTypeWithID.(string); ok { pluginType := getPluginType(pluginTypeWithIDStr) logger.Debug(contextImp.GetRuntimeContext(), "add processor", pluginType) - err = loadProcessor(logstoreC.genPluginMeta(pluginTypeWithIDStr, true, false), i, logstoreC, processor["detail"]) + err = loadProcessor(logstoreC.genPluginMeta(pluginTypeWithIDStr), i, logstoreC, processor["detail"]) if err != nil { return nil, err } @@ -641,7 +556,7 @@ func createLogstoreConfig(project string, logstore string, configName string, lo if pluginTypeWithIDStr, ok := pluginTypeWithID.(string); ok { pluginType := getPluginType(pluginTypeWithIDStr) 
logger.Debug(contextImp.GetRuntimeContext(), "add aggregator", pluginType) - err = loadAggregator(logstoreC.genPluginMeta(pluginTypeWithIDStr, true, false), logstoreC, aggregator["detail"]) + err = loadAggregator(logstoreC.genPluginMeta(pluginTypeWithIDStr), logstoreC, aggregator["detail"]) if err != nil { return nil, err } @@ -664,19 +579,14 @@ func createLogstoreConfig(project string, logstore string, configName string, lo if flushersFound { flushers, ok := pluginConfig.([]interface{}) if ok { - flushersLen := len(flushers) - for num, flusherInterface := range flushers { + for _, flusherInterface := range flushers { flusher, ok := flusherInterface.(map[string]interface{}) if ok { if pluginTypeWithID, ok := flusher["type"]; ok { if pluginTypeWithIDStr, ok := pluginTypeWithID.(string); ok { pluginType := getPluginType(pluginTypeWithIDStr) logger.Debug(contextImp.GetRuntimeContext(), "add flusher", pluginType) - lastOne := false - if num == flushersLen-1 { - lastOne = true - } - err = loadFlusher(logstoreC.genPluginMeta(pluginTypeWithIDStr, true, lastOne), logstoreC, flusher["detail"]) + err = loadFlusher(logstoreC.genPluginMeta(pluginTypeWithIDStr), logstoreC, flusher["detail"]) if err != nil { return nil, err } @@ -730,7 +640,9 @@ func initPluginRunner(lc *LogstoreConfig) (PluginRunner, error) { func LoadLogstoreConfig(project string, logstore string, configName string, logstoreKey int64, jsonStr string) error { if len(jsonStr) == 0 { logger.Info(context.Background(), "delete config", configName, "logstore", logstore) + LogtailConfigLock.Lock() delete(LogtailConfig, configName) + LogtailConfigLock.Unlock() return nil } logger.Info(context.Background(), "load config", configName, "logstore", logstore) @@ -738,10 +650,28 @@ func LoadLogstoreConfig(project string, logstore string, configName string, logs if err != nil { return err } - LogtailConfig[configName] = logstoreC + if logstoreC.PluginRunner.IsWithInputPlugin() { + ToStartPipelineConfigWithInput = logstoreC + 
} else { + ToStartPipelineConfigWithoutInput = logstoreC + } return nil } +func UnloadPartiallyLoadedConfig(configName string) error { + logger.Info(context.Background(), "unload config", configName) + if ToStartPipelineConfigWithInput.ConfigNameWithSuffix == configName { + ToStartPipelineConfigWithInput = nil + return nil + } + if ToStartPipelineConfigWithoutInput.ConfigNameWithSuffix == configName { + ToStartPipelineConfigWithoutInput = nil + return nil + } + logger.Error(context.Background(), "unload config", "config not found", configName) + return fmt.Errorf("config not found") +} + func loadBuiltinConfig(name string, project string, logstore string, configName string, cfgStr string) (*LogstoreConfig, error) { logger.Infof(context.Background(), "load built-in config %v, config name: %v, logstore: %v", name, configName, logstore) @@ -861,42 +791,30 @@ func getPluginType(pluginTypeWithID string) string { return pluginTypeWithID } -func (lc *LogstoreConfig) genPluginMeta(pluginTypeWithID string, genNodeID bool, lastOne bool) *pipeline.PluginMeta { - nodeID := "" - childNodeID := "" +func (lc *LogstoreConfig) genPluginMeta(pluginTypeWithID string) *pipeline.PluginMeta { if isPluginTypeWithID(pluginTypeWithID) { pluginTypeWithID := pluginTypeWithID if idx := strings.IndexByte(pluginTypeWithID, '#'); idx != -1 { pluginTypeWithID = pluginTypeWithID[:idx] } if ids := strings.IndexByte(pluginTypeWithID, '/'); ids != -1 { - if genNodeID { - nodeID, childNodeID = lc.genNodeID(lastOne) - } if pluginID, err := strconv.ParseInt(pluginTypeWithID[ids+1:], 10, 32); err == nil { atomic.StoreInt32(&lc.pluginID, int32(pluginID)) } return &pipeline.PluginMeta{ - PluginTypeWithID: pluginTypeWithID, - PluginType: pluginTypeWithID[:ids], - PluginID: pluginTypeWithID[ids+1:], - NodeID: nodeID, - ChildNodeID: childNodeID, + PluginTypeWithID: getPluginTypeWithID(pluginTypeWithID), + PluginType: getPluginType(pluginTypeWithID), + PluginID: getPluginID(pluginTypeWithID), } } } pluginType 
:= pluginTypeWithID pluginID := lc.genPluginID() - if genNodeID { - nodeID, childNodeID = lc.genNodeID(lastOne) - } pluginTypeWithID = fmt.Sprintf("%s/%s", pluginType, pluginID) return &pipeline.PluginMeta{ - PluginTypeWithID: pluginTypeWithID, - PluginType: pluginType, - PluginID: pluginID, - NodeID: nodeID, - ChildNodeID: childNodeID, + PluginTypeWithID: getPluginTypeWithID(pluginTypeWithID), + PluginType: getPluginType(pluginTypeWithID), + PluginID: getPluginID(pluginTypeWithID), } } @@ -907,6 +825,29 @@ func isPluginTypeWithID(pluginTypeWithID string) bool { return false } +func getPluginID(pluginTypeWithID string) string { + slashCount := strings.Count(pluginTypeWithID, "/") + switch slashCount { + case 0: + return "" + case 1: + if idx := strings.IndexByte(pluginTypeWithID, '/'); idx != -1 { + return pluginTypeWithID[idx+1:] + } + default: + if firstIdx := strings.IndexByte(pluginTypeWithID, '/'); firstIdx != -1 { + if lastIdx := strings.LastIndexByte(pluginTypeWithID, '/'); lastIdx != -1 { + return pluginTypeWithID[firstIdx+1 : lastIdx] + } + } + } + return "" +} + +func getPluginTypeWithID(pluginTypeWithID string) string { + return fmt.Sprintf("%s/%s", getPluginType(pluginTypeWithID), getPluginID(pluginTypeWithID)) +} + func GetPluginPriority(pluginTypeWithID string) int { if idx := strings.IndexByte(pluginTypeWithID, '#'); idx != -1 { val, err := strconv.Atoi(pluginTypeWithID[idx+1:]) @@ -922,15 +863,8 @@ func (lc *LogstoreConfig) genPluginID() string { return fmt.Sprintf("%v", atomic.AddInt32(&lc.pluginID, 1)) } -func (lc *LogstoreConfig) genNodeID(lastOne bool) (string, string) { - id := atomic.AddInt32(&lc.nodeID, 1) - if lastOne { - return fmt.Sprintf("%v", id), fmt.Sprintf("%v", -1) - } - return fmt.Sprintf("%v", id), fmt.Sprintf("%v", id+1) -} - func init() { + LogtailConfigLock.Lock() LogtailConfig = make(map[string]*LogstoreConfig) - _ = util.InitFromEnvBool("ALIYUN_LOGTAIL_ENABLE_ALWAYS_ONLINE_FOR_STDOUT", &enableAlwaysOnlineForStdout, true) + 
LogtailConfigLock.Unlock() } diff --git a/pluginmanager/logstore_config_test.go b/pluginmanager/logstore_config_test.go index 01675e3fc7..cc03df3768 100644 --- a/pluginmanager/logstore_config_test.go +++ b/pluginmanager/logstore_config_test.go @@ -48,7 +48,9 @@ type logstoreConfigTestSuite struct { func (s *logstoreConfigTestSuite) BeforeTest(suiteName, testName string) { logger.Infof(context.Background(), "========== %s %s test start ========================", suiteName, testName) + LogtailConfigLock.Lock() LogtailConfig = make(map[string]*LogstoreConfig) + LogtailConfigLock.Unlock() } func (s *logstoreConfigTestSuite) AfterTest(suiteName, testName string) { @@ -97,7 +99,7 @@ func (s *logstoreConfigTestSuite) TestPluginGlobalConfig() { } ] }` - s.NoError(LoadMockConfig("project", "logstore", "1", str), "load config fail") + s.NoError(LoadAndStartMockConfig("project", "logstore", "1", str), "load config fail") s.Equal(len(LogtailConfig), 1) s.Equal(LogtailConfig["1"].ConfigName, "1") config := LogtailConfig["1"] @@ -120,9 +122,9 @@ func (s *logstoreConfigTestSuite) TestPluginGlobalConfig() { } func (s *logstoreConfigTestSuite) TestLoadConfig() { - s.NoError(LoadMockConfig("project", "logstore", "1")) - s.NoError(LoadMockConfig("project", "logstore", "3")) - s.NoError(LoadMockConfig("project", "logstore", "2")) + s.NoError(LoadAndStartMockConfig("project", "logstore", "1")) + s.NoError(LoadAndStartMockConfig("project", "logstore", "3")) + s.NoError(LoadAndStartMockConfig("project", "logstore", "2")) s.Equal(len(LogtailConfig), 3) s.Equal(LogtailConfig["1"].ConfigName, "1") s.Equal(LogtailConfig["2"].ConfigName, "2") @@ -138,7 +140,7 @@ func (s *logstoreConfigTestSuite) TestLoadConfig() { s.Equal(len(config.PluginRunner.(*pluginv1Runner).AggregatorPlugins), 1) s.Equal(len(config.PluginRunner.(*pluginv1Runner).FlusherPlugins), 2) // global config - s.Equal(config.GlobalConfig, &global_config.LogtailGlobalConfig) + s.Equal(config.GlobalConfig, 
&global_config.LoongcollectorGlobalConfig) // check plugin inner info reg, ok := config.PluginRunner.(*pluginv1Runner).ProcessorPlugins[0].Processor.(*regex.ProcessorRegex) @@ -240,7 +242,7 @@ func (s *logstoreConfigTestSuite) TestLoadConfigWithExtension() { } ` - s.NoError(LoadMockConfig("project", "logstore", "test", jsonStr)) + s.NoError(LoadAndStartMockConfig("project", "logstore", "test", jsonStr)) s.Equal(len(LogtailConfig), 1) config := LogtailConfig["test"] s.Equal(config.ProjectName, "project") @@ -254,7 +256,7 @@ func (s *logstoreConfigTestSuite) TestLoadConfigWithExtension() { s.Equal(len(config.PluginRunner.(*pluginv1Runner).FlusherPlugins), 2) s.Equal(len(config.PluginRunner.(*pluginv1Runner).ExtensionPlugins), 1) // global config - s.Equal(config.GlobalConfig, &global_config.LogtailGlobalConfig) + s.Equal(config.GlobalConfig, &global_config.LoongcollectorGlobalConfig) // check plugin inner info _, ok := config.PluginRunner.(*pluginv1Runner).ProcessorPlugins[0].Processor.(*regex.ProcessorRegex) @@ -327,7 +329,7 @@ func (s *logstoreConfigTestSuite) TestGetExtension() { } ` - s.NoError(LoadMockConfig("project", "logstore", "test", jsonStr)) + s.NoError(LoadAndStartMockConfig("project", "logstore", "test", jsonStr)) s.Equal(len(LogtailConfig), 1) config := LogtailConfig["test"] s.Equal(config.ProjectName, "project") @@ -341,7 +343,7 @@ func (s *logstoreConfigTestSuite) TestGetExtension() { s.Equal(len(config.PluginRunner.(*pluginv1Runner).FlusherPlugins), 2) s.Equal(len(config.PluginRunner.(*pluginv1Runner).ExtensionPlugins), 1) // global config - s.Equal(config.GlobalConfig, &global_config.LogtailGlobalConfig) + s.Equal(config.GlobalConfig, &global_config.LoongcollectorGlobalConfig) // check plugin inner info _, ok := config.PluginRunner.(*pluginv1Runner).ProcessorPlugins[0].Processor.(*regex.ProcessorRegex) @@ -440,7 +442,7 @@ func TestLogstoreConfig_ProcessRawLogV2(t *testing.T) { l.PluginRunner = &pluginv1Runner{ LogsChan: make(chan 
*pipeline.LogWithContext, 10), } - l.GlobalConfig = &config.LogtailGlobalConfig + l.GlobalConfig = &config.LoongcollectorGlobalConfig l.GlobalConfig.UsingOldContentTag = true { assert.Equal(t, 0, l.ProcessRawLogV2(rawLogs, "", topic, tags)) @@ -527,51 +529,45 @@ func TestLogstoreConfig_ProcessRawLogV2(t *testing.T) { func Test_genPluginMeta(t *testing.T) { l := new(LogstoreConfig) { - result := l.genPluginMeta("testPlugin", false, false) + result := l.genPluginMeta("testPlugin") assert.Equal(t, "testPlugin", result.PluginType) assert.Regexp(t, `testPlugin/\d+`, result.PluginTypeWithID) assert.Regexp(t, `\d+`, result.PluginID) - assert.Equal(t, "", result.NodeID) - assert.Equal(t, "", result.ChildNodeID) } { - result := l.genPluginMeta("testPlugin", true, false) + result := l.genPluginMeta("testPlugin") assert.Equal(t, "testPlugin", result.PluginType) assert.Regexp(t, `testPlugin/\d+`, result.PluginTypeWithID) assert.Regexp(t, `\d+`, result.PluginID) - assert.Regexp(t, `\d+`, result.NodeID) - assert.Regexp(t, `\d+`, result.ChildNodeID) } { - result := l.genPluginMeta("testPlugin", true, true) + result := l.genPluginMeta("testPlugin") assert.Equal(t, "testPlugin", result.PluginType) assert.Regexp(t, `testPlugin/\d+`, result.PluginTypeWithID) assert.Regexp(t, `\d+`, result.PluginID) - assert.Regexp(t, `\d+`, result.NodeID) - assert.Regexp(t, `-1`, result.ChildNodeID) } { - result := l.genPluginMeta("testPlugin/customID", false, false) + result := l.genPluginMeta("testPlugin/customID") assert.Equal(t, "testPlugin", result.PluginType) assert.Equal(t, "testPlugin/customID", result.PluginTypeWithID) assert.Equal(t, "customID", result.PluginID) - assert.Equal(t, "", result.NodeID) - assert.Equal(t, "", result.ChildNodeID) } { - result := l.genPluginMeta("testPlugin/customID", true, false) + result := l.genPluginMeta("testPlugin/customID") assert.Equal(t, "testPlugin", result.PluginType) assert.Equal(t, "testPlugin/customID", result.PluginTypeWithID) assert.Equal(t, 
"customID", result.PluginID) - assert.Regexp(t, `\d+`, result.NodeID) - assert.Regexp(t, `\d+`, result.ChildNodeID) } { - result := l.genPluginMeta("testPlugin/customID", true, true) + result := l.genPluginMeta("testPlugin/customID") + assert.Equal(t, "testPlugin", result.PluginType) + assert.Equal(t, "testPlugin/customID", result.PluginTypeWithID) + assert.Equal(t, "customID", result.PluginID) + } + { + result := l.genPluginMeta("testPlugin/customID/123") assert.Equal(t, "testPlugin", result.PluginType) assert.Equal(t, "testPlugin/customID", result.PluginTypeWithID) assert.Equal(t, "customID", result.PluginID) - assert.Regexp(t, `\d+`, result.NodeID) - assert.Regexp(t, `-1`, result.ChildNodeID) } } diff --git a/pluginmanager/metric_export.go b/pluginmanager/metric_export.go index 43bfbd61fa..8fab36eb2d 100644 --- a/pluginmanager/metric_export.go +++ b/pluginmanager/metric_export.go @@ -17,6 +17,9 @@ import ( goruntimemetrics "runtime/metrics" "strconv" "strings" + + "github.com/alibaba/ilogtail/pkg/helper" + "github.com/alibaba/ilogtail/pkg/helper/k8smeta" ) const ( @@ -50,6 +53,8 @@ func GetGoDirectMetrics() []map[string]string { metrics := make([]map[string]string, 0) // go plugin metrics metrics = append(metrics, GetGoPluginMetrics()...) + // k8s meta metrics + metrics = append(metrics, k8smeta.GetMetaManagerMetrics()...) return metrics } @@ -71,9 +76,11 @@ func GetGoCppProvidedMetrics() []map[string]string { // go 插件指标,直接输出 func GetGoPluginMetrics() []map[string]string { metrics := make([]map[string]string, 0) + LogtailConfigLock.RLock() for _, config := range LogtailConfig { metrics = append(metrics, config.Context.ExportMetricRecords()...) } + LogtailConfigLock.RUnlock() return metrics } @@ -83,12 +90,10 @@ func GetAgentStat() []map[string]string { metric := map[string]string{} // key is the metric key in runtime/metrics, value is agent's metric key metricNames := map[string]string{ - // cpu - // "": "agent_go_cpu_percent", // mem. 
All memory mapped by the Go runtime into the current process as read-write. Note that this does not include memory mapped by code called via cgo or via the syscall package. Sum of all metrics in /memory/classes. - "/memory/classes/total:bytes": "agent_go_memory_used_mb", + "/memory/classes/total:bytes": helper.MetricAgentMemoryGo, // go routines cnt. Count of live goroutines. - "/sched/goroutines:goroutines": "agent_go_routines_total", + "/sched/goroutines:goroutines": helper.MetricAgentGoRoutinesTotal, } // metrics to read from runtime/metrics diff --git a/pluginmanager/plugin_manager.go b/pluginmanager/plugin_manager.go index 6dc59ca741..b8eca504f6 100644 --- a/pluginmanager/plugin_manager.go +++ b/pluginmanager/plugin_manager.go @@ -16,6 +16,7 @@ package pluginmanager import ( "context" + "fmt" "runtime" "runtime/debug" "sync" @@ -29,18 +30,25 @@ import ( ) // Following variables are exported so that tests of main package can reference them. +var LogtailConfigLock sync.RWMutex var LogtailConfig map[string]*LogstoreConfig -var LastLogtailConfig map[string]*LogstoreConfig + +// Configs that are inited and will be started. +// One config may have multiple Go pipelines, such as ContainerInfo (with input) and static file (without input). +var ToStartPipelineConfigWithInput *LogstoreConfig +var ToStartPipelineConfigWithoutInput *LogstoreConfig var ContainerConfig *LogstoreConfig +// Configs that were disabled because of slow or hang config. +var DisabledLogtailConfigLock sync.RWMutex +var DisabledLogtailConfig = make(map[string]*LogstoreConfig) + +var LastUnsendBuffer = make(map[string]PluginRunner) + // Two built-in logtail configs to report statistics and alarm (from system and other logtail configs). var StatisticsConfig *LogstoreConfig var AlarmConfig *LogstoreConfig -// Configs that were disabled because of slow or hang config. 
-var DisabledLogtailConfigLock sync.Mutex -var DisabledLogtailConfig = make(map[string]*LogstoreConfig) - var statisticsConfigJSON = `{ "global": { "InputIntervalMs" : 60000, @@ -50,7 +58,7 @@ var statisticsConfigJSON = `{ "DefaultLogGroupQueueSize": 4, "Tags" : { "base_version" : "` + config.BaseVersion + `", - "logtail_version" : "` + config.BaseVersion + `" + "loongcollector_version" : "` + config.BaseVersion + `" } }, "inputs" : [ @@ -70,7 +78,7 @@ var alarmConfigJSON = `{ "DefaultLogGroupQueueSize": 4, "Tags" : { "base_version" : "` + config.BaseVersion + `", - "logtail_version" : "` + config.BaseVersion + `" + "loongcollector_version" : "` + config.BaseVersion + `" } }, "inputs" : [ @@ -90,7 +98,7 @@ var containerConfigJSON = `{ "DefaultLogGroupQueueSize": 4, "Tags" : { "base_version" : "` + config.BaseVersion + `", - "logtail_version" : "` + config.BaseVersion + `" + "loongcollector_version" : "` + config.BaseVersion + `" } }, "inputs" : [ @@ -136,21 +144,13 @@ func Init() (err error) { // timeoutStop wrappers LogstoreConfig.Stop with timeout (5s by default). // @return true if Stop returns before timeout, otherwise false. -func timeoutStop(config *LogstoreConfig, flag bool) bool { - if !flag && config.GlobalConfig.AlwaysOnline { - config.pause() - GetAlwaysOnlineManager().AddCachedConfig(config, time.Duration(config.GlobalConfig.DelayStopSec)*time.Second) - logger.Info(config.Context.GetRuntimeContext(), "Pause config and add into always online manager", "done") - return true - } - +func timeoutStop(config *LogstoreConfig, removedFlag bool) bool { done := make(chan int) go func() { logger.Info(config.Context.GetRuntimeContext(), "Stop config in goroutine", "begin") - _ = config.Stop(flag) + _ = config.Stop(removedFlag) close(done) logger.Info(config.Context.GetRuntimeContext(), "Stop config in goroutine", "end") - // The config is valid but stop slowly, allow it to load again. 
DisabledLogtailConfigLock.Lock() if _, exists := DisabledLogtailConfig[config.ConfigNameWithSuffix]; !exists { @@ -159,7 +159,7 @@ func timeoutStop(config *LogstoreConfig, flag bool) bool { } delete(DisabledLogtailConfig, config.ConfigNameWithSuffix) DisabledLogtailConfigLock.Unlock() - logger.Info(config.Context.GetRuntimeContext(), "Valid but slow stop config, enable it again", config.ConfigName) + logger.Info(config.Context.GetRuntimeContext(), "Valid but slow stop config", config.ConfigName) }() select { case <-done: @@ -169,22 +169,45 @@ func timeoutStop(config *LogstoreConfig, flag bool) bool { } } -// HoldOn stops all config instance and checkpoint manager so that it is ready -// to load new configs or quit. +// StopAllPipelines stops all pipelines so that it is ready +// to quit. // For user-defined config, timeoutStop is used to avoid hanging. -func HoldOn(exitFlag bool) error { +func StopAllPipelines(withInput bool) error { defer panicRecover("Run plugin") - for _, logstoreConfig := range LogtailConfig { - if hasStopped := timeoutStop(logstoreConfig, exitFlag); !hasStopped { - // TODO: This alarm can not be sent to server in current alarm design. 
- logger.Error(logstoreConfig.Context.GetRuntimeContext(), "CONFIG_STOP_TIMEOUT_ALARM", - "timeout when stop config, goroutine might leak") - DisabledLogtailConfigLock.Lock() - DisabledLogtailConfig[logstoreConfig.ConfigNameWithSuffix] = logstoreConfig - DisabledLogtailConfigLock.Unlock() + LogtailConfigLock.Lock() + for configName, logstoreConfig := range LogtailConfig { + matchFlag := false + if withInput { + if logstoreConfig.PluginRunner.IsWithInputPlugin() { + matchFlag = true + } + } else { + if !logstoreConfig.PluginRunner.IsWithInputPlugin() { + matchFlag = true + } + } + if matchFlag { + logger.Info(logstoreConfig.Context.GetRuntimeContext(), "Stop config", configName) + if hasStopped := timeoutStop(logstoreConfig, true); !hasStopped { + // TODO: This alarm can not be sent to server in current alarm design. + logger.Error(logstoreConfig.Context.GetRuntimeContext(), "CONFIG_STOP_TIMEOUT_ALARM", + "timeout when stop config, goroutine might leak") + // TODO: The key should be versioned. Current implementation will overwrite the previous version when reload a block config multiple times. + DisabledLogtailConfigLock.Lock() + DisabledLogtailConfig[logstoreConfig.ConfigNameWithSuffix] = logstoreConfig + DisabledLogtailConfigLock.Unlock() + } } } + LogtailConfig = make(map[string]*LogstoreConfig) + LogtailConfigLock.Unlock() + + return nil +} + +// StopBuiltInModulesConfig stops built-in services (self monitor, alarm, container and checkpoint manager). 
+func StopBuiltInModulesConfig() { if StatisticsConfig != nil { if *flags.ForceSelfCollect { logger.Info(context.Background(), "force collect the static metrics") @@ -192,7 +215,8 @@ func HoldOn(exitFlag bool) error { StatisticsConfig.PluginRunner.RunPlugins(pluginMetricInput, control) control.WaitCancel() } - _ = StatisticsConfig.Stop(exitFlag) + _ = StatisticsConfig.Stop(true) + StatisticsConfig = nil } if AlarmConfig != nil { if *flags.ForceSelfCollect { @@ -201,7 +225,8 @@ func HoldOn(exitFlag bool) error { AlarmConfig.PluginRunner.RunPlugins(pluginMetricInput, control) control.WaitCancel() } - _ = AlarmConfig.Stop(exitFlag) + _ = AlarmConfig.Stop(true) + AlarmConfig = nil } if ContainerConfig != nil { if *flags.ForceSelfCollect { @@ -210,53 +235,65 @@ func HoldOn(exitFlag bool) error { ContainerConfig.PluginRunner.RunPlugins(pluginMetricInput, control) control.WaitCancel() } - _ = ContainerConfig.Stop(exitFlag) + _ = ContainerConfig.Stop(true) + ContainerConfig = nil } - // clear all config - LastLogtailConfig = LogtailConfig - LogtailConfig = make(map[string]*LogstoreConfig) - CheckPointManager.HoldOn() - return nil + CheckPointManager.Stop() } -// Resume starts all configs. -func Resume() error { +// Stop stop the given config. ConfigName is with suffix. +func Stop(configName string, removedFlag bool) error { defer panicRecover("Run plugin") - if StatisticsConfig != nil { - StatisticsConfig.Start() - } - if AlarmConfig != nil { - AlarmConfig.Start() - } - if ContainerConfig != nil { - ContainerConfig.Start() - } - // Remove deleted configs from online manager. 
- deletedCachedConfigs := GetAlwaysOnlineManager().GetDeletedConfigs(LogtailConfig) - for _, cfg := range deletedCachedConfigs { - go func(config *LogstoreConfig) { - defer panicRecover(config.ConfigName) - logger.Infof(config.Context.GetRuntimeContext(), "always online config %v is deleted, stop it", config.ConfigName) - err := config.Stop(true) - logger.Infof(config.Context.GetRuntimeContext(), "always online config %v stopped, error: %v", config.ConfigName, err) - }(cfg) - } - for _, logstoreConfig := range LogtailConfig { - if logstoreConfig.alreadyStarted { - logstoreConfig.resume() - continue + LogtailConfigLock.RLock() + if config, exists := LogtailConfig[configName]; exists { + LogtailConfigLock.RUnlock() + if hasStopped := timeoutStop(config, removedFlag); !hasStopped { + logger.Error(config.Context.GetRuntimeContext(), "CONFIG_STOP_TIMEOUT_ALARM", + "timeout when stop config, goroutine might leak") + DisabledLogtailConfigLock.Lock() + DisabledLogtailConfig[config.ConfigNameWithSuffix] = config + DisabledLogtailConfigLock.Unlock() + } + if !removedFlag { + LastUnsendBuffer[configName] = config.PluginRunner } - logstoreConfig.Start() + logger.Info(config.Context.GetRuntimeContext(), "Stop config now", configName) + LogtailConfigLock.Lock() + delete(LogtailConfig, configName) + LogtailConfigLock.Unlock() + return nil } + LogtailConfigLock.RUnlock() + return fmt.Errorf("config not found: %s", configName) +} - err := CheckPointManager.Init() - if err != nil { - logger.Error(context.Background(), "CHECKPOINT_INIT_ALARM", "init checkpoint manager error", err) +// Start starts the given config. ConfigName is with suffix. 
+func Start(configName string) error { + defer panicRecover("Run plugin") + if ToStartPipelineConfigWithInput != nil && ToStartPipelineConfigWithInput.ConfigNameWithSuffix == configName { + ToStartPipelineConfigWithInput.Start() + LogtailConfigLock.Lock() + LogtailConfig[ToStartPipelineConfigWithInput.ConfigNameWithSuffix] = ToStartPipelineConfigWithInput + LogtailConfigLock.Unlock() + ToStartPipelineConfigWithInput = nil + return nil + } else if ToStartPipelineConfigWithoutInput != nil && ToStartPipelineConfigWithoutInput.ConfigNameWithSuffix == configName { + ToStartPipelineConfigWithoutInput.Start() + LogtailConfigLock.Lock() + LogtailConfig[ToStartPipelineConfigWithoutInput.ConfigNameWithSuffix] = ToStartPipelineConfigWithoutInput + LogtailConfigLock.Unlock() + ToStartPipelineConfigWithoutInput = nil + return nil } - CheckPointManager.Resume() - // clear last logtail config - LastLogtailConfig = make(map[string]*LogstoreConfig) - return nil + // should never happen + var loadedConfigName string + if ToStartPipelineConfigWithInput != nil { + loadedConfigName = ToStartPipelineConfigWithInput.ConfigNameWithSuffix + } + if ToStartPipelineConfigWithoutInput != nil { + loadedConfigName += " " + ToStartPipelineConfigWithoutInput.ConfigNameWithSuffix + } + return fmt.Errorf("config unmatch with the loaded pipeline: given %s, expect %s", configName, loadedConfigName) } func init() { diff --git a/pluginmanager/plugin_manager_test.go b/pluginmanager/plugin_manager_test.go index 960a84cb26..e6a45f23a0 100644 --- a/pluginmanager/plugin_manager_test.go +++ b/pluginmanager/plugin_manager_test.go @@ -62,7 +62,7 @@ func (s *managerTestSuite) AfterTest(suiteName, testName string) { /* func (s *managerTestSuite) TestResumeHoldOn() { for i := 0; i < 10; i++ { - s.NoError(LoadMockConfig(), "got err when logad config") + s.NoError(LoadAndStartMockConfig(), "got err when logad config") s.NoError(Resume(), "got err when resume") time.Sleep(time.Millisecond * time.Duration(10)) 
s.NoError(HoldOn(false), "got err when hold on") @@ -71,20 +71,18 @@ func (s *managerTestSuite) TestResumeHoldOn() { */ func (s *managerTestSuite) TestPluginManager() { - s.NoError(LoadMockConfig(), "got err when logad config") - s.NoError(Resume(), "got err when resume") + s.NoError(LoadAndStartMockConfig(), "got err when logad config") time.Sleep(time.Millisecond * time.Duration(10)) - s.NoError(HoldOn(false), "got err when hold on") + s.NoError(Stop("test_config", false), "got err when hold on") for i := 0; i < 5; i++ { - s.NoError(LoadMockConfig(), "got err when logad config") - s.NoError(Resume(), "got err when resume") + s.NoError(LoadAndStartMockConfig(), "got err when logad config") time.Sleep(time.Millisecond * time.Duration(1500)) config, ok := LogtailConfig["test_config"] s.True(ok) s.Equal(2, len(GetConfigFlushers(config.PluginRunner))) c, ok := GetConfigFlushers(config.PluginRunner)[1].(*checker.FlusherChecker) s.True(ok) - s.NoError(HoldOn(false), "got err when hold on") + s.NoError(Stop("test_config", false), "got err when hold on") s.Equal(200, c.GetLogCount()) } } @@ -99,7 +97,7 @@ func GetTestConfig(configName string) string { } // project, logstore, config, configJsonStr -func LoadMockConfig(args ...string) error { +func LoadAndStartMockConfig(args ...string) error { project := "test_prj" if len(args) > 0 { project = args[0] @@ -170,5 +168,9 @@ func LoadMockConfig(args ...string) error { configStr = args[3] } - return LoadLogstoreConfig(project, logstore, configName, 666, configStr) + err := LoadLogstoreConfig(project, logstore, configName, 666, configStr) + if err != nil { + return err + } + return Start(configName) } diff --git a/pluginmanager/plugin_runner.go b/pluginmanager/plugin_runner.go index 720af22c70..13e7a5ba76 100644 --- a/pluginmanager/plugin_runner.go +++ b/pluginmanager/plugin_runner.go @@ -59,4 +59,6 @@ type PluginRunner interface { Merge(p PluginRunner) Stop(exit bool) error + + IsWithInputPlugin() bool } diff --git 
a/pluginmanager/plugin_runner_v1.go b/pluginmanager/plugin_runner_v1.go index b2d3af518b..0e50eba6dd 100644 --- a/pluginmanager/plugin_runner_v1.go +++ b/pluginmanager/plugin_runner_v1.go @@ -65,7 +65,7 @@ func (p *pluginv1Runner) Init(inputQueueSize int, flushQueueSize int) error { func (p *pluginv1Runner) AddDefaultAggregatorIfEmpty() error { if len(p.AggregatorPlugins) == 0 { - pluginMeta := p.LogstoreConfig.genPluginMeta("aggregator_default", true, false) + pluginMeta := p.LogstoreConfig.genPluginMeta("aggregator_default") logger.Debug(p.LogstoreConfig.Context.GetRuntimeContext(), "add default aggregator") if err := loadAggregator(pluginMeta, p.LogstoreConfig, nil); err != nil { return err @@ -78,7 +78,7 @@ func (p *pluginv1Runner) AddDefaultFlusherIfEmpty() error { if len(p.FlusherPlugins) == 0 { logger.Debug(p.LogstoreConfig.Context.GetRuntimeContext(), "add default flusher") category, options := flags.GetFlusherConfiguration() - pluginMeta := p.LogstoreConfig.genPluginMeta(category, true, true) + pluginMeta := p.LogstoreConfig.genPluginMeta(category) if err := loadFlusher(pluginMeta, p.LogstoreConfig, options); err != nil { return err } @@ -138,6 +138,10 @@ func (p *pluginv1Runner) RunPlugins(category pluginCategory, control *pipeline.A } } +func (p *pluginv1Runner) IsWithInputPlugin() bool { + return len(p.MetricPlugins) > 0 || len(p.ServicePlugins) > 0 +} + func (p *pluginv1Runner) addMetricInput(pluginMeta *pipeline.PluginMeta, input pipeline.MetricInputV1, inputInterval int) error { var wrapper MetricWrapperV1 wrapper.Config = p.LogstoreConfig @@ -297,21 +301,12 @@ func (p *pluginv1Runner) runFlusherInternal(cc *pipeline.AsyncControl) { if len(p.LogGroupsChan) == 0 { return } - case <-p.LogstoreConfig.pauseChan: - p.LogstoreConfig.waitForResume() case logGroup = <-p.LogGroupsChan: if logGroup == nil { continue } - // Check pause status if config is still alive, if paused, wait for resume. 
- select { - case <-p.LogstoreConfig.pauseChan: - p.LogstoreConfig.waitForResume() - default: - } - listLen := len(p.LogGroupsChan) + 1 logGroups := make([]*protocol.LogGroup, listLen) logGroups[0] = logGroup @@ -359,7 +354,7 @@ func (p *pluginv1Runner) runFlusherInternal(cc *pipeline.AsyncControl) { } break } - if !p.LogstoreConfig.FlushOutFlag { + if !p.LogstoreConfig.FlushOutFlag.Load() { time.Sleep(time.Duration(10) * time.Millisecond) continue } @@ -389,7 +384,7 @@ func (p *pluginv1Runner) Stop(exit bool) error { p.AggregateControl.WaitCancel() logger.Info(p.LogstoreConfig.Context.GetRuntimeContext(), "aggregator plugins stop", "done") - p.LogstoreConfig.FlushOutFlag = true + p.LogstoreConfig.FlushOutFlag.Store(true) p.FlushControl.WaitCancel() if exit && p.FlushOutStore.Len() > 0 { diff --git a/pluginmanager/plugin_runner_v2.go b/pluginmanager/plugin_runner_v2.go index d16a679f18..c9f9867d57 100644 --- a/pluginmanager/plugin_runner_v2.go +++ b/pluginmanager/plugin_runner_v2.go @@ -77,7 +77,7 @@ func (p *pluginv2Runner) Init(inputQueueSize int, flushQueueSize int) error { func (p *pluginv2Runner) AddDefaultAggregatorIfEmpty() error { if len(p.AggregatorPlugins) == 0 { - pluginMeta := p.LogstoreConfig.genPluginMeta("aggregator_default", true, false) + pluginMeta := p.LogstoreConfig.genPluginMeta("aggregator_default") logger.Debug(p.LogstoreConfig.Context.GetRuntimeContext(), "add default aggregator") if err := loadAggregator(pluginMeta, p.LogstoreConfig, nil); err != nil { return err @@ -142,6 +142,10 @@ func (p *pluginv2Runner) RunPlugins(category pluginCategory, control *pipeline.A } } +func (p *pluginv2Runner) IsWithInputPlugin() bool { + return len(p.MetricPlugins) > 0 || len(p.ServicePlugins) > 0 +} + func (p *pluginv2Runner) addMetricInput(pluginMeta *pipeline.PluginMeta, input pipeline.MetricInputV2, inputInterval int) error { var wrapper MetricWrapperV2 wrapper.Config = p.LogstoreConfig @@ -153,7 +157,7 @@ func (p *pluginv2Runner) 
addMetricInput(pluginMeta *pipeline.PluginMeta, input p p.MetricPlugins = append(p.MetricPlugins, &wrapper) p.TimerRunner = append(p.TimerRunner, &timerRunner{ state: input, - interval: wrapper.Interval * time.Millisecond, + interval: wrapper.Interval, context: p.LogstoreConfig.Context, latencyMetric: p.LogstoreConfig.Statistics.CollecLatencytMetric, }) @@ -189,7 +193,7 @@ func (p *pluginv2Runner) addAggregator(pluginMeta *pipeline.PluginMeta, aggregat p.AggregatorPlugins = append(p.AggregatorPlugins, &wrapper) p.TimerRunner = append(p.TimerRunner, &timerRunner{ state: aggregator, - interval: time.Millisecond * wrapper.Interval, + interval: wrapper.Interval, context: p.LogstoreConfig.Context, latencyMetric: p.LogstoreConfig.Statistics.CollecLatencytMetric, }) @@ -228,7 +232,7 @@ func (p *pluginv2Runner) runInput() { func (p *pluginv2Runner) runMetricInput(control *pipeline.AsyncControl) { for _, t := range p.TimerRunner { - if plugin, ok := t.state.(*MetricWrapperV2); ok { + if plugin, ok := t.state.(pipeline.MetricInputV2); ok { metric := plugin timer := t control.Run(func(cc *pipeline.AsyncControl) { @@ -236,6 +240,8 @@ func (p *pluginv2Runner) runMetricInput(control *pipeline.AsyncControl) { return metric.Read(p.InputPipeContext) }, cc) }) + } else { + logger.Error(p.LogstoreConfig.Context.GetRuntimeContext(), "METRIC_INPUT_V2_START_FAILURE", "type assertion", "failure") } } } @@ -322,21 +328,12 @@ func (p *pluginv2Runner) runFlusherInternal(cc *pipeline.AsyncControl) { if len(pipeChan) == 0 { return } - case <-p.LogstoreConfig.pauseChan: - p.LogstoreConfig.waitForResume() case event := <-pipeChan: if event == nil { continue } - // Check pause status if config is still alive, if paused, wait for resume. 
- select { - case <-p.LogstoreConfig.pauseChan: - p.LogstoreConfig.waitForResume() - default: - } - dataSize := len(pipeChan) + 1 data := make([]*models.PipelineGroupEvents, dataSize) data[0] = event @@ -380,7 +377,7 @@ func (p *pluginv2Runner) runFlusherInternal(cc *pipeline.AsyncControl) { } break } - if !p.LogstoreConfig.FlushOutFlag { + if !p.LogstoreConfig.FlushOutFlag.Load() { time.Sleep(time.Duration(10) * time.Millisecond) continue } @@ -410,7 +407,7 @@ func (p *pluginv2Runner) Stop(exit bool) error { p.AggregateControl.WaitCancel() logger.Info(p.LogstoreConfig.Context.GetRuntimeContext(), "aggregator plugins stop", "done") - p.LogstoreConfig.FlushOutFlag = true + p.LogstoreConfig.FlushOutFlag.Store(true) p.FlushControl.WaitCancel() if exit && p.FlushOutStore.Len() > 0 { diff --git a/pluginmanager/plugin_wrapper.go b/pluginmanager/plugin_wrapper.go index 942df327bc..9c0f2c61ab 100644 --- a/pluginmanager/plugin_wrapper.go +++ b/pluginmanager/plugin_wrapper.go @@ -32,16 +32,18 @@ type InputWrapper struct { Tags map[string]string Interval time.Duration - inputRecordsTotal pipeline.CounterMetric - inputRecordsSizeBytes pipeline.CounterMetric + outEventsTotal pipeline.CounterMetric + outEventGroupsTotal pipeline.CounterMetric + outSizeBytes pipeline.CounterMetric } -func (w *InputWrapper) InitMetricRecord(pluginMeta *pipeline.PluginMeta) { - labels := pipeline.GetCommonLabels(w.Config.Context, pluginMeta) - w.MetricRecord = w.Config.Context.RegisterMetricRecord(labels) +func (wrapper *InputWrapper) InitMetricRecord(pluginMeta *pipeline.PluginMeta) { + labels := helper.GetCommonLabels(wrapper.Config.Context, pluginMeta) + wrapper.MetricRecord = wrapper.Config.Context.RegisterMetricRecord(labels) - w.inputRecordsTotal = helper.NewCounterMetricAndRegister(w.MetricRecord, "input_records_total") - w.inputRecordsSizeBytes = helper.NewCounterMetricAndRegister(w.MetricRecord, "input_records_size_bytes") + wrapper.outEventsTotal = 
helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginOutEventsTotal) + wrapper.outEventGroupsTotal = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginOutEventGroupsTotal) + wrapper.outSizeBytes = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginOutSizeBytes) } // The service plugin is an input plugin used for passively receiving data. @@ -64,18 +66,22 @@ type ProcessorWrapper struct { pipeline.PluginContext Config *LogstoreConfig - procInRecordsTotal pipeline.CounterMetric - procOutRecordsTotal pipeline.CounterMetric - procTimeMS pipeline.CounterMetric + inEventsTotal pipeline.CounterMetric + inSizeBytes pipeline.CounterMetric + outEventsTotal pipeline.CounterMetric + outSizeBytes pipeline.CounterMetric + totalProcessTimeMs pipeline.CounterMetric } -func (w *ProcessorWrapper) InitMetricRecord(pluginMeta *pipeline.PluginMeta) { - labels := pipeline.GetCommonLabels(w.Config.Context, pluginMeta) - w.MetricRecord = w.Config.Context.RegisterMetricRecord(labels) +func (wrapper *ProcessorWrapper) InitMetricRecord(pluginMeta *pipeline.PluginMeta) { + labels := helper.GetCommonLabels(wrapper.Config.Context, pluginMeta) + wrapper.MetricRecord = wrapper.Config.Context.RegisterMetricRecord(labels) - w.procInRecordsTotal = helper.NewCounterMetricAndRegister(w.MetricRecord, "proc_in_records_total") - w.procOutRecordsTotal = helper.NewCounterMetricAndRegister(w.MetricRecord, "proc_out_records_total") - w.procTimeMS = helper.NewCounterMetricAndRegister(w.MetricRecord, "proc_time_ms") + wrapper.inEventsTotal = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginInEventsTotal) + wrapper.inSizeBytes = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginInSizeBytes) + wrapper.outEventsTotal = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginOutEventsTotal) + wrapper.outSizeBytes = 
helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginOutSizeBytes) + wrapper.totalProcessTimeMs = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginTotalProcessTimeMs) } /*--------------------- @@ -88,18 +94,18 @@ type AggregatorWrapper struct { Config *LogstoreConfig Interval time.Duration - aggrInRecordsTotal pipeline.CounterMetric - aggrOutRecordsTotal pipeline.CounterMetric - aggrTimeMS pipeline.CounterMetric + outEventsTotal pipeline.CounterMetric + outEventGroupsTotal pipeline.CounterMetric + outSizeBytes pipeline.CounterMetric } -func (w *AggregatorWrapper) InitMetricRecord(pluginMeta *pipeline.PluginMeta) { - labels := pipeline.GetCommonLabels(w.Config.Context, pluginMeta) - w.MetricRecord = w.Config.Context.RegisterMetricRecord(labels) +func (wrapper *AggregatorWrapper) InitMetricRecord(pluginMeta *pipeline.PluginMeta) { + labels := helper.GetCommonLabels(wrapper.Config.Context, pluginMeta) + wrapper.MetricRecord = wrapper.Config.Context.RegisterMetricRecord(labels) - w.aggrInRecordsTotal = helper.NewCounterMetricAndRegister(w.MetricRecord, "aggr_in_records_total") - w.aggrOutRecordsTotal = helper.NewCounterMetricAndRegister(w.MetricRecord, "aggr_out_records_total") - w.aggrTimeMS = helper.NewCounterMetricAndRegister(w.MetricRecord, "aggr_time_ms") + wrapper.outEventsTotal = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginOutEventsTotal) + wrapper.outEventGroupsTotal = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginOutEventGroupsTotal) + wrapper.outSizeBytes = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginOutSizeBytes) } /*--------------------- @@ -117,25 +123,18 @@ type FlusherWrapper struct { Config *LogstoreConfig Interval time.Duration - flusherInRecordsTotal pipeline.CounterMetric - flusherInRecordsSizeBytes pipeline.CounterMetric - flusherSuccessRecordsTotal pipeline.CounterMetric - flusherDiscardRecordsTotal 
pipeline.CounterMetric - flusherSuccessTimeMs pipeline.LatencyMetric - flusherErrorTimeMs pipeline.LatencyMetric - flusherErrorTotal pipeline.CounterMetric + inEventsTotal pipeline.CounterMetric + inEventGroupsTotal pipeline.CounterMetric + inSizeBytes pipeline.CounterMetric + totalDelayTimeMs pipeline.CounterMetric } -func (w *FlusherWrapper) InitMetricRecord(pluginMeta *pipeline.PluginMeta) { - labels := pipeline.GetCommonLabels(w.Config.Context, pluginMeta) - w.MetricRecord = w.Config.Context.RegisterMetricRecord(labels) - - w.flusherInRecordsTotal = helper.NewCounterMetricAndRegister(w.MetricRecord, "flusher_in_records_total") - w.flusherInRecordsSizeBytes = helper.NewCounterMetricAndRegister(w.MetricRecord, "flusher_in_records_size_bytes") - w.flusherDiscardRecordsTotal = helper.NewCounterMetricAndRegister(w.MetricRecord, "flusher_discard_records_total") - w.flusherSuccessRecordsTotal = helper.NewCounterMetricAndRegister(w.MetricRecord, "flusher_success_records_total") - w.flusherSuccessTimeMs = helper.NewLatencyMetricAndRegister(w.MetricRecord, "flusher_success_time_ms") - w.flusherErrorTimeMs = helper.NewLatencyMetricAndRegister(w.MetricRecord, "flusher_error_time_ms") - w.flusherErrorTotal = helper.NewCounterMetricAndRegister(w.MetricRecord, "flusher_error_total") +func (wrapper *FlusherWrapper) InitMetricRecord(pluginMeta *pipeline.PluginMeta) { + labels := helper.GetCommonLabels(wrapper.Config.Context, pluginMeta) + wrapper.MetricRecord = wrapper.Config.Context.RegisterMetricRecord(labels) + wrapper.inEventsTotal = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginInEventsTotal) + wrapper.inEventGroupsTotal = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginInEventGroupsTotal) + wrapper.inSizeBytes = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginInSizeBytes) + wrapper.totalDelayTimeMs = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, 
helper.MetricPluginTotalDelayMs) } diff --git a/pluginmanager/plugin_wrapper_aggregator_v1.go b/pluginmanager/plugin_wrapper_aggregator_v1.go index 0a99db49e8..a3404e45ca 100644 --- a/pluginmanager/plugin_wrapper_aggregator_v1.go +++ b/pluginmanager/plugin_wrapper_aggregator_v1.go @@ -37,30 +37,30 @@ type AggregatorWrapperV1 struct { Aggregator pipeline.AggregatorV1 } -func (p *AggregatorWrapperV1) Init(pluginMeta *pipeline.PluginMeta) error { - p.InitMetricRecord(pluginMeta) +func (wrapper *AggregatorWrapperV1) Init(pluginMeta *pipeline.PluginMeta) error { + wrapper.InitMetricRecord(pluginMeta) - interval, err := p.Aggregator.Init(p.Config.Context, p) + interval, err := wrapper.Aggregator.Init(wrapper.Config.Context, wrapper) if err != nil { - logger.Error(p.Config.Context.GetRuntimeContext(), "AGGREGATOR_INIT_ERROR", "Aggregator failed to initialize", p.Aggregator.Description(), "error", err) + logger.Error(wrapper.Config.Context.GetRuntimeContext(), "AGGREGATOR_INIT_ERROR", "Aggregator failed to initialize", wrapper.Aggregator.Description(), "error", err) return err } if interval == 0 { - interval = p.Config.GlobalConfig.AggregatIntervalMs + interval = wrapper.Config.GlobalConfig.AggregatIntervalMs } - p.Interval = time.Millisecond * time.Duration(interval) + wrapper.Interval = time.Millisecond * time.Duration(interval) return nil } // Add inserts @loggroup to LogGroupsChan if @loggroup is not empty. // It is called by associated Aggregator. // It returns errAggAdd when queue is full. -func (p *AggregatorWrapperV1) Add(loggroup *protocol.LogGroup) error { +func (wrapper *AggregatorWrapperV1) Add(loggroup *protocol.LogGroup) error { if len(loggroup.Logs) == 0 { return nil } select { - case p.LogGroupsChan <- loggroup: + case wrapper.LogGroupsChan <- loggroup: return nil default: return errAggAdd @@ -71,13 +71,13 @@ func (p *AggregatorWrapperV1) Add(loggroup *protocol.LogGroup) error { // It works like Add but adds a timeout policy when log group queue is full. 
// It returns errAggAdd when queue is full and timeout. // NOTE: no body calls it now. -func (p *AggregatorWrapperV1) AddWithWait(loggroup *protocol.LogGroup, duration time.Duration) error { +func (wrapper *AggregatorWrapperV1) AddWithWait(loggroup *protocol.LogGroup, duration time.Duration) error { if len(loggroup.Logs) == 0 { return nil } timer := time.NewTimer(duration) select { - case p.LogGroupsChan <- loggroup: + case wrapper.LogGroupsChan <- loggroup: return nil case <-timer.C: return errAggAdd @@ -86,16 +86,19 @@ func (p *AggregatorWrapperV1) AddWithWait(loggroup *protocol.LogGroup, duration // Run calls periodically Aggregator.Flush to get log groups from associated aggregator and // pass them to LogstoreConfig through LogGroupsChan. -func (p *AggregatorWrapperV1) Run(control *pipeline.AsyncControl) { - defer panicRecover(p.Aggregator.Description()) +func (wrapper *AggregatorWrapperV1) Run(control *pipeline.AsyncControl) { + defer panicRecover(wrapper.Aggregator.Description()) for { - exitFlag := util.RandomSleep(p.Interval, 0.1, control.CancelToken()) - logGroups := p.Aggregator.Flush() + exitFlag := util.RandomSleep(wrapper.Interval, 0.1, control.CancelToken()) + logGroups := wrapper.Aggregator.Flush() for _, logGroup := range logGroups { if len(logGroup.Logs) == 0 { continue } - p.LogGroupsChan <- logGroup + wrapper.outEventsTotal.Add(int64(len(logGroup.GetLogs()))) + wrapper.outEventGroupsTotal.Add(1) + wrapper.outSizeBytes.Add(int64(logGroup.Size())) + wrapper.LogGroupsChan <- logGroup } if exitFlag { return diff --git a/pluginmanager/plugin_wrapper_aggregator_v2.go b/pluginmanager/plugin_wrapper_aggregator_v2.go index 307638e7f1..ac2eed760e 100644 --- a/pluginmanager/plugin_wrapper_aggregator_v2.go +++ b/pluginmanager/plugin_wrapper_aggregator_v2.go @@ -17,6 +17,7 @@ package pluginmanager import ( "time" + "github.com/alibaba/ilogtail/pkg/helper" "github.com/alibaba/ilogtail/pkg/logger" "github.com/alibaba/ilogtail/pkg/models" 
"github.com/alibaba/ilogtail/pkg/pipeline" @@ -31,42 +32,49 @@ import ( type AggregatorWrapperV2 struct { AggregatorWrapper Aggregator pipeline.AggregatorV2 + + totalDelayTimeMs pipeline.CounterMetric } -func (p *AggregatorWrapperV2) Init(pluginMeta *pipeline.PluginMeta) error { - p.InitMetricRecord(pluginMeta) +func (wrapper *AggregatorWrapperV2) Init(pluginMeta *pipeline.PluginMeta) error { + wrapper.InitMetricRecord(pluginMeta) + wrapper.totalDelayTimeMs = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginTotalDelayMs) - interval, err := p.Aggregator.Init(p.Config.Context, p) + interval, err := wrapper.Aggregator.Init(wrapper.Config.Context, wrapper) if err != nil { - logger.Error(p.Config.Context.GetRuntimeContext(), "AGGREGATOR_INIT_ERROR", "Aggregator failed to initialize", p.Aggregator.Description(), "error", err) + logger.Error(wrapper.Config.Context.GetRuntimeContext(), "AGGREGATOR_INIT_ERROR", "Aggregator failed to initialize", wrapper.Aggregator.Description(), "error", err) return err } if interval == 0 { - interval = p.Config.GlobalConfig.AggregatIntervalMs + interval = wrapper.Config.GlobalConfig.AggregatIntervalMs } - p.Interval = time.Millisecond * time.Duration(interval) + wrapper.Interval = time.Millisecond * time.Duration(interval) return nil } -func (p *AggregatorWrapperV2) Record(events *models.PipelineGroupEvents, context pipeline.PipelineContext) error { - p.aggrInRecordsTotal.Add(int64(len(events.Events))) +func (wrapper *AggregatorWrapperV2) Record(events *models.PipelineGroupEvents, context pipeline.PipelineContext) error { startTime := time.Now() - err := p.Aggregator.Record(events, context) + + err := wrapper.Aggregator.Record(events, context) if err == nil { - p.aggrOutRecordsTotal.Add(int64(len(events.Events))) - p.aggrTimeMS.Add(time.Since(startTime).Milliseconds()) + wrapper.outEventsTotal.Add(int64(len(events.Events))) + wrapper.outEventGroupsTotal.Add(1) + for _, event := range events.Events { + 
wrapper.outSizeBytes.Add(event.GetSize()) + } } + wrapper.totalDelayTimeMs.Add(time.Since(startTime).Milliseconds()) return err } -func (p *AggregatorWrapperV2) GetResult(context pipeline.PipelineContext) error { - return p.Aggregator.GetResult(context) +func (wrapper *AggregatorWrapperV2) GetResult(context pipeline.PipelineContext) error { + return wrapper.Aggregator.GetResult(context) } -func (p *AggregatorWrapperV2) Add(loggroup *protocol.LogGroup) error { +func (wrapper *AggregatorWrapperV2) Add(loggroup *protocol.LogGroup) error { return nil } -func (p *AggregatorWrapperV2) AddWithWait(loggroup *protocol.LogGroup, duration time.Duration) error { +func (wrapper *AggregatorWrapperV2) AddWithWait(loggroup *protocol.LogGroup, duration time.Duration) error { return nil } diff --git a/pluginmanager/plugin_wrapper_flusher_v1.go b/pluginmanager/plugin_wrapper_flusher_v1.go index 405ae16b62..35da3699e9 100644 --- a/pluginmanager/plugin_wrapper_flusher_v1.go +++ b/pluginmanager/plugin_wrapper_flusher_v1.go @@ -38,23 +38,15 @@ func (wrapper *FlusherWrapperV1) IsReady(projectName string, logstoreName string } func (wrapper *FlusherWrapperV1) Flush(projectName string, logstoreName string, configName string, logGroupList []*protocol.LogGroup) error { - var total, size int64 + startTime := time.Now() for _, logGroup := range logGroupList { - total += int64(len(logGroup.Logs)) - size += int64(logGroup.Size()) + wrapper.inEventsTotal.Add(int64(len(logGroup.Logs))) + wrapper.inEventGroupsTotal.Add(1) + wrapper.inSizeBytes.Add(int64(logGroup.Size())) } - wrapper.flusherInRecordsTotal.Add(total) - wrapper.flusherInRecordsSizeBytes.Add(size) - startTime := time.Now() err := wrapper.Flusher.Flush(projectName, logstoreName, configName, logGroupList) - if err == nil { - wrapper.flusherSuccessRecordsTotal.Add(total) - wrapper.flusherSuccessTimeMs.Observe(float64(time.Since(startTime))) - } else { - wrapper.flusherErrorTotal.Add(1) - wrapper.flusherDiscardRecordsTotal.Add(total) - 
wrapper.flusherErrorTimeMs.Observe(float64(time.Since(startTime))) - } + + wrapper.totalDelayTimeMs.Add(time.Since(startTime).Milliseconds()) return err } diff --git a/pluginmanager/plugin_wrapper_flusher_v2.go b/pluginmanager/plugin_wrapper_flusher_v2.go index a3d5110faa..35a3ec9d09 100644 --- a/pluginmanager/plugin_wrapper_flusher_v2.go +++ b/pluginmanager/plugin_wrapper_flusher_v2.go @@ -37,24 +37,17 @@ func (wrapper *FlusherWrapperV2) IsReady(projectName string, logstoreName string } func (wrapper *FlusherWrapperV2) Export(pipelineGroupEvents []*models.PipelineGroupEvents, pipelineContext pipeline.PipelineContext) error { - var total, size int64 + startTime := time.Now() for _, groups := range pipelineGroupEvents { - total += int64(len(groups.Events)) + wrapper.inEventsTotal.Add(int64(len(groups.Events))) + wrapper.inEventGroupsTotal.Add(1) for _, event := range groups.Events { - size += event.GetSize() + wrapper.inSizeBytes.Add(event.GetSize()) } } - wrapper.flusherInRecordsTotal.Add(total) - wrapper.flusherInRecordsSizeBytes.Add(size) - startTime := time.Now() + err := wrapper.Flusher.Export(pipelineGroupEvents, pipelineContext) - if err == nil { - wrapper.flusherSuccessRecordsTotal.Add(total) - wrapper.flusherSuccessTimeMs.Observe(float64(time.Since(startTime))) - } else { - wrapper.flusherErrorTotal.Add(1) - wrapper.flusherDiscardRecordsTotal.Add(total) - wrapper.flusherErrorTimeMs.Observe(float64(time.Since(startTime))) - } + + wrapper.totalDelayTimeMs.Add(time.Since(startTime).Milliseconds()) return err } diff --git a/pluginmanager/plugin_wrapper_metric_v1.go b/pluginmanager/plugin_wrapper_metric_v1.go index 3e590af48f..853550d0ce 100644 --- a/pluginmanager/plugin_wrapper_metric_v1.go +++ b/pluginmanager/plugin_wrapper_metric_v1.go @@ -30,30 +30,30 @@ type MetricWrapperV1 struct { Input pipeline.MetricInputV1 } -func (p *MetricWrapperV1) Init(pluginMeta *pipeline.PluginMeta, inputInterval int) error { - p.InitMetricRecord(pluginMeta) +func (wrapper 
*MetricWrapperV1) Init(pluginMeta *pipeline.PluginMeta, inputInterval int) error { + wrapper.InitMetricRecord(pluginMeta) - interval, err := p.Input.Init(p.Config.Context) + interval, err := wrapper.Input.Init(wrapper.Config.Context) if err != nil { return err } if interval == 0 { interval = inputInterval } - p.Interval = time.Duration(interval) * time.Millisecond + wrapper.Interval = time.Duration(interval) * time.Millisecond return nil } -func (p *MetricWrapperV1) Run(control *pipeline.AsyncControl) { - logger.Info(p.Config.Context.GetRuntimeContext(), "start run metric ", p.Input.Description()) - defer panicRecover(p.Input.Description()) +func (wrapper *MetricWrapperV1) Run(control *pipeline.AsyncControl) { + logger.Info(wrapper.Config.Context.GetRuntimeContext(), "start run metric ", wrapper.Input.Description()) + defer panicRecover(wrapper.Input.Description()) for { - exitFlag := util.RandomSleep(p.Interval, 0.1, control.CancelToken()) + exitFlag := util.RandomSleep(wrapper.Interval, 0.1, control.CancelToken()) startTime := time.Now() - err := p.Input.Collect(p) - p.LatencyMetric.Observe(float64(time.Since(startTime))) + err := wrapper.Input.Collect(wrapper) + wrapper.LatencyMetric.Observe(float64(time.Since(startTime))) if err != nil { - logger.Error(p.Config.Context.GetRuntimeContext(), "INPUT_COLLECT_ALARM", "error", err) + logger.Error(wrapper.Config.Context.GetRuntimeContext(), "INPUT_COLLECT_ALARM", "error", err) } if exitFlag { return @@ -61,35 +61,36 @@ func (p *MetricWrapperV1) Run(control *pipeline.AsyncControl) { } } -func (p *MetricWrapperV1) AddData(tags map[string]string, fields map[string]string, t ...time.Time) { - p.AddDataWithContext(tags, fields, nil, t...) +func (wrapper *MetricWrapperV1) AddData(tags map[string]string, fields map[string]string, t ...time.Time) { + wrapper.AddDataWithContext(tags, fields, nil, t...) 
} -func (p *MetricWrapperV1) AddDataArray(tags map[string]string, +func (wrapper *MetricWrapperV1) AddDataArray(tags map[string]string, columns []string, values []string, t ...time.Time) { - p.AddDataArrayWithContext(tags, columns, values, nil, t...) + wrapper.AddDataArrayWithContext(tags, columns, values, nil, t...) } -func (p *MetricWrapperV1) AddRawLog(log *protocol.Log) { - p.AddRawLogWithContext(log, nil) +func (wrapper *MetricWrapperV1) AddRawLog(log *protocol.Log) { + wrapper.AddRawLogWithContext(log, nil) } -func (p *MetricWrapperV1) AddDataWithContext(tags map[string]string, fields map[string]string, ctx map[string]interface{}, t ...time.Time) { +func (wrapper *MetricWrapperV1) AddDataWithContext(tags map[string]string, fields map[string]string, ctx map[string]interface{}, t ...time.Time) { var logTime time.Time if len(t) == 0 { logTime = time.Now() } else { logTime = t[0] } - slsLog, _ := helper.CreateLog(logTime, len(t) != 0, p.Tags, tags, fields) - p.inputRecordsTotal.Add(1) - p.inputRecordsSizeBytes.Add(int64(slsLog.Size())) - p.LogsChan <- &pipeline.LogWithContext{Log: slsLog, Context: ctx} + slsLog, _ := helper.CreateLog(logTime, len(t) != 0, wrapper.Tags, tags, fields) + wrapper.outEventsTotal.Add(1) + wrapper.outEventGroupsTotal.Add(1) + wrapper.outSizeBytes.Add(int64(slsLog.Size())) + wrapper.LogsChan <- &pipeline.LogWithContext{Log: slsLog, Context: ctx} } -func (p *MetricWrapperV1) AddDataArrayWithContext(tags map[string]string, +func (wrapper *MetricWrapperV1) AddDataArrayWithContext(tags map[string]string, columns []string, values []string, ctx map[string]interface{}, @@ -100,14 +101,16 @@ func (p *MetricWrapperV1) AddDataArrayWithContext(tags map[string]string, } else { logTime = t[0] } - slsLog, _ := helper.CreateLogByArray(logTime, len(t) != 0, p.Tags, tags, columns, values) - p.inputRecordsTotal.Add(1) - p.inputRecordsSizeBytes.Add(int64(slsLog.Size())) - p.LogsChan <- &pipeline.LogWithContext{Log: slsLog, Context: ctx} + slsLog, _ := 
helper.CreateLogByArray(logTime, len(t) != 0, wrapper.Tags, tags, columns, values) + wrapper.outEventsTotal.Add(1) + wrapper.outEventGroupsTotal.Add(1) + wrapper.outSizeBytes.Add(int64(slsLog.Size())) + wrapper.LogsChan <- &pipeline.LogWithContext{Log: slsLog, Context: ctx} } -func (p *MetricWrapperV1) AddRawLogWithContext(log *protocol.Log, ctx map[string]interface{}) { - p.inputRecordsTotal.Add(1) - p.inputRecordsSizeBytes.Add(int64(log.Size())) - p.LogsChan <- &pipeline.LogWithContext{Log: log, Context: ctx} +func (wrapper *MetricWrapperV1) AddRawLogWithContext(log *protocol.Log, ctx map[string]interface{}) { + wrapper.outEventsTotal.Add(1) + wrapper.outEventGroupsTotal.Add(1) + wrapper.outSizeBytes.Add(int64(log.Size())) + wrapper.LogsChan <- &pipeline.LogWithContext{Log: log, Context: ctx} } diff --git a/pluginmanager/plugin_wrapper_metric_v2.go b/pluginmanager/plugin_wrapper_metric_v2.go index 5ea0ad8ec8..619c38ad3a 100644 --- a/pluginmanager/plugin_wrapper_metric_v2.go +++ b/pluginmanager/plugin_wrapper_metric_v2.go @@ -25,20 +25,20 @@ type MetricWrapperV2 struct { Input pipeline.MetricInputV2 } -func (p *MetricWrapperV2) Init(pluginMeta *pipeline.PluginMeta, inputInterval int) error { - p.InitMetricRecord(pluginMeta) +func (wrapper *MetricWrapperV2) Init(pluginMeta *pipeline.PluginMeta, inputInterval int) error { + wrapper.InitMetricRecord(pluginMeta) - interval, err := p.Input.Init(p.Config.Context) + interval, err := wrapper.Input.Init(wrapper.Config.Context) if err != nil { return err } if interval == 0 { interval = inputInterval } - p.Interval = time.Duration(interval) * time.Millisecond + wrapper.Interval = time.Duration(interval) * time.Millisecond return nil } -func (p *MetricWrapperV2) Read(pipelineContext pipeline.PipelineContext) error { - return p.Input.Read(pipelineContext) +func (wrapper *MetricWrapperV2) Read(pipelineContext pipeline.PipelineContext) error { + return wrapper.Input.Read(pipelineContext) } diff --git 
a/pluginmanager/plugin_wrapper_processor_v1.go b/pluginmanager/plugin_wrapper_processor_v1.go index ae92cfd0bd..a44e1a9fdf 100644 --- a/pluginmanager/plugin_wrapper_processor_v1.go +++ b/pluginmanager/plugin_wrapper_processor_v1.go @@ -34,10 +34,18 @@ func (wrapper *ProcessorWrapperV1) Init(pluginMeta *pipeline.PluginMeta) error { } func (wrapper *ProcessorWrapperV1) Process(logArray []*protocol.Log) []*protocol.Log { - wrapper.procInRecordsTotal.Add(int64(len(logArray))) startTime := time.Now().UnixMilli() + wrapper.inEventsTotal.Add(int64(len(logArray))) + for _, log := range logArray { + wrapper.inSizeBytes.Add(int64(log.Size())) + } + result := wrapper.Processor.ProcessLogs(logArray) - wrapper.procTimeMS.Add(time.Now().UnixMilli() - startTime) - wrapper.procOutRecordsTotal.Add(int64(len(result))) + + wrapper.outEventsTotal.Add(int64(len(result))) + for _, log := range result { + wrapper.outSizeBytes.Add(int64(log.Size())) + } + wrapper.totalProcessTimeMs.Add(time.Now().UnixMilli() - startTime) return result } diff --git a/pluginmanager/plugin_wrapper_processor_v2.go b/pluginmanager/plugin_wrapper_processor_v2.go index 6c1ac0e79d..f312d54fbb 100644 --- a/pluginmanager/plugin_wrapper_processor_v2.go +++ b/pluginmanager/plugin_wrapper_processor_v2.go @@ -17,6 +17,7 @@ package pluginmanager import ( "time" + "github.com/alibaba/ilogtail/pkg/helper" "github.com/alibaba/ilogtail/pkg/models" "github.com/alibaba/ilogtail/pkg/pipeline" ) @@ -24,18 +25,34 @@ import ( type ProcessorWrapperV2 struct { ProcessorWrapper Processor pipeline.ProcessorV2 + + inEventGroupsTotal pipeline.CounterMetric + outEventGroupsTotal pipeline.CounterMetric } func (wrapper *ProcessorWrapperV2) Init(pluginMeta *pipeline.PluginMeta) error { wrapper.InitMetricRecord(pluginMeta) + wrapper.inEventGroupsTotal = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, helper.MetricPluginInEventGroupsTotal) + wrapper.outEventGroupsTotal = helper.NewCounterMetricAndRegister(wrapper.MetricRecord, 
helper.MetricPluginOutEventGroupsTotal) return wrapper.Processor.Init(wrapper.Config.Context) } func (wrapper *ProcessorWrapperV2) Process(in *models.PipelineGroupEvents, context pipeline.PipelineContext) { - wrapper.procInRecordsTotal.Add(int64(len(in.Events))) startTime := time.Now().UnixMilli() + + wrapper.inEventGroupsTotal.Add(1) + wrapper.inEventsTotal.Add(int64(len(in.Events))) + for _, event := range in.Events { + wrapper.inSizeBytes.Add(event.GetSize()) + } + wrapper.Processor.Process(in, context) - wrapper.procTimeMS.Add(time.Now().UnixMilli() - startTime) - wrapper.procOutRecordsTotal.Add(int64(len(in.Events))) + + wrapper.outEventGroupsTotal.Add(1) + wrapper.outEventsTotal.Add(int64(len(in.Events))) + for _, event := range in.Events { + wrapper.outSizeBytes.Add(event.GetSize()) + } + wrapper.totalProcessTimeMs.Add(time.Now().UnixMilli() - startTime) } diff --git a/pluginmanager/plugin_wrapper_service_v1.go b/pluginmanager/plugin_wrapper_service_v1.go index 7b3ea22acb..2d0bdf496f 100644 --- a/pluginmanager/plugin_wrapper_service_v1.go +++ b/pluginmanager/plugin_wrapper_service_v1.go @@ -29,64 +29,65 @@ type ServiceWrapperV1 struct { Input pipeline.ServiceInputV1 } -func (p *ServiceWrapperV1) Init(pluginMeta *pipeline.PluginMeta) error { - p.InitMetricRecord(pluginMeta) +func (wrapper *ServiceWrapperV1) Init(pluginMeta *pipeline.PluginMeta) error { + wrapper.InitMetricRecord(pluginMeta) - _, err := p.Input.Init(p.Config.Context) + _, err := wrapper.Input.Init(wrapper.Config.Context) return err } -func (p *ServiceWrapperV1) Run(cc *pipeline.AsyncControl) { - logger.Info(p.Config.Context.GetRuntimeContext(), "start run service", p.Input) +func (wrapper *ServiceWrapperV1) Run(cc *pipeline.AsyncControl) { + logger.Info(wrapper.Config.Context.GetRuntimeContext(), "start run service", wrapper.Input) go func() { - defer panicRecover(p.Input.Description()) - err := p.Input.Start(p) + defer panicRecover(wrapper.Input.Description()) + err := 
wrapper.Input.Start(wrapper) if err != nil { - logger.Error(p.Config.Context.GetRuntimeContext(), "PLUGIN_ALARM", "start service error, err", err) + logger.Error(wrapper.Config.Context.GetRuntimeContext(), "PLUGIN_ALARM", "start service error, err", err) } - logger.Info(p.Config.Context.GetRuntimeContext(), "service done", p.Input.Description()) + logger.Info(wrapper.Config.Context.GetRuntimeContext(), "service done", wrapper.Input.Description()) }() } -func (p *ServiceWrapperV1) Stop() error { - err := p.Input.Stop() +func (wrapper *ServiceWrapperV1) Stop() error { + err := wrapper.Input.Stop() if err != nil { - logger.Error(p.Config.Context.GetRuntimeContext(), "PLUGIN_ALARM", "stop service error, err", err) + logger.Error(wrapper.Config.Context.GetRuntimeContext(), "PLUGIN_ALARM", "stop service error, err", err) } return err } -func (p *ServiceWrapperV1) AddData(tags map[string]string, fields map[string]string, t ...time.Time) { - p.AddDataWithContext(tags, fields, nil, t...) +func (wrapper *ServiceWrapperV1) AddData(tags map[string]string, fields map[string]string, t ...time.Time) { + wrapper.AddDataWithContext(tags, fields, nil, t...) } -func (p *ServiceWrapperV1) AddDataArray(tags map[string]string, +func (wrapper *ServiceWrapperV1) AddDataArray(tags map[string]string, columns []string, values []string, t ...time.Time) { - p.AddDataArrayWithContext(tags, columns, values, nil, t...) + wrapper.AddDataArrayWithContext(tags, columns, values, nil, t...) 
} -func (p *ServiceWrapperV1) AddRawLog(log *protocol.Log) { - p.AddRawLogWithContext(log, nil) +func (wrapper *ServiceWrapperV1) AddRawLog(log *protocol.Log) { + wrapper.AddRawLogWithContext(log, nil) } -func (p *ServiceWrapperV1) AddDataWithContext(tags map[string]string, fields map[string]string, ctx map[string]interface{}, t ...time.Time) { +func (wrapper *ServiceWrapperV1) AddDataWithContext(tags map[string]string, fields map[string]string, ctx map[string]interface{}, t ...time.Time) { var logTime time.Time if len(t) == 0 { logTime = time.Now() } else { logTime = t[0] } - slsLog, _ := helper.CreateLog(logTime, len(t) != 0, p.Tags, tags, fields) - p.inputRecordsTotal.Add(1) - p.inputRecordsSizeBytes.Add(int64(slsLog.Size())) - p.LogsChan <- &pipeline.LogWithContext{Log: slsLog, Context: ctx} + slsLog, _ := helper.CreateLog(logTime, len(t) != 0, wrapper.Tags, tags, fields) + wrapper.outEventsTotal.Add(1) + wrapper.outEventGroupsTotal.Add(1) + wrapper.outSizeBytes.Add(int64(slsLog.Size())) + wrapper.LogsChan <- &pipeline.LogWithContext{Log: slsLog, Context: ctx} } -func (p *ServiceWrapperV1) AddDataArrayWithContext(tags map[string]string, +func (wrapper *ServiceWrapperV1) AddDataArrayWithContext(tags map[string]string, columns []string, values []string, ctx map[string]interface{}, @@ -97,14 +98,16 @@ func (p *ServiceWrapperV1) AddDataArrayWithContext(tags map[string]string, } else { logTime = t[0] } - slsLog, _ := helper.CreateLogByArray(logTime, len(t) != 0, p.Tags, tags, columns, values) - p.inputRecordsTotal.Add(1) - p.inputRecordsSizeBytes.Add(int64(slsLog.Size())) - p.LogsChan <- &pipeline.LogWithContext{Log: slsLog, Context: ctx} + slsLog, _ := helper.CreateLogByArray(logTime, len(t) != 0, wrapper.Tags, tags, columns, values) + wrapper.outEventsTotal.Add(1) + wrapper.outEventGroupsTotal.Add(1) + wrapper.outSizeBytes.Add(int64(slsLog.Size())) + wrapper.LogsChan <- &pipeline.LogWithContext{Log: slsLog, Context: ctx} } -func (p *ServiceWrapperV1) 
AddRawLogWithContext(log *protocol.Log, ctx map[string]interface{}) { - p.inputRecordsTotal.Add(1) - p.inputRecordsSizeBytes.Add(int64(log.Size())) - p.LogsChan <- &pipeline.LogWithContext{Log: log, Context: ctx} +func (wrapper *ServiceWrapperV1) AddRawLogWithContext(log *protocol.Log, ctx map[string]interface{}) { + wrapper.outEventsTotal.Add(1) + wrapper.outEventGroupsTotal.Add(1) + wrapper.outSizeBytes.Add(int64(log.Size())) + wrapper.LogsChan <- &pipeline.LogWithContext{Log: log, Context: ctx} } diff --git a/pluginmanager/plugin_wrapper_service_v2.go b/pluginmanager/plugin_wrapper_service_v2.go index a85cc76c0d..a617de3b5b 100644 --- a/pluginmanager/plugin_wrapper_service_v2.go +++ b/pluginmanager/plugin_wrapper_service_v2.go @@ -22,13 +22,13 @@ type ServiceWrapperV2 struct { Input pipeline.ServiceInputV2 } -func (p *ServiceWrapperV2) Init(pluginMeta *pipeline.PluginMeta) error { - p.InitMetricRecord(pluginMeta) +func (wrapper *ServiceWrapperV2) Init(pluginMeta *pipeline.PluginMeta) error { + wrapper.InitMetricRecord(pluginMeta) - _, err := p.Input.Init(p.Config.Context) + _, err := wrapper.Input.Init(wrapper.Config.Context) return err } -func (p *ServiceWrapperV2) StartService(pipelineContext pipeline.PipelineContext) error { - return p.Input.StartService(pipelineContext) +func (wrapper *ServiceWrapperV2) StartService(pipelineContext pipeline.PipelineContext) error { + return wrapper.Input.StartService(pipelineContext) } diff --git a/pluginmanager/self_telemetry_alarm.go b/pluginmanager/self_telemetry_alarm.go index 3f2b7be5d6..f8453b2c49 100644 --- a/pluginmanager/self_telemetry_alarm.go +++ b/pluginmanager/self_telemetry_alarm.go @@ -37,12 +37,14 @@ func (r *InputAlarm) Description() string { func (r *InputAlarm) Collect(collector pipeline.Collector) error { loggroup := &protocol.LogGroup{} + LogtailConfigLock.RLock() for _, config := range LogtailConfig { alarm := config.Context.GetRuntimeContext().Value(pkg.LogTailMeta).(*pkg.LogtailContextMeta).GetAlarm() 
if alarm != nil { alarm.SerializeToPb(loggroup) } } + LogtailConfigLock.RUnlock() util.GlobalAlarm.SerializeToPb(loggroup) if len(loggroup.Logs) > 0 && AlarmConfig != nil { for _, log := range loggroup.Logs { diff --git a/pluginmanager/self_telemetry_statistics.go b/pluginmanager/self_telemetry_statistics.go index 9e5748f9e0..3e5b44e89a 100644 --- a/pluginmanager/self_telemetry_statistics.go +++ b/pluginmanager/self_telemetry_statistics.go @@ -35,6 +35,7 @@ func (r *InputStatistics) Description() string { } func (r *InputStatistics) Collect(collector pipeline.Collector) error { + LogtailConfigLock.RLock() for _, config := range LogtailConfig { log := &protocol.Log{} metricRecord := config.Context.GetLogstoreConfigMetricRecord() @@ -62,6 +63,7 @@ func (r *InputStatistics) Collect(collector pipeline.Collector) error { logger.Debug(r.context.GetRuntimeContext(), "statistics", *log) } } + LogtailConfigLock.RUnlock() return nil } diff --git a/plugins/flusher/prometheus/flusher_prometheus_test.go b/plugins/flusher/prometheus/flusher_prometheus_test.go index f061f12046..797352a7d4 100644 --- a/plugins/flusher/prometheus/flusher_prometheus_test.go +++ b/plugins/flusher/prometheus/flusher_prometheus_test.go @@ -23,6 +23,7 @@ import ( "sort" "strings" "testing" + "time" "github.com/golang/snappy" "github.com/jarcoal/httpmock" @@ -240,6 +241,8 @@ func TestPrometheusFlusher_ShouldWriteToRemoteStorageSuccess_GivenCorrectDataWit err := flusher.Export(groupEventsSlice, nil) So(err, ShouldBeNil) + time.Sleep(1 * time.Second) // guarantee that all http requests are handled + err = flusher.Stop() So(err, ShouldBeNil) diff --git a/plugins/flusher/sls/flusher_sls.go b/plugins/flusher/sls/flusher_sls.go index 95b96eaa60..84716e59d7 100644 --- a/plugins/flusher/sls/flusher_sls.go +++ b/plugins/flusher/sls/flusher_sls.go @@ -20,7 +20,6 @@ package sls import ( "fmt" - "github.com/alibaba/ilogtail/pkg/helper" "github.com/alibaba/ilogtail/pkg/logtail" 
"github.com/alibaba/ilogtail/pkg/pipeline" "github.com/alibaba/ilogtail/pkg/protocol" @@ -32,15 +31,12 @@ type SlsFlusher struct { // nolint:revive EnableShardHash bool KeepShardHash bool - context pipeline.Context - lenCounter pipeline.CounterMetric + context pipeline.Context } // Init ... func (p *SlsFlusher) Init(context pipeline.Context) error { p.context = context - metricsRecord := context.GetMetricRecord() - p.lenCounter = helper.NewCounterMetricAndRegister(metricsRecord, "flush_sls_size") return nil } @@ -82,8 +78,6 @@ func (p *SlsFlusher) Flush(projectName string, logstoreName string, configName s if err != nil { return fmt.Errorf("loggroup marshal err %v", err) } - bufLen := len(buf) - p.lenCounter.Add(int64(bufLen)) var rst int if !p.EnableShardHash { diff --git a/plugins/input/canal/input_canal.go b/plugins/input/canal/input_canal.go index 3f0b4d7b3b..0f890370d1 100644 --- a/plugins/input/canal/input_canal.go +++ b/plugins/input/canal/input_canal.go @@ -195,14 +195,14 @@ func (sc *ServiceCanal) Init(context pipeline.Context) (int, error) { sc.lastErrorChan = make(chan error, 1) metricsRecord := context.GetMetricRecord() - sc.rotateCounter = helper.NewCounterMetricAndRegister(metricsRecord, "binlog_rotate") - sc.syncCounter = helper.NewCounterMetricAndRegister(metricsRecord, "binlog_sync") - sc.ddlCounter = helper.NewCounterMetricAndRegister(metricsRecord, "binlog_ddl") - sc.rowCounter = helper.NewCounterMetricAndRegister(metricsRecord, "binlog_row") - sc.xgidCounter = helper.NewCounterMetricAndRegister(metricsRecord, "binlog_xgid") - sc.checkpointCounter = helper.NewCounterMetricAndRegister(metricsRecord, "binlog_checkpoint") - sc.lastBinLogMetric = helper.NewStringMetricAndRegister(metricsRecord, "binlog_filename") - sc.lastGTIDMetric = helper.NewStringMetricAndRegister(metricsRecord, "binlog_gtid") + sc.rotateCounter = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginBinlogRotate) + sc.syncCounter = 
helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginBinlogSync) + sc.ddlCounter = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginBinlogDdl) + sc.rowCounter = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginBinlogRow) + sc.xgidCounter = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginBinlogXgid) + sc.checkpointCounter = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginBinlogCheckpoint) + sc.lastBinLogMetric = helper.NewStringMetricAndRegister(metricsRecord, helper.MetricPluginBinlogFilename) + sc.lastGTIDMetric = helper.NewStringMetricAndRegister(metricsRecord, helper.MetricPluginBinlogGtid) return 0, nil } @@ -759,9 +759,13 @@ func (sc *ServiceCanal) Start(c pipeline.Collector) error { startPos.Pos = sc.checkpoint.Offset } if nil == gtid && 0 == len(startPos.Name) && !sc.StartFromBegining { - gtid, err = sc.getLatestGTID() - if err != nil { - logger.Warning(sc.context.GetRuntimeContext(), "CANAL_START_ALARM", "Call getLatestGTID failed, error", err) + if sc.isGTIDEnabled { + gtid, err = sc.getLatestGTID() + if err != nil { + logger.Warning(sc.context.GetRuntimeContext(), "CANAL_START_ALARM", "Call getLatestGTID failed, error", err) + } + } + if gtid == nil { startPos = sc.GetBinlogLatestPos() } logger.Infof(sc.context.GetRuntimeContext(), "Get latest checkpoint GTID: %v Position: %v", gtid, startPos) diff --git a/plugins/input/command/input_command.go b/plugins/input/command/input_command.go index adf7e3e82e..70f4e79562 100644 --- a/plugins/input/command/input_command.go +++ b/plugins/input/command/input_command.go @@ -70,7 +70,7 @@ func (in *InputCommand) Init(context pipeline.Context) (int, error) { } // mkdir - in.storageDir = path.Join(config.LogtailGlobalConfig.LogtailSysConfDir, "/scripts") + in.storageDir = path.Join(config.LoongcollectorGlobalConfig.LoongcollectorConfDir, "/scripts") err := mkdir(in.storageDir) if err != nil { err = fmt.Errorf("init 
storageInstance error : %s", err) diff --git a/plugins/input/docker/logmeta/metric_container_info.go b/plugins/input/docker/logmeta/metric_container_info.go index 5ac5b27eb4..392bea353a 100644 --- a/plugins/input/docker/logmeta/metric_container_info.go +++ b/plugins/input/docker/logmeta/metric_container_info.go @@ -161,10 +161,10 @@ func (idf *InputDockerFile) Init(context pipeline.Context) (int, error) { idf.updateEmptyFlag = true metricsRecord := idf.context.GetMetricRecord() - idf.avgInstanceMetric = helper.NewAverageMetricAndRegister(metricsRecord, "container_count") - idf.addMetric = helper.NewCounterMetricAndRegister(metricsRecord, "add_container") - idf.deleteMetric = helper.NewCounterMetricAndRegister(metricsRecord, "remove_container") - idf.updateMetric = helper.NewCounterMetricAndRegister(metricsRecord, "update_container") + idf.avgInstanceMetric = helper.NewAverageMetricAndRegister(metricsRecord, helper.MetricPluginContainerTotal) + idf.addMetric = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginAddContainerTotal) + idf.deleteMetric = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginRemoveContainerTotal) + idf.updateMetric = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginUpdateContainerTotal) var err error idf.IncludeEnv, idf.IncludeEnvRegex, err = helper.SplitRegexFromMap(idf.IncludeEnv) diff --git a/plugins/input/docker/stdout/input_docker_stdout.go b/plugins/input/docker/stdout/input_docker_stdout.go index 0a302034f5..f470cc03cd 100644 --- a/plugins/input/docker/stdout/input_docker_stdout.go +++ b/plugins/input/docker/stdout/input_docker_stdout.go @@ -197,9 +197,9 @@ func (sds *ServiceDockerStdout) Init(context pipeline.Context) (int, error) { metricsRecord := sds.context.GetMetricRecord() sds.tracker = helper.NewReaderMetricTracker(metricsRecord) - sds.avgInstanceMetric = helper.NewAverageMetricAndRegister(metricsRecord, "container_count") - sds.addMetric = 
helper.NewCounterMetricAndRegister(metricsRecord, "add_container") - sds.deleteMetric = helper.NewCounterMetricAndRegister(metricsRecord, "remove_container") + sds.avgInstanceMetric = helper.NewAverageMetricAndRegister(metricsRecord, helper.MetricPluginContainerTotal) + sds.addMetric = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginAddContainerTotal) + sds.deleteMetric = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginRemoveContainerTotal) var err error sds.IncludeEnv, sds.IncludeEnvRegex, err = helper.SplitRegexFromMap(sds.IncludeEnv) diff --git a/plugins/input/jmxfetch/jmxfetch.go b/plugins/input/jmxfetch/jmxfetch.go index df2f8b5218..920edd077d 100644 --- a/plugins/input/jmxfetch/jmxfetch.go +++ b/plugins/input/jmxfetch/jmxfetch.go @@ -95,7 +95,7 @@ func (m *Jmx) Init(context pipeline.Context) (int, error) { m.context = context m.key = m.context.GetProject() + m.context.GetLogstore() + m.context.GetConfigName() helper.ReplaceInvalidChars(&m.key) - m.jvmHome = path.Join(config.LogtailGlobalConfig.LogtailSysConfDir, "jvm") + m.jvmHome = path.Join(config.LoongcollectorGlobalConfig.LoongcollectorThirdPartyDir, "jvm") for _, f := range m.Filters { m.filters = append(m.filters, NewFilterInner(f)) } diff --git a/plugins/input/kafka/input_kafka.go b/plugins/input/kafka/input_kafka.go index 5d5cf37cc0..e80775c477 100644 --- a/plugins/input/kafka/input_kafka.go +++ b/plugins/input/kafka/input_kafka.go @@ -23,6 +23,7 @@ import ( "github.com/IBM/sarama" "github.com/alibaba/ilogtail/pkg/logger" + "github.com/alibaba/ilogtail/pkg/models" "github.com/alibaba/ilogtail/pkg/pipeline" "github.com/alibaba/ilogtail/pkg/pipeline/extensions" "github.com/alibaba/ilogtail/pkg/protocol/decoder" @@ -239,7 +240,11 @@ func (k *InputKafka) onMessage(msg *sarama.ConsumerMessage) { switch k.version { case v1: fields := make(map[string]string) - fields[string(msg.Key)] = string(msg.Value) + if len(msg.Key) == 0 { + fields[models.ContentKey] = 
string(msg.Value) + } else { + fields[string(msg.Key)] = string(msg.Value) + } k.collectorV1.AddData(nil, fields) case v2: data, err := k.decoder.DecodeV2(msg.Value, nil) diff --git a/plugins/input/kubernetesmetav2/meta_collector.go b/plugins/input/kubernetesmetav2/meta_collector.go index 71fdbf0695..756e07630e 100644 --- a/plugins/input/kubernetesmetav2/meta_collector.go +++ b/plugins/input/kubernetesmetav2/meta_collector.go @@ -13,6 +13,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/alibaba/ilogtail/pkg/flags" "github.com/alibaba/ilogtail/pkg/helper/k8smeta" "github.com/alibaba/ilogtail/pkg/logger" "github.com/alibaba/ilogtail/pkg/models" @@ -64,88 +65,88 @@ func (m *metaCollector) Start() error { } if m.serviceK8sMeta.Pod { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.POD, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.Node { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.NODE, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.NODE, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.Service { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.SERVICE, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.SERVICE, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.Deployment { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.DEPLOYMENT, m.handleEvent, m.serviceK8sMeta.Interval) + 
m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.DEPLOYMENT, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.ReplicaSet { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.REPLICASET, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.REPLICASET, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.DaemonSet { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.DAEMONSET, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.DAEMONSET, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.StatefulSet { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.STATEFULSET, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.STATEFULSET, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.Configmap { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.CONFIGMAP, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.CONFIGMAP, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.Secret { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.SECRET, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.SECRET, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.Job { - 
m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.JOB, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.JOB, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.CronJob { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.CRONJOB, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.CRONJOB, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.Namespace { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.NAMESPACE, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.NAMESPACE, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.PersistentVolume { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.PERSISTENTVOLUME, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.PERSISTENTVOLUME, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.PersistentVolumeClaim { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.PERSISTENTVOLUMECLAIM, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.PERSISTENTVOLUMECLAIM, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.StorageClass { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.STORAGECLASS, m.handleEvent, m.serviceK8sMeta.Interval) + 
m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.STORAGECLASS, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.Ingress { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.INGRESS, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.INGRESS, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.Pod && m.serviceK8sMeta.Node { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_NODE, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.POD_NODE, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.Deployment && m.serviceK8sMeta.ReplicaSet { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.REPLICASET_DEPLOYMENT, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.REPLICASET_DEPLOYMENT, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.ReplicaSet && m.serviceK8sMeta.Pod { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_REPLICASET, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.POD_REPLICASET, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.StatefulSet && m.serviceK8sMeta.Pod { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_STATEFULSET, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, 
k8smeta.POD_STATEFULSET, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.DaemonSet && m.serviceK8sMeta.Pod { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_DAEMONSET, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.POD_DAEMONSET, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.CronJob && m.serviceK8sMeta.Job { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.JOB_CRONJOB, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.JOB_CRONJOB, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.Job && m.serviceK8sMeta.Pod { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_JOB, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.POD_JOB, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.Pod && m.serviceK8sMeta.PersistentVolumeClaim { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_PERSISENTVOLUMECLAIN, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.POD_PERSISENTVOLUMECLAIN, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.Pod && m.serviceK8sMeta.Configmap { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_CONFIGMAP, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.POD_CONFIGMAP, m.handleEvent, m.serviceK8sMeta.Interval) } if 
m.serviceK8sMeta.Pod && m.serviceK8sMeta.Secret { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_SECRET, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.POD_SECRET, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.Service && m.serviceK8sMeta.Pod { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_SERVICE, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.POD_SERVICE, m.handleEvent, m.serviceK8sMeta.Interval) } if m.serviceK8sMeta.Pod { - m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD_CONTAINER, m.handleEvent, m.serviceK8sMeta.Interval) + m.serviceK8sMeta.metaManager.RegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.POD_CONTAINER, m.handleEvent, m.serviceK8sMeta.Interval) } go m.sendInBackground() return nil @@ -153,46 +154,52 @@ func (m *metaCollector) Start() error { func (m *metaCollector) Stop() error { if m.serviceK8sMeta.Pod { - m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.POD) + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.POD) + } + if m.serviceK8sMeta.Node { + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.NODE) } if m.serviceK8sMeta.Service { - m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.SERVICE) + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.SERVICE) } if m.serviceK8sMeta.Deployment { - 
m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.DEPLOYMENT) + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.DEPLOYMENT) + } + if m.serviceK8sMeta.ReplicaSet { + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.REPLICASET) } if m.serviceK8sMeta.DaemonSet { - m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.DAEMONSET) + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.DAEMONSET) } if m.serviceK8sMeta.StatefulSet { - m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.STATEFULSET) + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.STATEFULSET) } if m.serviceK8sMeta.Configmap { - m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.CONFIGMAP) + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.CONFIGMAP) } if m.serviceK8sMeta.Secret { - m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.SECRET) + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.SECRET) } if m.serviceK8sMeta.Job { - m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.JOB) + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.JOB) } if m.serviceK8sMeta.CronJob { - m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.CRONJOB) + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.CRONJOB) 
} if m.serviceK8sMeta.Namespace { - m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.NAMESPACE) + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.NAMESPACE) } if m.serviceK8sMeta.PersistentVolume { - m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.PERSISTENTVOLUME) + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.PERSISTENTVOLUME) } if m.serviceK8sMeta.PersistentVolumeClaim { - m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.PERSISTENTVOLUMECLAIM) + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.PERSISTENTVOLUMECLAIM) } if m.serviceK8sMeta.StorageClass { - m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.STORAGECLASS) + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.STORAGECLASS) } if m.serviceK8sMeta.Ingress { - m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.configName, k8smeta.INGRESS) + m.serviceK8sMeta.metaManager.UnRegisterSendFunc(m.serviceK8sMeta.context.GetProject(), m.serviceK8sMeta.configName, k8smeta.INGRESS) } close(m.stopCh) return nil @@ -220,7 +227,7 @@ func (m *metaCollector) handleAddOrUpdate(event *k8smeta.K8sMetaEvent) { if processor, ok := m.entityProcessor[event.Object.ResourceType]; ok { logs := processor(event.Object, "Update") for _, log := range logs { - m.send(log, !isEntity(event.Object.ResourceType)) + m.send(log, isEntity(event.Object.ResourceType)) if isEntity(event.Object.ResourceType) { link := m.generateEntityClusterLink(log) m.send(link, true) @@ -234,7 +241,7 @@ func (m *metaCollector) handleDelete(event *k8smeta.K8sMetaEvent) { logs := processor(event.Object, 
"Expire") for _, log := range logs { m.send(log, isEntity(event.Object.ResourceType)) - if !isEntity(event.Object.ResourceType) { + if isEntity(event.Object.ResourceType) { link := m.generateEntityClusterLink(log) m.send(link, true) } @@ -244,9 +251,9 @@ func (m *metaCollector) handleDelete(event *k8smeta.K8sMetaEvent) { func (m *metaCollector) processEntityCommonPart(logContents models.LogContents, kind, namespace, name, method string, firstObservedTime, lastObservedTime int64, creationTime v1.Time) { // entity reserved fields - logContents.Add(entityDomainFieldName, m.serviceK8sMeta.Domain) + logContents.Add(entityDomainFieldName, m.serviceK8sMeta.domain) logContents.Add(entityTypeFieldName, m.genEntityTypeKey(kind)) - logContents.Add(entityIDFieldName, m.genKey(namespace, name)) + logContents.Add(entityIDFieldName, m.genKey(kind, namespace, name)) logContents.Add(entityMethodFieldName, method) logContents.Add(entityFirstObservedTimeFieldName, strconv.FormatInt(firstObservedTime, 10)) @@ -262,13 +269,13 @@ func (m *metaCollector) processEntityCommonPart(logContents models.LogContents, } func (m *metaCollector) processEntityLinkCommonPart(logContents models.LogContents, srcKind, srcNamespace, srcName, destKind, destNamespace, destName, method string, firstObservedTime, lastObservedTime int64) { - logContents.Add(entityLinkSrcDomainFieldName, m.serviceK8sMeta.Domain) + logContents.Add(entityLinkSrcDomainFieldName, m.serviceK8sMeta.domain) logContents.Add(entityLinkSrcEntityTypeFieldName, m.genEntityTypeKey(srcKind)) - logContents.Add(entityLinkSrcEntityIDFieldName, m.genKey(srcNamespace, srcName)) + logContents.Add(entityLinkSrcEntityIDFieldName, m.genKey(srcKind, srcNamespace, srcName)) - logContents.Add(entityLinkDestDomainFieldName, m.serviceK8sMeta.Domain) + logContents.Add(entityLinkDestDomainFieldName, m.serviceK8sMeta.domain) logContents.Add(entityLinkDestEntityTypeFieldName, m.genEntityTypeKey(destKind)) - logContents.Add(entityLinkDestEntityIDFieldName, 
m.genKey(destNamespace, destName)) + logContents.Add(entityLinkDestEntityIDFieldName, m.genKey(destKind, destNamespace, destName)) logContents.Add(entityMethodFieldName, method) @@ -276,7 +283,6 @@ func (m *metaCollector) processEntityLinkCommonPart(logContents models.LogConten logContents.Add(entityLastObservedTimeFieldName, strconv.FormatInt(lastObservedTime, 10)) logContents.Add(entityKeepAliveSecondsFieldName, strconv.FormatInt(int64(m.serviceK8sMeta.Interval*2), 10)) logContents.Add(entityCategoryFieldName, defaultEntityLinkCategory) - logContents.Add(entityClusterIDFieldName, m.serviceK8sMeta.clusterID) } func (m *metaCollector) processEntityJSONObject(obj map[string]string) string { @@ -315,7 +321,7 @@ func (m *metaCollector) send(event models.PipelineEvent, entity bool) { func (m *metaCollector) sendInBackground() { entityGroup := &models.PipelineGroupEvents{} - entityLinkGroup := &models.PipelineGroupEvents{} + linkGroup := &models.PipelineGroupEvents{} sendFunc := func(group *models.PipelineGroupEvents) { for _, e := range group.Events { // TODO: temporary convert from event group back to log, will fix after pipeline support Go input to C++ processor @@ -330,26 +336,30 @@ func (m *metaCollector) sendInBackground() { case e := <-m.entityBuffer: entityGroup.Events = append(entityGroup.Events, e) if len(entityGroup.Events) >= 100 { + m.serviceK8sMeta.entityCount.Add(int64(len(entityGroup.Events))) sendFunc(entityGroup) } case e := <-m.entityLinkBuffer: - entityLinkGroup.Events = append(entityLinkGroup.Events, e) - if len(entityLinkGroup.Events) >= 100 { - sendFunc(entityLinkGroup) + linkGroup.Events = append(linkGroup.Events, e) + if len(linkGroup.Events) >= 100 { + m.serviceK8sMeta.linkCount.Add(int64(len(linkGroup.Events))) + sendFunc(linkGroup) } case <-time.After(3 * time.Second): if len(entityGroup.Events) > 0 { + m.serviceK8sMeta.entityCount.Add(int64(len(entityGroup.Events))) sendFunc(entityGroup) } - if len(entityLinkGroup.Events) > 0 { - 
sendFunc(entityLinkGroup) + if len(linkGroup.Events) > 0 { + m.serviceK8sMeta.linkCount.Add(int64(len(linkGroup.Events))) + sendFunc(linkGroup) } case <-m.stopCh: return } if time.Since(lastSendClusterTime) > time.Duration(m.serviceK8sMeta.Interval)*time.Second { // send cluster entity if in infra domain - if m.serviceK8sMeta.Domain == "infra" { + if m.serviceK8sMeta.domain == infraDomain { clusterEntity := m.generateClusterEntity() m.collector.AddRawLog(convertPipelineEvent2Log(clusterEntity)) lastSendClusterTime = time.Now() @@ -358,8 +368,8 @@ func (m *metaCollector) sendInBackground() { } } -func (m *metaCollector) genKey(namespace, name string) string { - key := m.serviceK8sMeta.clusterID + namespace + name +func (m *metaCollector) genKey(kind, namespace, name string) string { + key := m.serviceK8sMeta.clusterID + kind + namespace + name // #nosec G401 return fmt.Sprintf("%x", md5.Sum([]byte(key))) } @@ -368,9 +378,9 @@ func (m *metaCollector) generateClusterEntity() models.PipelineEvent { log := &models.Log{} log.Contents = models.NewLogContents() log.Timestamp = uint64(time.Now().Unix()) - log.Contents.Add(entityDomainFieldName, m.serviceK8sMeta.Domain) - log.Contents.Add(entityTypeFieldName, "infra.k8s.cluster") - log.Contents.Add(entityIDFieldName, m.genKey("", "")) + log.Contents.Add(entityDomainFieldName, m.serviceK8sMeta.domain) + log.Contents.Add(entityTypeFieldName, m.genEntityTypeKey(clusterTypeName)) + log.Contents.Add(entityIDFieldName, m.genKey("", "", "")) log.Contents.Add(entityMethodFieldName, "Update") log.Contents.Add(entityFirstObservedTimeFieldName, strconv.FormatInt(time.Now().Unix(), 10)) log.Contents.Add(entityLastObservedTimeFieldName, strconv.FormatInt(time.Now().Unix(), 10)) @@ -384,13 +394,12 @@ func (m *metaCollector) generateEntityClusterLink(entityEvent models.PipelineEve content := entityEvent.(*models.Log).Contents log := &models.Log{} log.Contents = models.NewLogContents() - log.Contents.Add(entityLinkSrcDomainFieldName, 
m.serviceK8sMeta.Domain) + log.Contents.Add(entityLinkSrcDomainFieldName, m.serviceK8sMeta.domain) log.Contents.Add(entityLinkSrcEntityTypeFieldName, content.Get(entityTypeFieldName)) log.Contents.Add(entityLinkSrcEntityIDFieldName, content.Get(entityIDFieldName)) - - log.Contents.Add(entityLinkDestDomainFieldName, m.serviceK8sMeta.Domain) - log.Contents.Add(entityLinkDestEntityTypeFieldName, "ack.cluster") - log.Contents.Add(entityLinkDestEntityIDFieldName, m.serviceK8sMeta.clusterID) + log.Contents.Add(entityLinkDestDomainFieldName, m.serviceK8sMeta.domain) + log.Contents.Add(entityLinkDestEntityTypeFieldName, m.genEntityTypeKey(clusterTypeName)) + log.Contents.Add(entityLinkDestEntityIDFieldName, m.genKey("", "", "")) log.Contents.Add(entityLinkRelationTypeFieldName, "runs") log.Contents.Add(entityMethodFieldName, content.Get(entityMethodFieldName)) @@ -399,17 +408,19 @@ func (m *metaCollector) generateEntityClusterLink(entityEvent models.PipelineEve log.Contents.Add(entityLastObservedTimeFieldName, content.Get(entityLastObservedTimeFieldName)) log.Contents.Add(entityKeepAliveSecondsFieldName, m.serviceK8sMeta.Interval*2) log.Contents.Add(entityCategoryFieldName, defaultEntityLinkCategory) - log.Contents.Add(entityClusterIDFieldName, m.serviceK8sMeta.clusterID) log.Timestamp = uint64(time.Now().Unix()) return log } func (m *metaCollector) genEntityTypeKey(kind string) string { - prefix := "" - if p, ok := DomainEntityTypePrefix[m.serviceK8sMeta.Domain]; ok { - prefix = p + // assert domain is initialized + if kind == "" { + return m.serviceK8sMeta.domain + ".k8s" + } + if kind == clusterTypeName && m.serviceK8sMeta.domain == acsDomain { + return m.serviceK8sMeta.domain + "." + *flags.ClusterType + "." + clusterTypeName } - return fmt.Sprintf("%s%s", prefix, strings.ToLower(kind)) + return m.serviceK8sMeta.domain + ".k8s." 
+ strings.ToLower(kind) } func convertPipelineEvent2Log(event models.PipelineEvent) *protocol.Log { diff --git a/plugins/input/kubernetesmetav2/meta_collector_const.go b/plugins/input/kubernetesmetav2/meta_collector_const.go index 8c8020bdeb..5ff1e876c6 100644 --- a/plugins/input/kubernetesmetav2/meta_collector_const.go +++ b/plugins/input/kubernetesmetav2/meta_collector_const.go @@ -14,9 +14,10 @@ const ( entityLastObservedTimeFieldName = "__last_observed_time__" entityKeepAliveSecondsFieldName = "__keep_alive_seconds__" - entityCategoryFieldName = "__category__" - defaultEntityCategory = "entity" - defaultEntityLinkCategory = "entity_link" + entityCategoryFieldName = "__category__" + entityCategorySelfMetricName = "category" + defaultEntityCategory = "entity" + defaultEntityLinkCategory = "entity_link" entityLinkSrcDomainFieldName = "__src_domain__" entityLinkSrcEntityTypeFieldName = "__src_entity_type__" @@ -27,7 +28,14 @@ const ( entityLinkRelationTypeFieldName = "__relation_type__" ) -var DomainEntityTypePrefix = map[string]string{ - "acs": "acs.ack.cluster.", - "infra": "infra.k8s.cluster.", -} +const ( + acsDomain = "acs" + infraDomain = "infra" + + ackCluster = "ack" + oneCluster = "one" + asiCluster = "asi" + + clusterTypeName = "cluster" + containerTypeName = "container" +) diff --git a/plugins/input/kubernetesmetav2/meta_collector_core.go b/plugins/input/kubernetesmetav2/meta_collector_core.go index 50642c1ef6..5a85424c43 100644 --- a/plugins/input/kubernetesmetav2/meta_collector_core.go +++ b/plugins/input/kubernetesmetav2/meta_collector_core.go @@ -44,9 +44,9 @@ func (m *metaCollector) processPodEntity(data *k8smeta.ObjectWrapper, method str containerLog.Contents = models.NewLogContents() containerLog.Timestamp = log.Timestamp - containerLog.Contents.Add(entityDomainFieldName, m.serviceK8sMeta.Domain) - containerLog.Contents.Add(entityTypeFieldName, m.genEntityTypeKey("container")) - containerLog.Contents.Add(entityIDFieldName, m.genKey(obj.Namespace, 
obj.Name+container.Name)) + containerLog.Contents.Add(entityDomainFieldName, m.serviceK8sMeta.domain) + containerLog.Contents.Add(entityTypeFieldName, m.genEntityTypeKey(containerTypeName)) + containerLog.Contents.Add(entityIDFieldName, m.genKey(containerTypeName, obj.Namespace, obj.Name+container.Name)) containerLog.Contents.Add(entityMethodFieldName, method) containerLog.Contents.Add(entityFirstObservedTimeFieldName, strconv.FormatInt(data.FirstObservedTime, 10)) @@ -264,7 +264,7 @@ func (m *metaCollector) processPodNodeLink(data *k8smeta.ObjectWrapper, method s log := &models.Log{} log.Contents = models.NewLogContents() m.processEntityLinkCommonPart(log.Contents, obj.Pod.Kind, obj.Pod.Namespace, obj.Pod.Name, obj.Node.Kind, "", obj.Node.Name, method, data.FirstObservedTime, data.LastObservedTime) - log.Contents.Add(entityLinkRelationTypeFieldName, "related_to") + log.Contents.Add(entityLinkRelationTypeFieldName, "runs") log.Timestamp = uint64(time.Now().Unix()) return []models.PipelineEvent{log} } @@ -323,7 +323,7 @@ func (m *metaCollector) processPodContainerLink(data *k8smeta.ObjectWrapper, met if obj, ok := data.Raw.(*k8smeta.PodContainer); ok { log := &models.Log{} log.Contents = models.NewLogContents() - m.processEntityLinkCommonPart(log.Contents, obj.Pod.Kind, obj.Pod.Namespace, obj.Pod.Name, "container", obj.Pod.Namespace, obj.Container.Name, method, data.FirstObservedTime, data.LastObservedTime) + m.processEntityLinkCommonPart(log.Contents, obj.Pod.Kind, obj.Pod.Namespace, obj.Pod.Name, "container", obj.Pod.Namespace, obj.Pod.Name+obj.Container.Name, method, data.FirstObservedTime, data.LastObservedTime) log.Contents.Add(entityLinkRelationTypeFieldName, "contains") log.Timestamp = uint64(time.Now().Unix()) return []models.PipelineEvent{log} diff --git a/plugins/input/kubernetesmetav2/meta_collector_test.go b/plugins/input/kubernetesmetav2/meta_collector_test.go new file mode 100644 index 0000000000..c037581652 --- /dev/null +++ 
b/plugins/input/kubernetesmetav2/meta_collector_test.go @@ -0,0 +1,48 @@ +package kubernetesmetav2 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/alibaba/ilogtail/pkg/flags" +) + +func TestGenEntityTypeKeyAcs(t *testing.T) { + m := metaCollector{ + serviceK8sMeta: &ServiceK8sMeta{}, + } + *flags.ClusterType = ackCluster + m.serviceK8sMeta.initDomain() + assert.Equal(t, "acs.k8s.pod", m.genEntityTypeKey("pod")) + assert.Equal(t, "acs.ack.cluster", m.genEntityTypeKey("cluster")) + + *flags.ClusterType = oneCluster + m.serviceK8sMeta.initDomain() + assert.Equal(t, "acs.k8s.pod", m.genEntityTypeKey("pod")) + assert.Equal(t, "acs.one.cluster", m.genEntityTypeKey("cluster")) + + *flags.ClusterType = asiCluster + m.serviceK8sMeta.initDomain() + assert.Equal(t, "acs.k8s.pod", m.genEntityTypeKey("pod")) + assert.Equal(t, "acs.asi.cluster", m.genEntityTypeKey("cluster")) +} + +func TestGenEntityTypeKeyInfra(t *testing.T) { + m := metaCollector{ + serviceK8sMeta: &ServiceK8sMeta{}, + } + *flags.ClusterType = "k8s" + m.serviceK8sMeta.initDomain() + assert.Equal(t, "infra.k8s.pod", m.genEntityTypeKey("pod")) + assert.Equal(t, "infra.k8s.cluster", m.genEntityTypeKey("cluster")) +} + +func TestGenEntityTypeKeyEmpty(t *testing.T) { + m := metaCollector{ + serviceK8sMeta: &ServiceK8sMeta{}, + } + m.serviceK8sMeta.initDomain() + assert.Equal(t, "infra.k8s.pod", m.genEntityTypeKey("pod")) + assert.Equal(t, "infra.k8s.cluster", m.genEntityTypeKey("cluster")) +} diff --git a/plugins/input/kubernetesmetav2/service_meta.go b/plugins/input/kubernetesmetav2/service_meta.go index 1e12b560a0..ea7a4b69d6 100644 --- a/plugins/input/kubernetesmetav2/service_meta.go +++ b/plugins/input/kubernetesmetav2/service_meta.go @@ -2,6 +2,7 @@ package kubernetesmetav2 import ( "github.com/alibaba/ilogtail/pkg/flags" + "github.com/alibaba/ilogtail/pkg/helper" "github.com/alibaba/ilogtail/pkg/helper/k8smeta" "github.com/alibaba/ilogtail/pkg/models" 
"github.com/alibaba/ilogtail/pkg/pipeline" @@ -13,7 +14,6 @@ type ProcessFunc func(data *k8smeta.ObjectWrapper, method string) []models.Pipel type ServiceK8sMeta struct { //revive:enable:exported Interval int - Domain string // entity switch Pod bool Node bool @@ -33,19 +33,29 @@ type ServiceK8sMeta struct { Ingress bool Container bool // other + context pipeline.Context metaManager *k8smeta.MetaManager collector pipeline.Collector metaCollector *metaCollector configName string clusterID string + domain string + // self metric + entityCount pipeline.CounterMetric + linkCount pipeline.CounterMetric } // Init called for init some system resources, like socket, mutex... // return interval(ms) and error flag, if interval is 0, use default interval func (s *ServiceK8sMeta) Init(context pipeline.Context) (int, error) { + s.context = context s.metaManager = k8smeta.GetMetaManagerInstance() s.configName = context.GetConfigName() + s.initDomain() + metricRecord := s.context.GetMetricRecord() + s.entityCount = helper.NewCounterMetricAndRegister(metricRecord, helper.MetricCollectEntityTotal) + s.linkCount = helper.NewCounterMetricAndRegister(metricRecord, helper.MetricCollectLinkTotal) return 0, nil } @@ -72,6 +82,15 @@ func (s *ServiceK8sMeta) Start(collector pipeline.Collector) error { return s.metaCollector.Start() } +func (s *ServiceK8sMeta) initDomain() { + switch *flags.ClusterType { + case ackCluster, oneCluster, asiCluster: + s.domain = acsDomain + default: + s.domain = infraDomain + } +} + func init() { pipeline.ServiceInputs["service_kubernetes_meta"] = func() pipeline.ServiceInput { return &ServiceK8sMeta{ diff --git a/plugins/input/mysql/mysql.go b/plugins/input/mysql/mysql.go index e6bcb2f27d..b305301aba 100644 --- a/plugins/input/mysql/mysql.go +++ b/plugins/input/mysql/mysql.go @@ -106,8 +106,8 @@ func (m *Mysql) Init(context pipeline.Context) (int, error) { } metricsRecord := m.context.GetMetricRecord() - m.collectLatency = 
helper.NewLatencyMetricAndRegister(metricsRecord, "mysql_collect_avg_cost") - m.collectTotal = helper.NewCounterMetricAndRegister(metricsRecord, "mysql_collect_total") + m.collectLatency = helper.NewLatencyMetricAndRegister(metricsRecord, helper.MetricPluginCollectAvgCostTimeMs) + m.collectTotal = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginCollectTotal) if m.CheckPoint { m.checkpointMetric = helper.NewStringMetricAndRegister(metricsRecord, "mysql_checkpoint") diff --git a/plugins/input/opentelemetry/service_otlp_v1_test.go b/plugins/input/opentelemetry/service_otlp_v1_test.go index c531a8ce35..0f05a6ad67 100644 --- a/plugins/input/opentelemetry/service_otlp_v1_test.go +++ b/plugins/input/opentelemetry/service_otlp_v1_test.go @@ -75,7 +75,7 @@ func TestOtlpGRPC_Logs_V1(t *testing.T) { } func TestOtlpGRPC_Metrics_V1(t *testing.T) { - config.LogtailGlobalConfig.EnableSlsMetricsFormat = true + config.LoongcollectorGlobalConfig.EnableSlsMetricsFormat = true endpointGrpc := test.GetAvailableLocalAddress(t) input, err := newInput(true, false, endpointGrpc, "") assert.NoError(t, err) diff --git a/plugins/input/prometheus/input_prometheus.go b/plugins/input/prometheus/input_prometheus.go index 3a05ffeaea..0ecd38100c 100644 --- a/plugins/input/prometheus/input_prometheus.go +++ b/plugins/input/prometheus/input_prometheus.go @@ -32,9 +32,9 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape" "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" + "github.com/alibaba/ilogtail/pkg/config" "github.com/alibaba/ilogtail/pkg/logger" "github.com/alibaba/ilogtail/pkg/pipeline" - "github.com/alibaba/ilogtail/pkg/util" ) var libLoggerOnce sync.Once @@ -85,7 +85,7 @@ func (p *ServiceStaticPrometheus) Init(context pipeline.Context) (int, error) { case p.Yaml != "": detail = []byte(p.Yaml) if p.AuthorizationPath == "" { - p.AuthorizationPath = util.GetCurrentBinaryPath() + p.AuthorizationPath = 
config.LoongcollectorGlobalConfig.LoongcollectorConfDir } case p.ConfigFilePath != "": f, err := os.Open(p.ConfigFilePath) diff --git a/plugins/input/rdb/rdb.go b/plugins/input/rdb/rdb.go index 6a9fa46b01..2d261599b3 100644 --- a/plugins/input/rdb/rdb.go +++ b/plugins/input/rdb/rdb.go @@ -104,8 +104,8 @@ func (m *Rdb) Init(context pipeline.Context, rdbFunc RdbFunc) (int, error) { metricsRecord := m.Context.GetMetricRecord() - m.collectLatency = helper.NewLatencyMetricAndRegister(metricsRecord, fmt.Sprintf("%s_collect_avg_cost", m.Driver)) - m.collectTotal = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%s_collect_total", m.Driver)) + m.collectLatency = helper.NewLatencyMetricAndRegister(metricsRecord, helper.MetricPluginCollectAvgCostTimeMs) + m.collectTotal = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginCollectTotal) if m.CheckPoint { m.checkpointMetric = helper.NewStringMetricAndRegister(metricsRecord, fmt.Sprintf("%s_checkpoint", m.Driver)) } diff --git a/plugins/input/telegraf/input_telegraf.go b/plugins/input/telegraf/input_telegraf.go index a17a3c62d1..90c4cc2ca6 100644 --- a/plugins/input/telegraf/input_telegraf.go +++ b/plugins/input/telegraf/input_telegraf.go @@ -15,7 +15,7 @@ package telegraf import ( - global_config "github.com/alibaba/ilogtail/pkg/config" + "github.com/alibaba/ilogtail/pkg/config" "github.com/alibaba/ilogtail/pkg/pipeline" "path" @@ -37,7 +37,7 @@ func (s *ServiceTelegraf) Init(ctx pipeline.Context) (int, error) { Name: ctx.GetConfigName(), Detail: s.Detail, } - s.tm = GetTelegrafManager(path.Join(global_config.LogtailGlobalConfig.LogtailSysConfDir, "telegraf")) + s.tm = GetTelegrafManager(path.Join(config.LoongcollectorGlobalConfig.LoongcollectorThirdPartyDir, "telegraf")) return 0, nil } diff --git a/plugins/processor/anchor/anchor.go b/plugins/processor/anchor/anchor.go index bafb1755e2..89ce035f03 100644 --- a/plugins/processor/anchor/anchor.go +++ b/plugins/processor/anchor/anchor.go @@ 
-94,7 +94,7 @@ func (p *ProcessorAnchor) Init(context pipeline.Context) error { } metricsRecord := p.context.GetMetricRecord() - p.logPairMetric = helper.NewAverageMetricAndRegister(metricsRecord, "anchor_pairs_per_log") + p.logPairMetric = helper.NewAverageMetricAndRegister(metricsRecord, helper.PluginPairsPerLogTotal) return nil } diff --git a/plugins/processor/droplastkey/processor_drop_last_key.go b/plugins/processor/droplastkey/processor_drop_last_key.go index 0d61483b01..545e9de3f1 100644 --- a/plugins/processor/droplastkey/processor_drop_last_key.go +++ b/plugins/processor/droplastkey/processor_drop_last_key.go @@ -38,7 +38,7 @@ func (p *ProcessorDropLastKey) Init(context pipeline.Context) error { p.context = context metricsRecord := p.context.GetMetricRecord() - p.filterMetric = helper.NewCounterMetricAndRegister(metricsRecord, "drop_key_count") + p.filterMetric = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginDiscardedEventsTotal) if len(p.DropKey) == 0 { return fmt.Errorf("Invalid config, DropKey is empty") diff --git a/plugins/processor/encrypt/processor_encrypt.go b/plugins/processor/encrypt/processor_encrypt.go index 0155b9f713..cbd6388123 100644 --- a/plugins/processor/encrypt/processor_encrypt.go +++ b/plugins/processor/encrypt/processor_encrypt.go @@ -27,7 +27,6 @@ import ( "os" "strings" - "github.com/alibaba/ilogtail/pkg/helper" "github.com/alibaba/ilogtail/pkg/logger" "github.com/alibaba/ilogtail/pkg/pipeline" "github.com/alibaba/ilogtail/pkg/protocol" @@ -66,9 +65,6 @@ type ProcessorEncrypt struct { blockSize int key []byte iv []byte - - encryptedCountMetric pipeline.CounterMetric - encryptedBytesMetric pipeline.CounterMetric } func (p *ProcessorEncrypt) Init(context pipeline.Context) error { @@ -86,10 +82,6 @@ func (p *ProcessorEncrypt) Init(context pipeline.Context) error { if err := p.parseIV(); err != nil { return err } - - metricsRecord := p.context.GetMetricRecord() - p.encryptedCountMetric = 
helper.NewCounterMetricAndRegister(metricsRecord, "encrypted_count") - p.encryptedBytesMetric = helper.NewCounterMetricAndRegister(metricsRecord, "encrypted_bytes") return nil } @@ -115,8 +107,6 @@ func (p *ProcessorEncrypt) processLog(log *protocol.Log) { continue } - p.encryptedCountMetric.Add(1) - p.encryptedBytesMetric.Add(int64(len(cont.Value))) ciphertext, err := p.encrypt(cont.Value) if err == nil { cont.Value = hex.EncodeToString(ciphertext) diff --git a/plugins/processor/fieldswithcondition/processor_fields_with_condition.go b/plugins/processor/fieldswithcondition/processor_fields_with_condition.go index 8f8aab8cef..abcc460133 100644 --- a/plugins/processor/fieldswithcondition/processor_fields_with_condition.go +++ b/plugins/processor/fieldswithcondition/processor_fields_with_condition.go @@ -44,9 +44,8 @@ type ProcessorFieldsWithCondition struct { DropIfNotMatchCondition bool `comment:"Optional. When the case condition is not met, whether the log is discarded (true) or retained (false)"` Switch []Condition `comment:"The switch-case conditions"` - filterMetric pipeline.CounterMetric - processedMetric pipeline.CounterMetric - context pipeline.Context + filterMetric pipeline.CounterMetric + context pipeline.Context } type Condition struct { @@ -203,8 +202,7 @@ func (p *ProcessorFieldsWithCondition) Init(context pipeline.Context) error { } } metricsRecord := p.context.GetMetricRecord() - p.filterMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_filtered", PluginName)) - p.processedMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", PluginName)) + p.filterMetric = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginDiscardedEventsTotal) return nil } @@ -289,7 +287,6 @@ func (p *ProcessorFieldsWithCondition) ProcessLogs(logArray []*protocol.Log) []* } else { p.filterMetric.Add(1) } - p.processedMetric.Add(1) } logArray = logArray[:nextIdx] return logArray diff --git 
a/plugins/processor/filter/keyregex/processor_filter_key_regex.go b/plugins/processor/filter/keyregex/processor_filter_key_regex.go index 93222e834e..078ed6272d 100644 --- a/plugins/processor/filter/keyregex/processor_filter_key_regex.go +++ b/plugins/processor/filter/keyregex/processor_filter_key_regex.go @@ -15,7 +15,6 @@ package keyregex import ( - "fmt" "regexp" "github.com/alibaba/ilogtail/pkg/helper" @@ -30,11 +29,10 @@ type ProcessorKeyFilter struct { Include []string Exclude []string - includeRegex []*regexp.Regexp - excludeRegex []*regexp.Regexp - filterMetric pipeline.CounterMetric - processedMetric pipeline.CounterMetric - context pipeline.Context + includeRegex []*regexp.Regexp + excludeRegex []*regexp.Regexp + filterMetric pipeline.CounterMetric + context pipeline.Context } // Init called for init some system resources, like socket, mutex... @@ -64,8 +62,7 @@ func (p *ProcessorKeyFilter) Init(context pipeline.Context) error { } metricsRecord := p.context.GetMetricRecord() - p.filterMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_filtered", pluginType)) - p.processedMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", pluginType)) + p.filterMetric = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginDiscardedEventsTotal) return nil } @@ -111,7 +108,6 @@ func (p *ProcessorKeyFilter) ProcessLogs(logArray []*protocol.Log) []*protocol.L } else { p.filterMetric.Add(1) } - p.processedMetric.Add(1) } logArray = logArray[:nextIdx] return logArray diff --git a/plugins/processor/filter/regex/processor_filter_regex.go b/plugins/processor/filter/regex/processor_filter_regex.go index dbd04022bf..def8d0e6f1 100644 --- a/plugins/processor/filter/regex/processor_filter_regex.go +++ b/plugins/processor/filter/regex/processor_filter_regex.go @@ -15,7 +15,6 @@ package regex import ( - "fmt" "regexp" "github.com/alibaba/ilogtail/pkg/helper" @@ -35,11 +34,10 @@ type ProcessorRegexFilter struct { 
Include map[string]string Exclude map[string]string - includeRegex map[string]*regexp.Regexp - excludeRegex map[string]*regexp.Regexp - filterMetric pipeline.CounterMetric - processedMetric pipeline.CounterMetric - context pipeline.Context + includeRegex map[string]*regexp.Regexp + excludeRegex map[string]*regexp.Regexp + filterMetric pipeline.CounterMetric + context pipeline.Context } // Init called for init some system resources, like socket, mutex... @@ -68,8 +66,7 @@ func (p *ProcessorRegexFilter) Init(context pipeline.Context) error { } } metricsRecord := p.context.GetMetricRecord() - p.filterMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_filtered", pluginType)) - p.processedMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", pluginType)) + p.filterMetric = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginDiscardedEventsTotal) return nil } @@ -125,7 +122,6 @@ func (p *ProcessorRegexFilter) ProcessLogs(logArray []*protocol.Log) []*protocol } else { p.filterMetric.Add(1) } - p.processedMetric.Add(1) } logArray = logArray[:nextIdx] return logArray diff --git a/plugins/processor/json/processor_json.go b/plugins/processor/json/processor_json.go index 4b450f1a65..9aa89ec93a 100644 --- a/plugins/processor/json/processor_json.go +++ b/plugins/processor/json/processor_json.go @@ -19,7 +19,6 @@ import ( "github.com/buger/jsonparser" - "github.com/alibaba/ilogtail/pkg/helper" "github.com/alibaba/ilogtail/pkg/logger" "github.com/alibaba/ilogtail/pkg/models" "github.com/alibaba/ilogtail/pkg/pipeline" @@ -39,8 +38,7 @@ type ProcessorJSON struct { IgnoreFirstConnector bool // 是否忽略第一个Connector ExpandArray bool // 是否展开数组类型 - context pipeline.Context - procParseInSizeBytes pipeline.CounterMetric + context pipeline.Context } const pluginType = "processor_json" @@ -51,8 +49,6 @@ func (p *ProcessorJSON) Init(context pipeline.Context) error { return fmt.Errorf("must specify SourceKey for plugin %v", 
pluginType) } p.context = context - metricsRecord := p.context.GetMetricRecord() - p.procParseInSizeBytes = helper.NewCounterMetricAndRegister(metricsRecord, "proc_parse_in_size_bytes") return nil } @@ -69,7 +65,6 @@ func (p *ProcessorJSON) ProcessLogs(logArray []*protocol.Log) []*protocol.Log { func (p *ProcessorJSON) processLog(log *protocol.Log) { findKey := false - p.procParseInSizeBytes.Add(int64(log.Size())) for idx := range log.Contents { if log.Contents[idx].Key == p.SourceKey { objectVal := log.Contents[idx].Value diff --git a/plugins/processor/pickkey/processor_pick_key.go b/plugins/processor/pickkey/processor_pick_key.go index d08daa8f66..cbc4f8f72d 100644 --- a/plugins/processor/pickkey/processor_pick_key.go +++ b/plugins/processor/pickkey/processor_pick_key.go @@ -15,8 +15,6 @@ package pickkey import ( - "fmt" - "github.com/alibaba/ilogtail/pkg/helper" "github.com/alibaba/ilogtail/pkg/pipeline" "github.com/alibaba/ilogtail/pkg/protocol" @@ -34,17 +32,15 @@ type ProcessorPickKey struct { includeLen int excludeLen int - filterMetric pipeline.CounterMetric - processedMetric pipeline.CounterMetric - context pipeline.Context + filterMetric pipeline.CounterMetric + context pipeline.Context } // Init called for init some system resources, like socket, mutex... 
func (p *ProcessorPickKey) Init(context pipeline.Context) error { p.context = context metricsRecord := p.context.GetMetricRecord() - p.filterMetric = helper.NewCounterMetricAndRegister(metricsRecord, "pick_key_lost") - p.processedMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", pluginType)) + p.filterMetric = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginDiscardedEventsTotal) if len(p.Include) > 0 { p.includeMap = make(map[string]struct{}) @@ -109,7 +105,6 @@ func (p *ProcessorPickKey) ProcessLogs(logArray []*protocol.Log) []*protocol.Log } nextIdx++ } - p.processedMetric.Add(1) } logArray = logArray[:nextIdx] return logArray diff --git a/plugins/processor/ratelimit/processor_rate_limit.go b/plugins/processor/ratelimit/processor_rate_limit.go index 8414e176da..95f6edbc6e 100644 --- a/plugins/processor/ratelimit/processor_rate_limit.go +++ b/plugins/processor/ratelimit/processor_rate_limit.go @@ -27,10 +27,9 @@ type ProcessorRateLimit struct { Fields []string `comment:"Optional. Fields of value to be limited, for each unique result from combining these field values."` Limit string `comment:"Optional. Limit rate in the format of (number)/(time unit). 
Supported time unit: 's' (per second), 'm' (per minute), and 'h' (per hour)."` - Algorithm algorithm - limitMetric pipeline.CounterMetric - processedMetric pipeline.CounterMetric - context pipeline.Context + Algorithm algorithm + limitMetric pipeline.CounterMetric + context pipeline.Context } const pluginType = "processor_rate_limit" @@ -46,8 +45,7 @@ func (p *ProcessorRateLimit) Init(context pipeline.Context) error { p.Algorithm = newTokenBucket(limit) metricsRecord := p.context.GetMetricRecord() - p.limitMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_limited", pluginType)) - p.processedMetric = helper.NewCounterMetricAndRegister(metricsRecord, fmt.Sprintf("%v_processed", pluginType)) + p.limitMetric = helper.NewCounterMetricAndRegister(metricsRecord, helper.MetricPluginDiscardedEventsTotal) return nil } @@ -69,7 +67,6 @@ func (p *ProcessorRateLimit) ProcessLogs(logArray []*protocol.Log) []*protocol.L } else { p.limitMetric.Add(1) } - p.processedMetric.Add(1) } logArray = logArray[:nextIdx] return logArray diff --git a/plugins/processor/ratelimit/processor_rate_limit_test.go b/plugins/processor/ratelimit/processor_rate_limit_test.go index 40e9d26e8b..488075ccec 100644 --- a/plugins/processor/ratelimit/processor_rate_limit_test.go +++ b/plugins/processor/ratelimit/processor_rate_limit_test.go @@ -70,7 +70,6 @@ func (s *processorTestSuite) TestDefault(c *check.C) { c.Assert(len(outLogs[0].Contents), check.Equals, 3) // metric c.Assert(int64(processor.limitMetric.Collect().Value), check.Equals, int64(2)) - c.Assert(int64(processor.processedMetric.Collect().Value), check.Equals, int64(8)) } } @@ -95,7 +94,6 @@ func (s *processorTestSuite) TestField(c *check.C) { c.Assert(len(outLogs), check.Equals, 5) // metric c.Assert(int64(processor.limitMetric.Collect().Value), check.Equals, int64(1)) - c.Assert(int64(processor.processedMetric.Collect().Value), check.Equals, int64(6)) } { // case: multiple fields @@ -124,7 +122,6 @@ func (s 
*processorTestSuite) TestField(c *check.C) { c.Assert(len(outLogs), check.Equals, 9) // metric c.Assert(int64(processor.limitMetric.Collect().Value), check.Equals, int64(3)) - c.Assert(int64(processor.processedMetric.Collect().Value), check.Equals, int64(12)) } } @@ -150,7 +147,6 @@ func (s *processorTestSuite) TestGC(c *check.C) { c.Assert(len(outLogs), check.Equals, 6) // metric c.Assert(int64(processor.limitMetric.Collect().Value), check.Equals, int64(10004)) - c.Assert(int64(processor.processedMetric.Collect().Value), check.Equals, int64(10010)) } { // case: gc in multiple process @@ -176,6 +172,5 @@ func (s *processorTestSuite) TestGC(c *check.C) { c.Assert(len(outLogs), check.Equals, 0) // metric c.Assert(int64(processor.limitMetric.Collect().Value), check.Equals, int64(10004)) - c.Assert(int64(processor.processedMetric.Collect().Value), check.Equals, int64(10010)) } } diff --git a/plugins/processor/regex/regex.go b/plugins/processor/regex/regex.go index a0a81d757b..bb3d073675 100644 --- a/plugins/processor/regex/regex.go +++ b/plugins/processor/regex/regex.go @@ -61,7 +61,7 @@ func (p *ProcessorRegex) Init(context pipeline.Context) error { } metricsRecord := p.context.GetMetricRecord() - p.logPairMetric = helper.NewAverageMetricAndRegister(metricsRecord, "anchor_pairs_per_log") + p.logPairMetric = helper.NewAverageMetricAndRegister(metricsRecord, helper.PluginPairsPerLogTotal) return nil } diff --git a/plugins/processor/stringreplace/processor_string_replace.go b/plugins/processor/stringreplace/processor_string_replace.go index 44808e8b72..b3c6c914a6 100644 --- a/plugins/processor/stringreplace/processor_string_replace.go +++ b/plugins/processor/stringreplace/processor_string_replace.go @@ -75,7 +75,7 @@ func (p *ProcessorStringReplace) Init(context pipeline.Context) error { } metricsRecord := p.context.GetMetricRecord() - p.logPairMetric = helper.NewAverageMetricAndRegister(metricsRecord, "regex_replace_pairs_per_log") + p.logPairMetric = 
helper.NewAverageMetricAndRegister(metricsRecord, helper.PluginPairsPerLogTotal) return nil } diff --git a/plugins/test/common.go b/plugins/test/common.go index 4a60bc9ba6..8a3cbc9634 100644 --- a/plugins/test/common.go +++ b/plugins/test/common.go @@ -49,24 +49,34 @@ func GetTestConfig(configName string) string { } func LoadDefaultConfig() *pluginmanager.LogstoreConfig { - return LoadMockConfig("project", "logstore", "configName", GetTestConfig("config")) + return LoadAndStartMockConfig("project", "logstore", "configName", GetTestConfig("config")) } // project, logstore, config, jsonStr -func LoadMockConfig(project, logstore, configName, jsonStr string) *pluginmanager.LogstoreConfig { +func LoadAndStartMockConfig(project, logstore, configName, jsonStr string) *pluginmanager.LogstoreConfig { err := pluginmanager.LoadLogstoreConfig(project, logstore, configName, 666, jsonStr) if err != nil { panic(err) } - return pluginmanager.LogtailConfig[configName] + if err := pluginmanager.Start(configName); err != nil { + panic(err) + } + object := pluginmanager.LogtailConfig[configName] + return object } func PluginStart() error { - return pluginmanager.Resume() + return pluginmanager.Start("") } -func PluginStop(forceFlushFlag bool) error { - return pluginmanager.HoldOn(true) +func PluginStop() error { + if err := pluginmanager.StopAllPipelines(true); err != nil { + return err + } + if err := pluginmanager.StopAllPipelines(false); err != nil { + return err + } + return nil } func CreateLogs(kvs ...string) *protocol.Log { diff --git a/scripts/check_glibc.sh b/scripts/check_glibc.sh index e43e7f4b9b..aa35950eb3 100755 --- a/scripts/check_glibc.sh +++ b/scripts/check_glibc.sh @@ -20,11 +20,11 @@ set -o pipefail # intialize variables OUT_DIR=${1:-output} ROOTDIR=$(cd $(dirname "${BASH_SOURCE[0]}") && cd .. 
&& pwd) -BIN="${ROOTDIR}/${OUT_DIR}/ilogtail" -ADAPTER="${ROOTDIR}/${OUT_DIR}/libPluginAdapter.so" -PLUGIN="${ROOTDIR}/${OUT_DIR}/libPluginBase.so" +BIN="${ROOTDIR}/${OUT_DIR}/loongcollector" +ADAPTER="${ROOTDIR}/${OUT_DIR}/libGoPluginAdapter.so" +PLUGIN="${ROOTDIR}/${OUT_DIR}/libGoPluginBase.so" -# check if the symbols in ilogtail are compatible with GLIBC_2.5 +# check if the symbols in loongcollector are compatible with GLIBC_2.5 awk_script=$(cat <<- EOF BEGIN { delete bad_syms[0] diff --git a/scripts/dist.sh b/scripts/dist.sh index 71e6550e17..0b5f9ed554 100755 --- a/scripts/dist.sh +++ b/scripts/dist.sh @@ -30,25 +30,25 @@ function arch() { # intialize variables OUT_DIR=${1:-output} DIST_DIR=${2:-dist} -PACKAGE_DIR=${3:-ilogtail-1.2.1} +PACKAGE_DIR=${3:-loongcollector-0.0.1} ROOTDIR=$(cd $(dirname "${BASH_SOURCE[0]}") && cd .. && pwd) ARCH=$(arch) # prepare dist dir mkdir -p "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}" cp LICENSE README.md "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}" -cp "${ROOTDIR}/${OUT_DIR}/ilogtail" "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}" -cp "${ROOTDIR}/${OUT_DIR}/libPluginAdapter.so" "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}" -cp "${ROOTDIR}/${OUT_DIR}/libPluginBase.so" "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}" -cp "${ROOTDIR}/${OUT_DIR}/ilogtail_config.json" "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}" -cp -a "${ROOTDIR}/${OUT_DIR}/config/local" "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}" -if file "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}/ilogtail" | grep x86-64; then ./scripts/download_ebpflib.sh "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}"; fi +cp "${ROOTDIR}/${OUT_DIR}/loongcollector" "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}" +cp "${ROOTDIR}/${OUT_DIR}/libGoPluginAdapter.so" "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}" +cp "${ROOTDIR}/${OUT_DIR}/libGoPluginBase.so" "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}" +cp "${ROOTDIR}/${OUT_DIR}/loongcollector_config.json" "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}" +cp -a "${ROOTDIR}/${OUT_DIR}/config/local" 
"${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}/conf" +if file "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}/loongcollector" | grep x86-64; then ./scripts/download_ebpflib.sh "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}"; fi # Splitting debug info at build time with -gsplit-dwarf does not work with current gcc version # Strip binary to reduce size here -strip "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}/ilogtail" -strip "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}/libPluginAdapter.so" -strip "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}/libPluginBase.so" +strip "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}/loongcollector" +strip "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}/libGoPluginAdapter.so" +strip "${ROOTDIR}/${DIST_DIR}/${PACKAGE_DIR}/libGoPluginBase.so" # pack dist dir cd "${ROOTDIR}/${DIST_DIR}" diff --git a/scripts/docker_build.sh b/scripts/docker_build.sh index e040cc8a2d..66b07f73e2 100755 --- a/scripts/docker_build.sh +++ b/scripts/docker_build.sh @@ -56,8 +56,8 @@ function check_docker_buildkit_support { ARCH=$(arch) CATEGORY=$1 GENERATED_HOME=$2 -VERSION=${3:-2.0.0} -REPOSITORY=${4:-aliyun/ilogtail} +VERSION=${3:-0.0.1} +REPOSITORY=${4:-aliyun/loongcollector} PUSH=${5:-false} USE_DOCKER_BUILDKIT=${6:-${DOCKER_BUILD_USE_BUILDKIT:-$(check_docker_buildkit_support)}} diff --git a/scripts/gen_build_scripts.sh b/scripts/gen_build_scripts.sh index d053d954f1..bc65b2e303 100755 --- a/scripts/gen_build_scripts.sh +++ b/scripts/gen_build_scripts.sh @@ -24,8 +24,8 @@ set -o pipefail # e2e: Build plugin dynamic lib with GOC and build the CPP part. 
CATEGORY=$1 GENERATED_HOME=$2 -VERSION=${3:-2.0.0} -REPOSITORY=${4:-aliyun/ilogtail} +VERSION=${3:-0.0.1} +REPOSITORY=${4:-aliyun/loongcollector} OUT_DIR=${5:-output} EXPORT_GO_ENVS=${6:-${DOCKER_BUILD_EXPORT_GO_ENVS:-true}} COPY_GIT_CONFIGS=${7:-${DOCKER_BUILD_COPY_GIT_CONFIGS:-true}} @@ -82,7 +82,7 @@ EOF chmod 755 $BUILD_SCRIPT_FILE if [ $CATEGORY = "plugin" ]; then - echo "mkdir -p core/build && cd core/build && cmake -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DLOGTAIL_VERSION=${VERSION} .. && cd plugin && make -s PluginAdapter && cd ../../.. && ./scripts/upgrade_adapter_lib.sh && ./scripts/plugin_build.sh mod c-shared ${OUT_DIR} ${VERSION} ${PLUGINS_CONFIG_FILE} ${GO_MOD_FILE}" >>$BUILD_SCRIPT_FILE + echo "mkdir -p core/build && cd core/build && cmake -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DLOGTAIL_VERSION=${VERSION} .. && cd plugin && make -s GoPluginAdapter && cd ../../.. && ./scripts/upgrade_adapter_lib.sh && ./scripts/plugin_build.sh mod c-shared ${OUT_DIR} ${VERSION} ${PLUGINS_CONFIG_FILE} ${GO_MOD_FILE}" >>$BUILD_SCRIPT_FILE elif [ $CATEGORY = "core" ]; then echo "mkdir -p core/build && cd core/build && cmake -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DLOGTAIL_VERSION=${VERSION} -DBUILD_LOGTAIL=${BUILD_LOGTAIL} -DBUILD_LOGTAIL_UT=${BUILD_LOGTAIL_UT} -DENABLE_COMPATIBLE_MODE=${ENABLE_COMPATIBLE_MODE} -DENABLE_STATIC_LINK_CRT=${ENABLE_STATIC_LINK_CRT} -DWITHOUTGDB=${WITHOUTGDB} -DWITHSPL=${WITHSPL} .. 
&& make -sj${MAKE_JOBS}" >>$BUILD_SCRIPT_FILE elif [ $CATEGORY = "all" ]; then @@ -99,26 +99,26 @@ function generateCopyScript() { echo "id=\$(docker create ${REPOSITORY}:${VERSION})" >>$COPY_SCRIPT_FILE if [ $CATEGORY = "plugin" ]; then - echo 'docker cp "$id":'${PATH_IN_DOCKER}'/'${OUT_DIR}'/libPluginBase.so $BINDIR' >>$COPY_SCRIPT_FILE + echo 'docker cp "$id":'${PATH_IN_DOCKER}'/'${OUT_DIR}'/libGoPluginBase.so $BINDIR' >>$COPY_SCRIPT_FILE elif [ $CATEGORY = "core" ]; then if [ $BUILD_LOGTAIL = "ON" ]; then - echo 'docker cp "$id":'${PATH_IN_DOCKER}'/core/build/ilogtail $BINDIR' >>$COPY_SCRIPT_FILE - echo 'docker cp "$id":'${PATH_IN_DOCKER}'/core/build/go_pipeline/libPluginAdapter.so $BINDIR' >>$COPY_SCRIPT_FILE + echo 'docker cp "$id":'${PATH_IN_DOCKER}'/core/build/loongcollector $BINDIR' >>$COPY_SCRIPT_FILE + echo 'docker cp "$id":'${PATH_IN_DOCKER}'/core/build/go_pipeline/libGoPluginAdapter.so $BINDIR' >>$COPY_SCRIPT_FILE fi if [ $BUILD_LOGTAIL_UT = "ON" ]; then echo 'docker cp "$id":'${PATH_IN_DOCKER}'/core/build core/build' >>$COPY_SCRIPT_FILE echo 'rm -rf core/protobuf/sls && docker cp "$id":'${PATH_IN_DOCKER}'/core/protobuf/sls core/protobuf/sls' >>$COPY_SCRIPT_FILE fi else - echo 'docker cp "$id":'${PATH_IN_DOCKER}'/'${OUT_DIR}'/libPluginBase.so $BINDIR' >>$COPY_SCRIPT_FILE - echo 'docker cp "$id":'${PATH_IN_DOCKER}'/core/build/ilogtail $BINDIR' >>$COPY_SCRIPT_FILE - echo 'docker cp "$id":'${PATH_IN_DOCKER}'/core/build/go_pipeline/libPluginAdapter.so $BINDIR' >>$COPY_SCRIPT_FILE + echo 'docker cp "$id":'${PATH_IN_DOCKER}'/'${OUT_DIR}'/libGoPluginBase.so $BINDIR' >>$COPY_SCRIPT_FILE + echo 'docker cp "$id":'${PATH_IN_DOCKER}'/core/build/loongcollector $BINDIR' >>$COPY_SCRIPT_FILE + echo 'docker cp "$id":'${PATH_IN_DOCKER}'/core/build/go_pipeline/libGoPluginAdapter.so $BINDIR' >>$COPY_SCRIPT_FILE if [ $BUILD_LOGTAIL_UT = "ON" ]; then echo 'docker cp "$id":'${PATH_IN_DOCKER}'/core/build core/build' >>$COPY_SCRIPT_FILE echo 'rm -rf core/protobuf/sls && docker 
cp "$id":'${PATH_IN_DOCKER}'/core/protobuf/sls core/protobuf/sls' >>$COPY_SCRIPT_FILE fi fi - echo 'echo -e "{\n}" > $BINDIR/ilogtail_config.json' >>$COPY_SCRIPT_FILE + echo 'echo -e "{\n}" > $BINDIR/loongcollector_config.json' >>$COPY_SCRIPT_FILE echo 'mkdir -p $BINDIR/config/local' >>$COPY_SCRIPT_FILE echo 'docker rm -v "$id"' >>$COPY_SCRIPT_FILE } diff --git a/scripts/ilogtail_control.sh b/scripts/loongcollector_control.sh similarity index 70% rename from scripts/ilogtail_control.sh rename to scripts/loongcollector_control.sh index 2d3e40c018..0627454e98 100755 --- a/scripts/ilogtail_control.sh +++ b/scripts/loongcollector_control.sh @@ -21,9 +21,9 @@ set -ue set -o pipefail caller_dir="$PWD" -ilogtail_dir=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) -bin_file="$ilogtail_dir/ilogtail" -pid_file="$ilogtail_dir/ilogtail.pid" +loongcollector_dir=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) +bin_file="$loongcollector_dir/loongcollector" +pid_file="$loongcollector_dir/run/loongcollector.pid" kill_timeout=10 port=${HTTP_PROBE_PORT:-7953} port_initial_delay_sec=${PORT_INITIAL_DELAY_SEC:-3} @@ -48,29 +48,29 @@ gen_config() { : } -start_ilogtail() { +start_loongcollector() { check_liveness_by_pid && { - local ilogtail_pid=$(load_pid) - echo "ilogtail already started. pid: $ilogtail_pid" + local loongcollector_pid=$(load_pid) + echo "loongcollector already started. pid: $loongcollector_pid" } || { ($bin_file $@) & - local ilogtail_pid=$! - save_pid $ilogtail_pid - echo "ilogtail started. pid: $ilogtail_pid" + local loongcollector_pid=$! + save_pid $loongcollector_pid + echo "loongcollector started. pid: $loongcollector_pid" } } check_liveness_by_pid() { # check if process exists - local ilogtail_pid=$(load_pid) - [[ ! -z $ilogtail_pid && -d /proc/$ilogtail_pid ]] || { + local loongcollector_pid=$(load_pid) + [[ ! 
-z $loongcollector_pid && -d /proc/$loongcollector_pid ]] || { return 1 } - pid_status=`head /proc/$ilogtail_pid/status | grep "State:*"` + pid_status=`head /proc/$loongcollector_pid/status | grep "State:*"` # check if process is zombie [[ "$pid_status" =~ .*"zombie"*. ]] && \ return 2 || : - # ilogtail is healthy + # loongcollector is healthy return 0 } @@ -87,7 +87,7 @@ block_on_check_liveness_by_pid() { while [[ $exit_flag -eq 0 ]] do check_liveness_by_pid || { - echo "ilogtail exited unexpectedly" + echo "loongcollector exited unexpectedly" exit 1 } sleep $liveness_check_interval @@ -99,27 +99,27 @@ block_on_check_liveness_by_port() { while [[ $exit_flag -eq 0 ]] do check_liveness_by_port || { - echo "ilogtail plugin exited unexpectedly" + echo "loongcollector plugin exited unexpectedly" exit 1 } sleep $liveness_check_interval done } -stop_ilogtail() { - # just return if ilogtail has not started - local ilogtail_pid=$(load_pid) - [[ ! -z $ilogtail_pid ]] || { - echo "ilogtail not started" +stop_loongcollector() { + # just return if loongcollector has not started + local loongcollector_pid=$(load_pid) + [[ ! 
-z $loongcollector_pid ]] || { + echo "loongcollector not started" return } local delaySec=${1:-0} - echo 'delay stop ilogtail, sleep' $delaySec + echo 'delay stop loongcollector, sleep' $delaySec sleep $delaySec - echo "stop ilogtail" + echo "stop loongcollector" # try to kill with SIGTERM, and wait for process to exit - kill $ilogtail_pid + kill $loongcollector_pid local exit_time=0 local exit_result="force killed" while [[ $exit_time -lt $kill_timeout ]] @@ -131,8 +131,8 @@ stop_ilogtail() { sleep 1 done # force kill with SIGKILL if timed out - [[ $exit_time -ge $kill_timeout ]] && kill -9 $ilogtail_pid || : - echo "stop ilogtail done, result: ${exit_result}" + [[ $exit_time -ge $kill_timeout ]] && kill -9 $loongcollector_pid || : + echo "stop loongcollector done, result: ${exit_result}" remove_pid } @@ -165,14 +165,14 @@ trap 'exit_handler' SIGINT case "$command" in start) - start_ilogtail $args + start_loongcollector $args ;; stop) - stop_ilogtail $args + stop_loongcollector $args ;; restart) - stop_ilogtail $args - start_ilogtail $args + stop_loongcollector $args + start_loongcollector $args ;; status) check_liveness_by_pid && exit 0 || exit $? @@ -181,9 +181,9 @@ case "$command" in check_liveness_by_port && exit 0 || exit $? 
;; start_and_block) - start_ilogtail $args + start_loongcollector $args block_on_check_liveness_by_pid - stop_ilogtail $args + stop_loongcollector $args ;; -h) usage diff --git a/scripts/plugin_build.sh b/scripts/plugin_build.sh index 088acd2986..fa70e344de 100755 --- a/scripts/plugin_build.sh +++ b/scripts/plugin_build.sh @@ -27,7 +27,7 @@ function os() { MOD=${1:-mod} BUILDMODE=${2:-default} OUT_DIR=${3:-output} -VERSION=${4:-2.0.0} +VERSION=${4:-0.0.1} PLUGINS_CONFIG_FILE=${5:-${PLUGINS_CONFIG_FILE:-plugins.yml,external_plugins.yml}} GO_MOD_FILE=${6:-${GO_MOD_FILE:-go.mod}} NAME=ilogtail @@ -46,13 +46,13 @@ if [ $OS_FLAG = 1 ]; then LDFLAGS=$LDFLAGS' -extldflags "-Wl,--wrap=memcpy"' fi if [ $BUILDMODE = "c-shared" ]; then - NAME=libPluginBase.so + NAME=libGoPluginBase.so fi elif [ $OS_FLAG = 3 ]; then export GOARCH=386 export CGO_ENABLED=1 if [ $BUILDMODE = "c-shared" ]; then - NAME=PluginBase.dll + NAME=GoPluginBase.dll fi elif [ $OS_FLAG = 2 ]; then BUILDMODE=default diff --git a/scripts/plugin_gocbuild.sh b/scripts/plugin_gocbuild.sh index a00734aeb9..9b195077a4 100755 --- a/scripts/plugin_gocbuild.sh +++ b/scripts/plugin_gocbuild.sh @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -NAME=libPluginBase.so +NAME=libGoPluginBase.so goc version if [ $? 
!= 0 ]; then echo "===========================================================================================================" diff --git a/scripts/update_version.sh b/scripts/update_version.sh index 3f9b723c1b..a93b996afe 100755 --- a/scripts/update_version.sh +++ b/scripts/update_version.sh @@ -40,12 +40,12 @@ sed -i "s/VERSION=\${1:-.*}/VERSION=\${1:-$version}/g" scripts/*.sh sed -i "s/VERSION=\${2:-.*}/VERSION=\${2:-$version}/g" scripts/*.sh sed -i "s/VERSION=\${3:-.*}/VERSION=\${3:-$version}/g" scripts/*.sh sed -i "s/VERSION=\${4:-.*}/VERSION=\${4:-$version}/g" scripts/*.sh -sed -i "s/DIST_DIR=\${2:-ilogtail-.*}/DIST_DIR=\${2:-ilogtail-$version}/g" scripts/dist.sh +sed -i "s/DIST_DIR=\${2:-loongcollector-.*}/DIST_DIR=\${2:-loongcollector-$version}/g" scripts/dist.sh sed -i "s/^set ILOGTAIL_VERSION=.*/set ILOGTAIL_VERSION=$version/g" scripts/*.bat -sed -i "s/image: aliyun\\/ilogtail:.*/image: aliyun\\/ilogtail:$version/g" test/engine/boot/compose.go +sed -i "s/image: aliyun\\/loongcollector:.*/image: aliyun\\/loongcollector:$version/g" test/engine/boot/compose.go # Docs -sed -i "s/aliyun\\/ilogtail:[^\` ]*/aliyun\\/ilogtail:$version/g" \ +sed -i "s/aliyun\\/loongcollector:[^\` ]*/aliyun\\/loongcollector:$version/g" \ README.md \ docs/en/guides/How-to-do-manual-test.md \ docs/en/guides/How-to-build-with-docker.md diff --git a/scripts/upgrade_adapter_lib.sh b/scripts/upgrade_adapter_lib.sh index ca261bd70a..2c7d8afb37 100755 --- a/scripts/upgrade_adapter_lib.sh +++ b/scripts/upgrade_adapter_lib.sh @@ -35,5 +35,5 @@ SOURCEDIR="core/build/plugin" [[ $# -eq 2 ]] && SOURCEDIR="$1" || : if [ $OS_FLAG = 1 ]; then - cp ${ROOTDIR}/core/build/go_pipeline/libPluginAdapter.so ${ROOTDIR}/pkg/logtail/libPluginAdapter.so + cp ${ROOTDIR}/core/build/go_pipeline/libGoPluginAdapter.so ${ROOTDIR}/pkg/logtail/libGoPluginAdapter.so fi diff --git a/scripts/windows32_build.bat b/scripts/windows32_build.bat index cf240bd253..7aea286b36 100644 --- a/scripts/windows32_build.bat +++ 
b/scripts/windows32_build.bat @@ -48,7 +48,7 @@ REM Clean up IF exist %OUTPUT_DIR% ( rd /s /q %OUTPUT_DIR% ) mkdir %OUTPUT_DIR% -REM Build C++ core (ilogtail.exe, PluginAdapter.dll) +REM Build C++ core (ilogtail.exe, GoPluginAdapter.dll) echo begin to compile core cd %ILOGTAIL_PLUGIN_SRC_PATH%\core IF exist build ( rd /s /q build ) @@ -76,13 +76,13 @@ del /f/s/q %ILOGTAIL_PLUGIN_SRC_PATH%\plugins\all\all_linux.go go run -mod=mod %ILOGTAIL_PLUGIN_SRC_UNIX_PATH%/tools/builder -root-dir=%ILOGTAIL_PLUGIN_SRC_UNIX_PATH% -config="plugins.yml,external_plugins.yml" -modfile="go.mod" echo generating plugins finished successfully -REM Build plugins(PluginBase.dll, PluginBase.h) +REM Build plugins(GoPluginBase.dll, GoPluginBase.h) echo Begin to build plugins... IF exist %OUTPUT_DIR% ( rd /s /q %OUTPUT_DIR% ) mkdir %OUTPUT_DIR% -xcopy /Y %ILOGTAIL_CORE_BUILD_PATH%\plugin\Release\PluginAdapter.dll %ILOGTAIL_PLUGIN_SRC_PATH%\pkg\logtail +xcopy /Y %ILOGTAIL_CORE_BUILD_PATH%\plugin\Release\GoPluginAdapter.dll %ILOGTAIL_PLUGIN_SRC_PATH%\pkg\logtail set LDFLAGS="-X "github.com/alibaba/ilogtail/pluginmanager.BaseVersion=%ILOGTAIL_VERSION%"" -go build -mod=mod -buildmode=c-shared -ldflags=%LDFLAGS% -o %OUTPUT_UNIX_DIR%/PluginBase.dll %ILOGTAIL_PLUGIN_SRC_UNIX_PATH%/plugin_main +go build -mod=mod -buildmode=c-shared -ldflags=%LDFLAGS% -o %OUTPUT_UNIX_DIR%/GoPluginBase.dll %ILOGTAIL_PLUGIN_SRC_UNIX_PATH%/plugin_main if not %ERRORLEVEL% == 0 ( echo Build iLogtail plugin source failed. 
goto quit @@ -91,7 +91,7 @@ echo Build plugins success REM Copy artifacts xcopy /Y %ILOGTAIL_CORE_BUILD_PATH%\Release\ilogtail.exe %OUTPUT_DIR% -xcopy /Y %ILOGTAIL_CORE_BUILD_PATH%\plugin\Release\PluginAdapter.dll %OUTPUT_DIR% +xcopy /Y %ILOGTAIL_CORE_BUILD_PATH%\plugin\Release\GoPluginAdapter.dll %OUTPUT_DIR% echo { > %OUTPUT_DIR%\ilogtail_config.json & echo } >> %OUTPUT_DIR%\ilogtail_config.json mkdir %OUTPUT_DIR%\config\local cd %OUTPUT_DIR% diff --git a/scripts/windows64_build.bat b/scripts/windows64_build.bat index e6bbd64c87..17ec0f8db4 100644 --- a/scripts/windows64_build.bat +++ b/scripts/windows64_build.bat @@ -48,7 +48,7 @@ REM Clean up IF exist %OUTPUT_DIR% ( rd /s /q %OUTPUT_DIR% ) mkdir %OUTPUT_DIR% -REM Build C++ core(ilogtail.exe, PluginAdapter.dll) +REM Build C++ core(ilogtail.exe, GoPluginAdapter.dll) echo begin to compile core cd %ILOGTAIL_PLUGIN_SRC_PATH%\core IF exist build ( rd /s /q build ) @@ -76,13 +76,13 @@ del /f/s/q %ILOGTAIL_PLUGIN_SRC_PATH%\plugins\all\all_linux.go go run -mod=mod %ILOGTAIL_PLUGIN_SRC_UNIX_PATH%/tools/builder -root-dir=%ILOGTAIL_PLUGIN_SRC_UNIX_PATH% -config="plugins.yml,external_plugins.yml" -modfile="go.mod" echo generating plugins finished successfully -REM Build plugins (PluginBase.dll, PluginBase.h) +REM Build plugins (GoPluginBase.dll, GoPluginBase.h) echo Begin to build plugins... 
IF exist %OUTPUT_DIR% ( rd /s /q %OUTPUT_DIR% ) mkdir %OUTPUT_DIR% -xcopy /Y %ILOGTAIL_CORE_BUILD_PATH%\plugin\Release\PluginAdapter.dll %ILOGTAIL_PLUGIN_SRC_PATH%\pkg\logtail +xcopy /Y %ILOGTAIL_CORE_BUILD_PATH%\plugin\Release\GoPluginAdapter.dll %ILOGTAIL_PLUGIN_SRC_PATH%\pkg\logtail set LDFLAGS="-X "github.com/alibaba/ilogtail/pluginmanager.BaseVersion=%ILOGTAIL_VERSION%"" -go build -mod=mod -buildmode=c-shared -ldflags=%LDFLAGS% -o %OUTPUT_UNIX_DIR%/PluginBase.dll %ILOGTAIL_PLUGIN_SRC_UNIX_PATH%/plugin_main +go build -mod=mod -buildmode=c-shared -ldflags=%LDFLAGS% -o %OUTPUT_UNIX_DIR%/GoPluginBase.dll %ILOGTAIL_PLUGIN_SRC_UNIX_PATH%/plugin_main if not %ERRORLEVEL% == 0 ( echo Build iLogtail plugin source failed. goto quit @@ -91,7 +91,7 @@ echo Build plugins success REM Copy artifacts xcopy /Y %ILOGTAIL_CORE_BUILD_PATH%\Release\ilogtail.exe %OUTPUT_DIR% -xcopy /Y %ILOGTAIL_CORE_BUILD_PATH%\plugin\Release\PluginAdapter.dll %OUTPUT_DIR% +xcopy /Y %ILOGTAIL_CORE_BUILD_PATH%\plugin\Release\GoPluginAdapter.dll %OUTPUT_DIR% echo { > %OUTPUT_DIR%\ilogtail_config.json & echo } >> %OUTPUT_DIR%\ilogtail_config.json mkdir %OUTPUT_DIR%\config\local cd %OUTPUT_DIR% diff --git a/test/e2e/test_cases/alwaysonline_noraml_config_exit_false/case.feature b/test/e2e/test_cases/alwaysonline_noraml_config_exit_false/case.feature deleted file mode 100644 index d2691268ee..0000000000 --- a/test/e2e/test_cases/alwaysonline_noraml_config_exit_false/case.feature +++ /dev/null @@ -1,104 +0,0 @@ -@input -Feature: always online normal config exit false - Test always online normal config exit false - - @e2e-core @docker-compose - Scenario: TestAlwaysOnlineNormalConfigExitFalse - Given {docker-compose} environment - Given subcribe data from {grpc} with config - """ - """ - When start docker-compose {alwaysonline_noraml_config_exit_false} - Given {nomal-case} http config as below - """ - { - "global": { - "InputIntervalMs": 10000, - "AggregatIntervalMs": 1000, - "FlushIntervalMs": 300, - 
"DefaultLogQueueSize": 1000, - "DefaultLogGroupQueueSize": 4, - "AlwaysOnline": true, - "DelayStopSec": 1 - }, - "inputs": [ - { - "type": "service_mock", - "detail": { - "LogsPerSecond": 10000, - "MaxLogCount": 20000, - "Fields": { - "content": "time:2017.09.12 20:55:36 json:{\"array\" : [1, 2, 3, 4], \"key1\" : \"xx\", \"key2\": false, \"key3\":123.456, \"key4\" : { \"inner1\" : 1, \"inner2\" : {\"xxxx\" : \"yyyy\", \"zzzz\" : \"中文\"}}}\n" - } - } - } - ], - "processors": [ - { - "type": "processor_anchor", - "detail": { - "SourceKey": "content", - "NoAnchorError": true, - "Anchors": [ - { - "Start": "time", - "Stop": " ", - "FieldName": "time", - "FieldType": "string", - "ExpondJson": false - }, - { - "Start": "json:", - "Stop": "\n", - "FieldName": "val", - "FieldType": "json", - "ExpondJson": true, - "MaxExpondDepth": 2, - "ExpondConnecter": "#" - } - ] - } - } - ], - "flushers": [ - { - "type": "flusher_grpc", - "detail": { - "Address": "host.docker.internal:9000" - } - } - ] - } - """ - Then wait {10} seconds - Given {another-case} http config as below - """ - """ - Then wait {10} seconds - Given remove http config {nomal-case} - Then wait {10} seconds - Then there is at least {20000} logs - # Resume - Then the logtail log contains {2} times of {[Resume] Resume:start} - Then the logtail log contains {2} times of {[Resume] checkpoint:Resume} - Then the logtail log contains {2} times of {[Resume] Resume:success} - Then the logtail log contains {2} times of {[Start] [shennong_log_profile,logtail_plugin_profile] config start:success} - Then the logtail log contains {2} times of {[Start] [logtail_alarm,logtail_alarm] config start:success} - Then the logtail log contains {1} times of {[Start] [nomal-case,e2e-test-logstore] config start:success} - # Holdon - Then the logtail log contains {2} times of {[HoldOn] Hold on:start flag:0} - Then the logtail log contains {1} times of {[HoldOn] Hold on:start flag:1} - Then the logtail log contains {2} times of {[HoldOn] 
checkpoint:HoldOn} - Then the logtail log contains {3} times of {[HoldOn] Hold on:success} - Then the logtail log contains {1} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:begin exit:false} - Then the logtail log contains {1} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:begin exit:true} - Then the logtail log contains {2} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:success} - Then the logtail log contains {1} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:begin exit:false} - Then the logtail log contains {1} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:begin exit:true} - Then the logtail log contains {2} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:success} - Then the logtail log contains {1} times of {[Stop] [nomal-case,e2e-test-logstore] config stop:begin exit:true} - # not always online stop logs - Then the logtail log contains {0} times of {[nomal-case,e2e-test-logstore] Stop config in goroutine:begin} - # always online logs - Then the logtail log contains {1} times of {[nomal-case,e2e-test-logstore] always online config nomal-case is deleted, stop it} - Then the logtail log contains {1} times of {[nomal-case,e2e-test-logstore] always online config nomal-case stopped, error: } \ No newline at end of file diff --git a/test/e2e/test_cases/alwaysonline_noraml_config_exit_true/case.feature b/test/e2e/test_cases/alwaysonline_noraml_config_exit_true/case.feature deleted file mode 100644 index c244dcf3f9..0000000000 --- a/test/e2e/test_cases/alwaysonline_noraml_config_exit_true/case.feature +++ /dev/null @@ -1,76 +0,0 @@ -@input -Feature: always online normal config exit true - Test always online normal config exit true - - @e2e-core @docker-compose - Scenario: TestAlwaysOnlineNormalConfigExitTrue - Given {docker-compose} environment - Given subcribe data from {grpc} with config - """ - """ - When start docker-compose 
{alwaysonline_noraml_config_exit_true} - Given {nomal-case} local config as below - """ - enable: true - global: - InputIntervalMs: 10000 - AggregatIntervalMs: 1000 - FlushIntervalMs: 300 - DefaultLogQueueSize: 1000 - DefaultLogGroupQueueSize: 4 - AlwaysOnline: true - DelayStopSec: 1 - inputs: - - Type: service_mock - LogsPerSecond: 10000 - MaxLogCount: 20000 - Fields: - content: > - time:2017.09.12 20:55:36 json:{"array" : [1, 2, 3, 4], "key1" : "xx", - "key2": false, "key3":123.456, "key4" : { "inner1" : 1, "inner2" : - {"xxxx" : "yyyy", "zzzz" : "中文"}}} - processors: - - Type: processor_anchor - SourceKey: content - NoAnchorError: true - Anchors: - - Start: time - Stop: ' ' - FieldName: time - FieldType: string - ExpondJson: false - - Start: 'json:' - Stop: |+ - FieldName: val - FieldType: json - ExpondJson: true - MaxExpondDepth: 2 - ExpondConnecter: '#' - """ - Then wait {15} seconds - Given remove http config {nomal-case/1} - Then wait {10} seconds - Then there is at least {20000} logs - # Resume - Then the logtail log contains {1} times of {[Resume] Resume:start} - Then the logtail log contains {1} times of {[Resume] checkpoint:Resume} - Then the logtail log contains {1} times of {[Resume] Resume:success} - Then the logtail log contains {1} times of {[Start] [shennong_log_profile,logtail_plugin_profile] config start:success} - Then the logtail log contains {1} times of {[Start] [logtail_alarm,logtail_alarm] config start:success} - Then the logtail log contains {1} times of {[Start] [nomal-case/1] config start:success} - # Holdon - Then the logtail log contains {1} times of {[HoldOn] Hold on:start flag:0} - Then the logtail log contains {1} times of {[HoldOn] Hold on:start flag:1} - Then the logtail log contains {1} times of {[HoldOn] checkpoint:HoldOn} - Then the logtail log contains {2} times of {[HoldOn] Hold on:success} - Then the logtail log contains {0} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:begin exit:false} - Then 
the logtail log contains {1} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:begin exit:true} - Then the logtail log contains {1} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:success} - Then the logtail log contains {0} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:begin exit:false} - Then the logtail log contains {1} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:begin exit:true} - Then the logtail log contains {1} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:success} - Then the logtail log contains {1} times of {[nomal-case/1] Stop config in goroutine:begin} - Then the logtail log contains {1} times of {[Stop] [nomal-case/1] config stop:begin exit:true} - # always online logs - Then the logtail log contains {0} times of {[nomal-case/1] always online config nomal-case/1 is deleted, stop it} - Then the logtail log contains {0} times of {[nomal-case/1] always online config nomal-case/1 stopped, error: } \ No newline at end of file diff --git a/test/e2e/test_cases/alwaysonline_noraml_config_resume/case.feature b/test/e2e/test_cases/alwaysonline_noraml_config_resume/case.feature deleted file mode 100644 index f995d4403a..0000000000 --- a/test/e2e/test_cases/alwaysonline_noraml_config_resume/case.feature +++ /dev/null @@ -1,166 +0,0 @@ -@input -Feature: always online normal config resume - Test always online normal config resume - - @e2e-core @docker-compose - Scenario: TestAlwaysOnlineNormalConfigResume - Given {docker-compose} environment - Given subcribe data from {grpc} with config - """ - """ - When start docker-compose {alwaysonline_noraml_config_resume} - Given {nomal-case} http config as below - """ - { - "global":{ - "InputIntervalMs":10000, - "AggregatIntervalMs":1000, - "FlushIntervalMs":300, - "DefaultLogQueueSize":1000, - "DefaultLogGroupQueueSize":4, - "AlwaysOnline":true, - "DelayStopSec": 10 - }, - "inputs":[ - { - "type":"service_mock", - "detail":{ - 
"LogsPerSecond":10000, - "MaxLogCount":20000, - "Fields":{ - "content":"time:2017.09.12 20:55:36 json:{\"array\" : [1, 2, 3, 4], \"key1\" : \"xx\", \"key2\": false, \"key3\":123.456, \"key4\" : { \"inner1\" : 1, \"inner2\" : {\"xxxx\" : \"yyyy\", \"zzzz\" : \"中文\"}}}\n" - } - } - } - ], - "processors":[ - { - "type":"processor_anchor", - "detail":{ - "SourceKey":"content", - "NoAnchorError":true, - "Anchors":[ - { - "Start":"time", - "Stop":" ", - "FieldName":"time", - "FieldType":"string", - "ExpondJson":false - }, - { - "Start":"json:", - "Stop":"\n", - "FieldName":"val", - "FieldType":"json", - "ExpondJson":true, - "MaxExpondDepth":2, - "ExpondConnecter":"#" - } - ] - } - } - ], - "flushers": [ - { - "type": "flusher_grpc", - "detail": { - "Address": "host.docker.internal:9000" - } - } - ] - } - """ - Then wait {6} seconds - Given {nomal-case} http config as below - """ - { - "global":{ - "InputIntervalMs":10000, - "AggregatIntervalMs":1000, - "FlushIntervalMs":300, - "DefaultLogQueueSize":1000, - "DefaultLogGroupQueueSize":4, - "AlwaysOnline":true, - "DelayStopSec": 10 - }, - "inputs":[ - { - "type":"service_mock", - "detail":{ - "LogsPerSecond":10000, - "MaxLogCount":20000, - "Fields":{ - "content":"time:2017.09.12 20:55:36 json:{\"array\" : [1, 2, 3, 4], \"key1\" : \"xx\", \"key2\": false, \"key3\":123.456, \"key4\" : { \"inner1\" : 1, \"inner2\" : {\"xxxx\" : \"yyyy\", \"zzzz\" : \"中文\"}}}\n" - } - } - } - ], - "processors":[ - { - "type":"processor_anchor", - "detail":{ - "SourceKey":"content", - "NoAnchorError":true, - "Anchors":[ - { - "Start":"time", - "Stop":" ", - "FieldName":"time", - "FieldType":"string", - "ExpondJson":false - }, - { - "Start":"json:", - "Stop":"\n", - "FieldName":"val", - "FieldType":"json", - "ExpondJson":true, - "MaxExpondDepth":2, - "ExpondConnecter":"#" - } - ] - } - } - ], - "flushers": [ - { - "type": "flusher_grpc", - "detail": { - "Address": "host.docker.internal:9000" - } - } - ] - } - """ - Then wait {6} seconds - Given 
{holdon} http config as below - """ - """ - Then wait {6} seconds - Given remove http config {nomal-case} - Then wait {6} seconds - Then there is at least {20000} logs - # Resume - Then the logtail log contains {3} times of {[Resume] Resume:start} - Then the logtail log contains {3} times of {[Resume] checkpoint:Resume} - Then the logtail log contains {3} times of {[Resume] Resume:success} - Then the logtail log contains {3} times of {[Start] [shennong_log_profile,logtail_plugin_profile] config start:success} - Then the logtail log contains {3} times of {[Start] [logtail_alarm,logtail_alarm] config start:success} - Then the logtail log contains {1} times of {[Start] [nomal-case,e2e-test-logstore] config start:success} - # Holdon - Then the logtail log contains {3} times of {[HoldOn] Hold on:start flag:0} - Then the logtail log contains {1} times of {[HoldOn] Hold on:start flag:1} - Then the logtail log contains {3} times of {[HoldOn] checkpoint:HoldOn} - Then the logtail log contains {4} times of {[HoldOn] Hold on:success} - Then the logtail log contains {2} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:begin exit:false} - Then the logtail log contains {1} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:begin exit:true} - Then the logtail log contains {3} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:success} - Then the logtail log contains {2} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:begin exit:false} - Then the logtail log contains {1} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:begin exit:true} - Then the logtail log contains {3} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:success} - Then the logtail log contains {0} times of {[nomal-case,e2e-test-logstore] Stop config in goroutine:begin} - Then the logtail log contains {1} times of {[Stop] [nomal-case,e2e-test-logstore] config stop:begin exit:true} - # always online logs - Then 
the logtail log contains {1} times of {[nomal-case,e2e-test-logstore] always online config nomal-case is deleted, stop it} - Then the logtail log contains {1} times of {[nomal-case,e2e-test-logstore] always online config nomal-case stopped, error: } - Then the logtail log contains {1} times of {[nomal-case,e2e-test-logstore] config is same after reload, use it again:0} \ No newline at end of file diff --git a/test/e2e/test_cases/block_holdon_resume/case.feature b/test/e2e/test_cases/block_holdon_resume/case.feature deleted file mode 100644 index f608d777b2..0000000000 --- a/test/e2e/test_cases/block_holdon_resume/case.feature +++ /dev/null @@ -1,72 +0,0 @@ -@input -Feature: block holdon resume - Test block holdon resume - - @e2e-core @docker-compose - Scenario: TestBlockHoldonResume - Given {docker-compose} environment - Given subcribe data from {grpc} with config - """ - """ - When start docker-compose {block_holdon_resume} - Given {block-case} local config as below - """ - enable: true - global: - InputIntervalMs: 10000 - AggregatIntervalMs: 1000 - FlushIntervalMs: 300 - DefaultLogQueueSize: 1000 - DefaultLogGroupQueueSize: 10 - inputs: - - Type: service_mock - LogsPerSecond: 100000 - MaxLogCount: 200000 - Fields: - content: > - time:2017.09.12 20:55:36 json:{"array" : [1, 2, 3, 4], "key1" : "xx", - "key2": false, "key3":123.456, "key4" : { "inner1" : 1, "inner2" : - {"xxxx" : "yyyy", "zzzz" : "中文"}}} - processors: - - Type: processor_anchor - SourceKey: content - NoAnchorError: true - Anchors: - - Start: time - Stop: ' ' - FieldName: time - FieldType: string - ExpondJson: false - - Start: 'json:' - Stop: |+ - FieldName: val - FieldType: json - ExpondJson: true - MaxExpondDepth: 2 - ExpondConnecter: '#' - flushers: - - Type: flusher_checker - Block: true - """ - Then wait {10} seconds - Given remove http config {block-case/1} - Then wait {10} seconds - # Resume - Then the logtail log contains {1} times of {[Resume] Resume:start} - Then the logtail log contains 
{1} times of {[Resume] checkpoint:Resume} - Then the logtail log contains {1} times of {[Resume] Resume:success} - Then the logtail log contains {1} times of {[Start] [shennong_log_profile,logtail_plugin_profile] config start:success} - Then the logtail log contains {1} times of {[Start] [logtail_alarm,logtail_alarm] config start:success} - Then the logtail log contains {1} times of {[Start] [block-case/1] config start:success} - # Holdon - Then the logtail log contains {1} times of {[HoldOn] Hold on:start flag:0} - Then the logtail log contains {1} times of {[HoldOn] Hold on:start flag:1} - Then the logtail log contains {1} times of {[HoldOn] checkpoint:HoldOn} - Then the logtail log contains {2} times of {[HoldOn] Hold on:success} - Then the logtail log contains {1} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:begin exit:true} - Then the logtail log contains {1} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:success} - Then the logtail log contains {1} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:begin exit:true} - Then the logtail log contains {1} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:success} - Then the logtail log contains {1} times of {[block-case/1] Stop config in goroutine:begin} - Then the logtail log contains {1} times of {[Stop] [block-case/1] config stop:begin exit:true} - Then the logtail log contains {1} times of {[block-case/1] AlarmType:CONFIG_STOP_TIMEOUT_ALARM timeout when stop config, goroutine might leak:} diff --git a/test/e2e/test_cases/input_container_stdio/case.feature b/test/e2e/test_cases/input_container_stdio/case.feature index be89bd3e0a..0c650bc872 100644 --- a/test/e2e/test_cases/input_container_stdio/case.feature +++ b/test/e2e/test_cases/input_container_stdio/case.feature @@ -24,7 +24,7 @@ Feature: input container stdio Then the log tags match kv """ _image_name_: ".*_container:latest$" - _container_name_: ".*-container-1$" + 
_container_name_: ".*[-_]container[-_]1$" _container_ip_: ^\b(?:(?:2(?:[0-4][0-9]|5[0-5])|[0-1]?[0-9]?[0-9])\.){3}(?:(?:2([0-4][0-9]|5[0-5])|[0-1]?[0-9]?[0-9]))\b$ """ Then the log fields match kv diff --git a/test/e2e/test_cases/input_container_stdio_multiline/case.feature b/test/e2e/test_cases/input_container_stdio_multiline/case.feature index a2551e5af3..48426c4a61 100644 --- a/test/e2e/test_cases/input_container_stdio_multiline/case.feature +++ b/test/e2e/test_cases/input_container_stdio_multiline/case.feature @@ -27,7 +27,7 @@ Feature: input container stdio multiline Then the log tags match kv """ _image_name_: ".*_container:latest$" - _container_name_: ".*-container-1$" + _container_name_: ".*[-_]container[-_]1$" _container_ip_: ^\b(?:(?:2(?:[0-4][0-9]|5[0-5])|[0-1]?[0-9]?[0-9])\.){3}(?:(?:2([0-4][0-9]|5[0-5])|[0-1]?[0-9]?[0-9]))\b$ """ Then the log fields match kv diff --git a/test/e2e/test_cases/input_docker_rawstdout/case.feature b/test/e2e/test_cases/input_docker_rawstdout/case.feature index 4fe948777f..5dc990ddc7 100644 --- a/test/e2e/test_cases/input_docker_rawstdout/case.feature +++ b/test/e2e/test_cases/input_docker_rawstdout/case.feature @@ -24,6 +24,6 @@ Feature: input docker rawstdout content: "^hello$" _source_: "^stdout$" _image_name_: ".*_container:latest$" - _container_name_: ".*-container-1$" + _container_name_: ".*[-_]container[-_]1$" _container_ip_: ^\b(?:(?:2(?:[0-4][0-9]|5[0-5])|[0-1]?[0-9]?[0-9])\.){3}(?:(?:2([0-4][0-9]|5[0-5])|[0-1]?[0-9]?[0-9]))\b$ """ \ No newline at end of file diff --git a/test/e2e/test_cases/input_docker_rawstdout_multiline/case.feature b/test/e2e/test_cases/input_docker_rawstdout_multiline/case.feature index ce8d6531d8..da2eff2828 100644 --- a/test/e2e/test_cases/input_docker_rawstdout_multiline/case.feature +++ b/test/e2e/test_cases/input_docker_rawstdout_multiline/case.feature @@ -24,6 +24,6 @@ Feature: input docker rawstdout multiline content: "^hello$" _source_: "^stdout$" _image_name_: ".*_container:latest$" - 
_container_name_: ".*-container-1$" + _container_name_: ".*[-_]container[-_]1$" _container_ip_: ^\b(?:(?:2(?:[0-4][0-9]|5[0-5])|[0-1]?[0-9]?[0-9])\.){3}(?:(?:2([0-4][0-9]|5[0-5])|[0-1]?[0-9]?[0-9]))\b$ """ \ No newline at end of file diff --git a/test/e2e/test_cases/input_docker_stdout/case.feature b/test/e2e/test_cases/input_docker_stdout/case.feature index 6fb839ffb9..30a4b10157 100644 --- a/test/e2e/test_cases/input_docker_stdout/case.feature +++ b/test/e2e/test_cases/input_docker_stdout/case.feature @@ -24,6 +24,6 @@ Feature: input docker stdout content: "^hello$" _source_: "^stdout$" _image_name_: ".*_container:latest$" - _container_name_: ".*-container-1$" + _container_name_: ".*[-_]container[-_]1$" _container_ip_: ^\b(?:(?:2(?:[0-4][0-9]|5[0-5])|[0-1]?[0-9]?[0-9])\.){3}(?:(?:2([0-4][0-9]|5[0-5])|[0-1]?[0-9]?[0-9]))\b$ """ \ No newline at end of file diff --git a/test/e2e/test_cases/input_docker_stdout_multiline/case.feature b/test/e2e/test_cases/input_docker_stdout_multiline/case.feature index 02f2fc8b75..46c10a283b 100644 --- a/test/e2e/test_cases/input_docker_stdout_multiline/case.feature +++ b/test/e2e/test_cases/input_docker_stdout_multiline/case.feature @@ -26,6 +26,6 @@ Feature: input docker stdout multiline content: "^today\nhello$" _source_: "^stdout$" _image_name_: ".*_container:latest$" - _container_name_: ".*-container-1$" + _container_name_: ".*[-_]container[-_]1$" _container_ip_: ^\b(?:(?:2(?:[0-4][0-9]|5[0-5])|[0-1]?[0-9]?[0-9])\.){3}(?:(?:2([0-4][0-9]|5[0-5])|[0-1]?[0-9]?[0-9]))\b$ """ \ No newline at end of file diff --git a/test/e2e/test_cases/load_same_block_config/case.feature b/test/e2e/test_cases/load_same_block_config/case.feature deleted file mode 100644 index 30b5300323..0000000000 --- a/test/e2e/test_cases/load_same_block_config/case.feature +++ /dev/null @@ -1,154 +0,0 @@ -@input -Feature: load same block config - Test load same block config - - @e2e-core @docker-compose - Scenario: TestLoadSameBlockConfig - Given {docker-compose} 
environment - Given subcribe data from {grpc} with config - """ - """ - When start docker-compose {load_same_block_config} - Given {block-case} http config as below - """ - { - "global":{ - "InputIntervalMs":10000, - "AggregatIntervalMs":1000, - "FlushIntervalMs":300, - "DefaultLogQueueSize":1000, - "DefaultLogGroupQueueSize":4 - }, - "inputs":[ - { - "type":"service_mock", - "detail":{ - "LogsPerSecond":10000, - "MaxLogCount":20000, - "Fields":{ - "content":"time:2017.09.12 20:55:36 json:{\"array\" : [1, 2, 3, 4], \"key1\" : \"xx\", \"key2\": false, \"key3\":123.456, \"key4\" : { \"inner1\" : 1, \"inner2\" : {\"xxxx\" : \"yyyy\", \"zzzz\" : \"中文\"}}}\n" - } - } - } - ], - "processors":[ - { - "type":"processor_anchor", - "detail":{ - "SourceKey":"content", - "NoAnchorError":true, - "Anchors":[ - { - "Start":"time", - "Stop":" ", - "FieldName":"time", - "FieldType":"string", - "ExpondJson":false - }, - { - "Start":"json:", - "Stop":"\n", - "FieldName":"val", - "FieldType":"json", - "ExpondJson":true, - "MaxExpondDepth":2, - "ExpondConnecter":"#" - } - ] - } - } - ], - "flushers":[ - { - "type":"flusher_checker", - "detail":{ - "Block":true - } - } - ] - } - """ - Given {block-case} http config as below - """ - { - "global":{ - "InputIntervalMs":10000, - "AggregatIntervalMs":1000, - "FlushIntervalMs":300, - "DefaultLogQueueSize":1000, - "DefaultLogGroupQueueSize":4 - }, - "inputs":[ - { - "type":"service_mock", - "detail":{ - "LogsPerSecond":10000, - "MaxLogCount":20000, - "Fields":{ - "content":"time:2017.09.12 20:55:36 json:{\"array\" : [1, 2, 3, 4], \"key1\" : \"xx\", \"key2\": false, \"key3\":123.456, \"key4\" : { \"inner1\" : 1, \"inner2\" : {\"xxxx\" : \"yyyy\", \"zzzz\" : \"中文\"}}}\n" - } - } - } - ], - "processors":[ - { - "type":"processor_anchor", - "detail":{ - "SourceKey":"content", - "NoAnchorError":true, - "Anchors":[ - { - "Start":"time", - "Stop":" ", - "FieldName":"time", - "FieldType":"string", - "ExpondJson":false - }, - { - "Start":"json:", - 
"Stop":"\n", - "FieldName":"val", - "FieldType":"json", - "ExpondJson":true, - "MaxExpondDepth":2, - "ExpondConnecter":"#" - } - ] - } - } - ], - "flushers":[ - { - "type":"flusher_checker", - "detail":{ - "Block":true - } - } - ] - } - """ - Then wait {5} seconds - Given remove http config {block-case} - Then wait {5} seconds - # Resume - Then the logtail log contains {2} times of {[Resume] Resume:start} - Then the logtail log contains {2} times of {[Resume] checkpoint:Resume} - Then the logtail log contains {2} times of {[Resume] Resume:success} - Then the logtail log contains {2} times of {[Start] [shennong_log_profile,logtail_plugin_profile] config start:success} - Then the logtail log contains {2} times of {[Start] [logtail_alarm,logtail_alarm] config start:success} - Then the logtail log contains {1} times of {[Start] [block-case,e2e-test-logstore] config start:success} - # Holdon - Then the logtail log contains {2} times of {[HoldOn] Hold on:start flag:0} - Then the logtail log contains {1} times of {[HoldOn] Hold on:start flag:1} - Then the logtail log contains {2} times of {[HoldOn] checkpoint:HoldOn} - Then the logtail log contains {3} times of {[HoldOn] Hold on:success} - Then the logtail log contains {1} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:begin exit:false} - Then the logtail log contains {1} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:begin exit:true} - Then the logtail log contains {2} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:success} - Then the logtail log contains {1} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:begin exit:false} - Then the logtail log contains {1} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:begin exit:true} - Then the logtail log contains {2} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:success} - Then the logtail log contains {1} times of {[block-case,e2e-test-logstore] Stop config in 
goroutine:begin} - Then the logtail log contains {0} times of {[Stop] [block-case,e2e-test-logstore] config stop:begin exit:true} - Then the logtail log contains {1} times of {[Stop] [block-case,e2e-test-logstore] config stop:begin exit:false} - Then the logtail log contains {1} times of {[block-case,e2e-test-logstore] AlarmType:CONFIG_STOP_TIMEOUT_ALARM timeout when stop config, goroutine might leak:} diff --git a/test/e2e/test_cases/normal_holdon_resume/case.feature b/test/e2e/test_cases/normal_holdon_resume/case.feature deleted file mode 100644 index a789db5c8e..0000000000 --- a/test/e2e/test_cases/normal_holdon_resume/case.feature +++ /dev/null @@ -1,46 +0,0 @@ -@input -Feature: normal holdon resume - Test normal holdon resume - - @e2e-core @docker-compose - Scenario: TestNormalHoldonResume - Given {docker-compose} environment - Given subcribe data from {grpc} with config - """ - """ - When start docker-compose {normal_holdon_resume} - Given {mock-metric-case} local config as below - """ - enable: true - inputs: - - Type: metric_mock - IntervalMs: 1000 - Tags: - tag1: aaaa - tag2: bbb - Fields: - content: xxxxx - time: '2017.09.12 20:55:36' - """ - Then wait {15} seconds - Given remove http config {mock-metric-case/1} - Then there is at least {5} logs - # Resume - Then the logtail log contains {1} times of {[Resume] Resume:start} - Then the logtail log contains {1} times of {[Resume] checkpoint:Resume} - Then the logtail log contains {1} times of {[Resume] Resume:success} - Then the logtail log contains {1} times of {[Start] [shennong_log_profile,logtail_plugin_profile] config start:success} - Then the logtail log contains {1} times of {[Start] [logtail_alarm,logtail_alarm] config start:success} - Then the logtail log contains {1} times of {[Start] [mock-metric-case/1] config start:success} - # Holdon - Then the logtail log contains {1} times of {[HoldOn] Hold on:start flag:0} - Then the logtail log contains {1} times of {[HoldOn] Hold on:start flag:1} - Then 
the logtail log contains {1} times of {[HoldOn] checkpoint:HoldOn} - Then the logtail log contains {2} times of {[HoldOn] Hold on:success} - Then the logtail log contains {1} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:begin exit:true} - Then the logtail log contains {1} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:success} - Then the logtail log contains {1} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:begin exit:true} - Then the logtail log contains {1} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:success} - Then the logtail log contains {1} times of {[mock-metric-case/1] Stop config in goroutine:begin} - Then the logtail log contains {1} times of {[Stop] [mock-metric-case/1] config stop:begin exit:true} - Then the logtail log contains {1} times of {[Stop] [mock-metric-case/1] config stop:success} diff --git a/test/e2e/test_cases/recover_holdon_resume/case.feature b/test/e2e/test_cases/recover_holdon_resume/case.feature deleted file mode 100644 index 5fea42cea1..0000000000 --- a/test/e2e/test_cases/recover_holdon_resume/case.feature +++ /dev/null @@ -1,69 +0,0 @@ -@input -Feature: recover holdon resume - Test recover holdon resume - - @e2e-core @docker-compose - Scenario: TestRecoverHoldonResume - Given {docker-compose} environment - Given subcribe data from {grpc} with config - """ - """ - When start docker-compose {recover_holdon_resume} - Given {recover-case} local config as below - """ - enable: true - global: - InputIntervalMs: 10000 - AggregatIntervalMs: 300 - FlushIntervalMs: 300 - DefaultLogQueueSize: 1000 - DefaultLogGroupQueueSize: 4 - inputs: - - Type: service_mock - LogsPerSecond: 10000 - MaxLogCount: 20000 - Fields: - content: > - time:2017.09.12 20:55:36 json:{"array" : [1, 2, 3, 4], "key1" : "xx", - "key2": false, "key3":123.456, "key4" : { "inner1" : 1, "inner2" : - {"xxxx" : "yyyy", "zzzz" : "中文"}}} - processors: - - Type: processor_anchor - SourceKey: 
content - NoAnchorError: true - Anchors: - - Start: time - Stop: ' ' - FieldName: time - FieldType: string - ExpondJson: false - - Start: 'json:' - Stop: |+ - FieldName: val - FieldType: json - ExpondJson: true - MaxExpondDepth: 2 - ExpondConnecter: '#' - """ - Then wait {10} seconds - Given remove http config {recover-case/1} - Then wait {10} seconds - # Resume - Then the logtail log contains {1} times of {[Resume] Resume:start} - Then the logtail log contains {1} times of {[Resume] checkpoint:Resume} - Then the logtail log contains {1} times of {[Resume] Resume:success} - Then the logtail log contains {1} times of {[Start] [shennong_log_profile,logtail_plugin_profile] config start:success} - Then the logtail log contains {1} times of {[Start] [logtail_alarm,logtail_alarm] config start:success} - Then the logtail log contains {1} times of {[Start] [recover-case/1] config start:success} - # Holdon - Then the logtail log contains {1} times of {[HoldOn] Hold on:start flag:0} - Then the logtail log contains {1} times of {[HoldOn] Hold on:start flag:1} - Then the logtail log contains {1} times of {[HoldOn] checkpoint:HoldOn} - Then the logtail log contains {2} times of {[HoldOn] Hold on:success} - Then the logtail log contains {1} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:begin exit:true} - Then the logtail log contains {1} times of {[Stop] [shennong_log_profile,logtail_plugin_profile] config stop:success} - Then the logtail log contains {1} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:begin exit:true} - Then the logtail log contains {1} times of {[Stop] [logtail_alarm,logtail_alarm] config stop:success} - Then the logtail log contains {1} times of {[recover-case/1] Stop config in goroutine:begin} - Then the logtail log contains {1} times of {[Stop] [recover-case/1] config stop:begin exit:true} - Then the logtail log contains {1} times of {[Stop] [recover-case/1] config stop:success} diff --git 
a/test/engine/control/config.go b/test/engine/control/config.go index ae32887940..3e68a8bd1d 100644 --- a/test/engine/control/config.go +++ b/test/engine/control/config.go @@ -32,7 +32,7 @@ import ( "github.com/alibaba/ilogtail/test/engine/setup/subscriber" ) -const iLogtailLocalConfigDir = "/etc/ilogtail/config/local" +const iLogtailLocalConfigDir = "/usr/local/loongcollector/conf/local" const lotailpluginHTTPAddress = "ilogtailC:18689" const E2EProjectName = "e2e-test-project" const E2ELogstoreName = "e2e-test-logstore" diff --git a/test/engine/setup/dockercompose/compose.go b/test/engine/setup/dockercompose/compose.go index fda303e3f2..351552f7e1 100644 --- a/test/engine/setup/dockercompose/compose.go +++ b/test/engine/setup/dockercompose/compose.go @@ -56,13 +56,13 @@ services: interval: 1s retries: 10 ilogtailC: - image: aliyun/ilogtail:2.0.0 + image: aliyun/loongcollector:0.0.1 hostname: ilogtail privileged: true pid: host volumes: - - %s:/ilogtail/default_flusher.json - - %s:/ilogtail/config/local + - %s:/loongcollector/conf/default_flusher.json + - %s:/loongcollector/conf/pipeline_config/local - /:/logtail_host - /var/run/docker.sock:/var/run/docker.sock - /sys/:/sys/ @@ -77,7 +77,7 @@ services: - ALICLOUD_LOG_PLUGIN_ENV_CONFIG=false - ALIYUN_LOGTAIL_USER_DEFINED_ID=1111 healthcheck: - test: "cat ilogtail.LOG" + test: "cat /loongcollector/log/loongcollector.LOG" interval: 15s timeout: 5s ` @@ -119,9 +119,9 @@ func (c *ComposeBooter) Start(ctx context.Context) error { if execError.Error == nil { break } + logger.Error(context.Background(), "START_DOCKER_COMPOSE_ERROR", + "stdout", execError.Error.Error()) if i == 2 { - logger.Error(context.Background(), "START_DOCKER_COMPOSE_ERROR", - "stdout", execError.Error.Error()) return execError.Error } execError = testcontainers.NewLocalDockerCompose([]string{config.CaseHome + finalFileName}, projectName).Down() @@ -138,7 +138,10 @@ func (c *ComposeBooter) Start(ctx context.Context) error { c.cli = cli list, err := 
cli.ContainerList(context.Background(), types.ContainerListOptions{ - Filters: filters.NewArgs(filters.Arg("name", fmt.Sprintf("%s-ilogtailC", projectName))), + Filters: filters.NewArgs( + filters.Arg("name", fmt.Sprintf("%s_ilogtailC*", projectName)), + filters.Arg("name", fmt.Sprintf("%s-ilogtailC*", projectName)), + ), }) if len(list) != 1 { logger.Errorf(context.Background(), "LOGTAIL_COMPOSE_ALARM", "logtail container size is not equal 1, got %d count", len(list)) @@ -222,13 +225,13 @@ func (c *ComposeBooter) CopyCoreLogs() { if c.logtailID != "" { _ = os.Remove(config.LogDir) _ = os.Mkdir(config.LogDir, 0750) - cmd := exec.Command("docker", "cp", c.logtailID+":/ilogtail/ilogtail.LOG", config.LogDir) + cmd := exec.Command("docker", "cp", c.logtailID+":/loongcollector/log/loongcollector.LOG", config.LogDir) output, err := cmd.CombinedOutput() logger.Debugf(context.Background(), "\n%s", string(output)) if err != nil { logger.Error(context.Background(), "COPY_LOG_ALARM", "type", "main", "err", err) } - cmd = exec.Command("docker", "cp", c.logtailID+":/ilogtail/logtail_plugin.LOG", config.LogDir) + cmd = exec.Command("docker", "cp", c.logtailID+":/loongcollector/log/go_plugin.LOG", config.LogDir) output, err = cmd.CombinedOutput() logger.Debugf(context.Background(), "\n%s", string(output)) if err != nil { diff --git a/test/engine/verify/sys_logtail_log.go b/test/engine/verify/sys_logtail_log.go index e39cb90ba5..f3da4b45f7 100644 --- a/test/engine/verify/sys_logtail_log.go +++ b/test/engine/verify/sys_logtail_log.go @@ -31,9 +31,9 @@ import ( func LogtailPluginLog(ctx context.Context, expectCount int, expectStr string) (context.Context, error) { dockercompose.CopyCoreLogs() - logtailPluginLog := config.LogDir + "/logtail_plugin.LOG" + logtailPluginLog := config.LogDir + "/go_plugin.LOG" count, err := lineCounter(logtailPluginLog) - logger.Infof(context.Background(), "find %d lines of the logtail plugin log", count) + logger.Infof(context.Background(), "find %d 
lines of the logtail plugin log, file: %s", count, logtailPluginLog) if err != nil { return ctx, fmt.Errorf("read log file %s failed: %v", logtailPluginLog, err) } diff --git a/tools/coverage-diff/main.py b/tools/coverage-diff/main.py index 086265837e..51f837d289 100644 --- a/tools/coverage-diff/main.py +++ b/tools/coverage-diff/main.py @@ -1,53 +1,104 @@ import argparse import subprocess import sys -import time +import json +import re + +ERROR_COLOR = '\033[31m' +RESET_COLOR = '\033[0m' def get_changed_files(): try: - # Run the git command to get the list of changed files - result = subprocess.Popen('git diff --name-only -r HEAD^1 HEAD', shell=True, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - # Split the result by new line to get each file name - out, err = result.communicate() - changed_files = out.splitlines() - result_files = [] - for file in changed_files: - fileStr = file.decode('utf-8') - if fileStr.startswith('core'): - result_files.append(fileStr[5:]) - return result_files + result = subprocess.check_output(['git', 'diff', '--unified=0', 'HEAD^1' ,'HEAD'], universal_newlines=True) + return result except subprocess.CalledProcessError as e: - print(f"An error occurred while running git command: {e}") + print(f'An error occurred while running git command: {e}') return [] +def parse_diff(diff_output): + changes = {} + + current_file = None + for line in diff_output.split('\n'): + # 识别文件名 + file_match = re.match(r'^diff --git a/(.*) b/(.*)$', line) + if file_match: + current_file = file_match.group(2) + changes[current_file] = [] + continue + + # 识别文件中的行变化 + hunk_match = re.match(r'^@@ -\d+(,\d+)? \+(\d+)(,(\d+))? 
@@', line) + if hunk_match and current_file: + start_line = int(hunk_match.group(2)) + line_count = int(hunk_match.group(4) if hunk_match.group(4) else 1) + for i in range(start_line, start_line + line_count): + changes[current_file].append(i) + + return changes + if __name__ == '__main__': - parser = argparse.ArgumentParser(description="A simple argparse example") - parser.add_argument("path", type=str, help="The path of coverage file") + parser = argparse.ArgumentParser(description='A simple argparse example') + parser.add_argument('--path', type=str, help='The path of coverage file') + parser.add_argument('--summary_path', type=str, help='The path of coverage file') args = parser.parse_args() changed_files = get_changed_files() - line_cache = "" - not_satified = [] + changed_lines = parse_diff(changed_files) + + with open(args.summary_path, 'r') as file: + summary = json.load(file) + print('='*20) + print('Total coverage rate: ', summary['line_percent'], '%') + print('='*20) + with open(args.path, 'r') as file: - for line in file: - if len(line_cache) > 0: - line = line_cache + line - line_cache = "" - if '/' in line or ('%' in line and 'TOTAL' not in line): - for changed_file in changed_files: - if line.startswith(changed_file): - units = line.split() - if len(units) < 4: - # some files with long filename will be split into 2 lines - line_cache = line - continue - coverage_rate = int(units[3][:-1]) - if coverage_rate < 50: - not_satified.append(changed_file) - print(line, flush=True) - break - else: - print(line, flush=True) - if len(not_satified) > 0: - print(f"Coverage rate is less than 50% for the following files: {not_satified}", flush=True) + coverage = json.load(file) + not_satified = {} + not_satified_count = 0 + satified_count = 0 + + for file in coverage['files']: + if 'core/' + file['file'] in changed_lines: + file_name = 'core/' + file['file'] + cur_satified = [] + cur_not_satified = [] + i = 0 + j = 0 + while i < len(file['lines']) and j < 
len(changed_lines[file_name]): + if file['lines'][i]['line_number'] == changed_lines[file_name][j]: + if file['lines'][i]['count'] == 0: + cur_not_satified.append(file['lines'][i]['line_number']) + else: + cur_satified.append(file['lines'][i]['line_number']) + i += 1 + j += 1 + elif file['lines'][i]['line_number'] < changed_lines[file_name][j]: + i += 1 + else: + j += 1 + if len(cur_satified) > 0 or len(cur_not_satified) > 0: + print('file: ', file_name) + if len(cur_satified) > 0: + print('covered lines: ', cur_satified) + satified_count += len(cur_satified) + if len(cur_not_satified) > 0: + print(f'{ERROR_COLOR}not covered lines:{RESET_COLOR} ', cur_not_satified) + not_satified_count += len(cur_not_satified) + print('') + if len(cur_not_satified) > 0: + not_satified[file_name] = cur_not_satified + + if not_satified_count + satified_count == 0: + print('No line to cover', flush=True) + sys.exit(0) + + coverage_rate = ((satified_count) / (not_satified_count + satified_count) ) * 100 + print('='*20) + if coverage_rate < 50: + print(f'{ERROR_COLOR}Diff coverage rate is less than 50%: {coverage_rate:.1f}%{RESET_COLOR}', flush=True) + print('='*20) sys.exit(1) + else: + print(f'Diff coverage rate is {coverage_rate:.1f}%', flush=True) + print('='*20) + sys.exit(0)