From f880b7a1cff6a754ca41c7143e3a169fcd56c36d Mon Sep 17 00:00:00 2001 From: Jim DeFabia Date: Fri, 17 May 2024 10:37:10 -0400 Subject: [PATCH 001/151] HPCC-31739 Document changes in moving Unicode regex from ICU to PCRE2 Signed-off-by: Jim DeFabia --- .../ECLR_mods/BltInFunc-REGEXFIND.xml | 20 +++----- .../ECLR_mods/BltInFunc-REGEXFINDSET.xml | 20 +++----- .../ECLR_mods/BltInFunc-REGEXREPLACE.xml | 50 ++++++++----------- 3 files changed, 35 insertions(+), 55 deletions(-) diff --git a/docs/EN_US/ECLLanguageReference/ECLR_mods/BltInFunc-REGEXFIND.xml b/docs/EN_US/ECLLanguageReference/ECLR_mods/BltInFunc-REGEXFIND.xml index ed04a13c6db..d0d00e4c7e1 100644 --- a/docs/EN_US/ECLLanguageReference/ECLR_mods/BltInFunc-REGEXFIND.xml +++ b/docs/EN_US/ECLLanguageReference/ECLR_mods/BltInFunc-REGEXFIND.xml @@ -66,22 +66,16 @@ find matches. The regex must be a standard Perl regular expression Perl regular expression - . We use third-party libraries to support this, so for - non-unicode text, see the Perl-compatible Regular - Expressions (PCRE2) documentation at https://www.pcre.org/current/doc/html/pcre2pattern.html. - For unicode text, see the ICU docs, the sections - 'Regular Expression Metacharacters' and 'Regular Expression Operators' at - http://userguide.icu-project.org/strings/regexp - and the links from there, in particular the section 'UnicodeSet patterns' at - http://userguide.icu-project.org/strings/unicodeset. - We use version 2.6 which should support all listed features. + . + + We use a third-party library -- Perl-compatible Regular Expressions + (PCRE2) to support this. See https://www.pcre.org/current/doc/html/pcre2syntax.html + for details on the PCRE2 pattern syntax. Example: - namesRecord := RECORD + namesRecord := RECORD STRING20 surname; STRING10 forename; STRING10 userdate; diff --git a/docs/EN_US/ECLLanguageReference/ECLR_mods/BltInFunc-REGEXFINDSET.xml b/docs/EN_US/ECLLanguageReference/ECLR_mods/BltInFunc-REGEXFINDSET.xml index f3d128303f8..0e05ef7dcdf 100644 --- a/docs/EN_US/ECLLanguageReference/ECLR_mods/BltInFunc-REGEXFINDSET.xml +++ b/docs/EN_US/ECLLanguageReference/ECLR_mods/BltInFunc-REGEXFINDSET.xml @@ -54,25 +54,19 @@ find matches. The regex must be a standard Perl regular expression Perl regular expression - . We use third-party libraries to support this, so for - non-unicode text, see the Perl-compatible Regular - Expressions (PCRE2) documentation at https://www.pcre.org/current/doc/html/pcre2pattern.html. - For unicode text, see the ICU docs, the sections - 'Regular Expression Metacharacters' and 'Regular Expression Operators' at - http://userguide.icu-project.org/strings/regexp - and the links from there, in particular the section 'UnicodeSet patterns' at - http://userguide.icu-project.org/strings/unicodeset. - We use version 2.6 which should support all listed features. + . + + We use a third-party library -- Perl-compatible Regular Expressions + (PCRE2) to support this. See https://www.pcre.org/current/doc/html/pcre2syntax.html + for details on the PCRE2 pattern syntax. REGEXFINDSET ignores capture groups. REGEXFINDSET repeatedly extracts the text matching the entire regex pattern. 
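For reference, the Perl-style syntax referred to above is that of the PCRE2 C library itself. Below is a minimal standalone sketch of the public PCRE2 C API; it is illustrative only, not the platform's actual wrapper code, and the PCRE2_UTF and PCRE2_UCP options are an assumption about a typical Unicode configuration rather than a statement of how the platform configures the library:

    // Minimal PCRE2 sketch -- illustrative only, not HPCC wrapper code.
    #define PCRE2_CODE_UNIT_WIDTH 8
    #include <pcre2.h>
    #include <stdio.h>

    int main()
    {
        int errcode = 0;
        PCRE2_SIZE erroffset = 0;
        // \p{L}+ matches one or more Unicode letters; PCRE2 property
        // classes play the role the ICU UnicodeSet patterns used to.
        pcre2_code *re = pcre2_compile((PCRE2_SPTR)"\\p{L}+",
                                       PCRE2_ZERO_TERMINATED,
                                       PCRE2_UTF | PCRE2_UCP,   // assumed Unicode build options
                                       &errcode, &erroffset, nullptr);
        if (!re)
            return 1;
        pcre2_match_data *md = pcre2_match_data_create_from_pattern(re, nullptr);
        int rc = pcre2_match(re, (PCRE2_SPTR)"the cat sat", PCRE2_ZERO_TERMINATED,
                             0, 0, md, nullptr);
        printf("matched: %s\n", rc > 0 ? "yes" : "no");
        pcre2_match_data_free(md);
        pcre2_code_free(re);
        return 0;
    }

Compiled against libpcre2-8, rc > 0 indicates a match; property classes such as \p{L} are the PCRE2 counterparts of the ICU constructs the old documentation text pointed readers to.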
Example: - sampleStr := + sampleStr := 'To: jane@example.com From: john@example.com This is the winter of our discontent.'; eMails:=REGEXFINDSET('\\w+@[a-zA-Z_]+?\\.[a-zA-Z]{2,3}' , sampleStr); OUTPUT(eMails); diff --git a/docs/EN_US/ECLLanguageReference/ECLR_mods/BltInFunc-REGEXREPLACE.xml b/docs/EN_US/ECLLanguageReference/ECLR_mods/BltInFunc-REGEXREPLACE.xml index 638d7e6ddb8..1e0e0aab910 100644 --- a/docs/EN_US/ECLLanguageReference/ECLR_mods/BltInFunc-REGEXREPLACE.xml +++ b/docs/EN_US/ECLLanguageReference/ECLR_mods/BltInFunc-REGEXREPLACE.xml @@ -64,22 +64,16 @@ string. The regex must be a standard Perl regular expression Perl regular expression - . We use third-party libraries to support this, so for - non-unicode text, see the Perl-compatible Regular - Expressions (PCRE2) documentation at https://www.pcre.org/current/doc/html/pcre2pattern.html. - For unicode text, see the ICU docs, the sections - 'Regular Expression Metacharacters' and 'Regular Expression Operators' at - http://userguide.icu-project.org/strings/regexp - and the links from there, in particular the section 'UnicodeSet patterns' at - http://userguide.icu-project.org/strings/unicodeset. - We use version 2.6 which should support all listed features. + . + + We use a third-party library -- Perl-compatible Regular Expressions + (PCRE2) to support this. See https://www.pcre.org/current/doc/html/pcre2syntax.html + for details on the PCRE2 pattern syntax. Example: - REGEXREPLACE('(.a)t', 'the cat sat on the mat', '$1p'); + REGEXREPLACE('(.a)t', 'the cat sat on the mat', '$1p'); //ASCII REGEXREPLACE(u'(.a)t', u'the cat sat on the mat', u'$1p'); //UNICODE @@ -87,28 +81,26 @@ REGEXREPLACE(u'(.a)t', u'the cat sat on the mat', u'$1p'); inrec := {STRING10 str, UNICODE10 ustr}; inset := DATASET([{'She', u'Eins'}, {'Sells', u'Zwei'}, -{'Sea', u'Drei'}, {'Shells', u'Vier'}], inrec); -outrec := {STRING10 orig, STRING10 withcase, STRING10 - wocase, -UNICODE10 uorig,UNICODE10 uwithcase,UNICODE10 uwocase}; + {'Sea', u'Drei'}, {'Shells', u'Vier'}], inrec); +outrec := {STRING10 orig, STRING10 withcase, STRING10 + wocase,UNICODE10 uorig,UNICODE10 uwithcase,UNICODE10 uwocase}; outrec trans(inrec l) := TRANSFORM -SELF.orig := l.str; -SELF.withcase := REGEXREPLACE('s', l.str, 'f'); -SELF.wocase := REGEXREPLACE('s', l.str, 'f', NOCASE); -SELF.uorig := l.ustr; -SELF.uwithcase := REGEXREPLACE(u'e', l.ustr, u'\u00EB'); -SELF.uwocase := REGEXREPLACE(u'e', l.ustr, u'\u00EB', - NOCASE); + SELF.orig := l.str; + SELF.withcase := REGEXREPLACE('s', l.str, 'f'); + SELF.wocase := REGEXREPLACE('s', l.str, 'f', NOCASE); + SELF.uorig := l.ustr; + SELF.uwithcase := REGEXREPLACE(u'e', l.ustr, u'\u00EB'); + SELF.uwocase := REGEXREPLACE(u'e', l.ustr, u'\u00EB',NOCASE); END; OUTPUT(PROJECT(inset, trans(LEFT))); /* the result set is: -orig withcase wocase uorig uwithcase uwocase -She She fhe Eins Eins \xc3\xabins -Sells Sellf fellf Zwei Zw\xc3\xabi Zw\xc3\xabi -Sea Sea fea Drei Dr\xc3\xabi Dr\xc3\xabi -Shells Shellf fhellf Vier Vi\xc3\xabr Vi\xc3\xabr */ +orig withcase wocase uorig uwithcase uwocase +She She fhe Eins Eins \xc3\xabins +Sells Sellf fellf Zwei Zw\xc3\xabi Zw\xc3\xabi +Sea Sea fea Drei Dr\xc3\xabi Dr\xc3\xabi +Shells Shellf fhellf Vier Vi\xc3\xabr Vi\xc3\xabr */ See Also: PARSE, Date: Wed, 22 May 2024 13:26:28 +0100 Subject: [PATCH 002/151] HPCC-31511 Avoid keeping the workunit graphs in memory Change default for 9.6 and beyond Signed-off-by: Richard Chapman --- common/workunit/workunit.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/common/workunit/workunit.cpp b/common/workunit/workunit.cpp index 2b7f41f0d22..d476a4f988f 100644 --- a/common/workunit/workunit.cpp +++ b/common/workunit/workunit.cpp @@ -10433,7 +10433,7 @@ IPropertyTree * CLocalWUGraph::getXGMMLTreeRaw() const return p->getPropTree("xgmml"); } -bool workunitGraphCacheEnabled = true; +bool workunitGraphCacheEnabled = false; IPropertyTree * CLocalWUGraph::getXGMMLTree(bool doMergeProgress, bool doFormatStats) const { From bfa2cce5d99069ba9ccd3015af4b6e5c16983f3a Mon Sep 17 00:00:00 2001 From: Jack Del Vecchio Date: Fri, 31 May 2024 13:03:31 +0000 Subject: [PATCH 003/151] HPCC-31979 Create new directories when writing Parquet files Signed-off-by: Jack Del Vecchio --- plugins/parquet/parquetembed.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/parquet/parquetembed.cpp b/plugins/parquet/parquetembed.cpp index 0494835173b..2349ea70530 100644 --- a/plugins/parquet/parquetembed.cpp +++ b/plugins/parquet/parquetembed.cpp @@ -612,6 +612,7 @@ arrow::Status ParquetWriter::openWriteFile() destination.insert(destination.find(".parquet"), std::to_string(activityCtx->querySlave())); } + recursiveCreateDirectoryForFile(destination.c_str()); std::shared_ptr outfile; PARQUET_ASSIGN_OR_THROW(outfile, arrow::io::FileOutputStream::Open(destination)); From 0403e49bc5dc127f1e8632eeab842b82ef0541b3 Mon Sep 17 00:00:00 2001 From: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> Date: Wed, 22 May 2024 15:44:04 -0400 Subject: [PATCH 004/151] HPCC-31900 ECL Watch v9 global search DFU WU & File tabs Fix issue where from the global search results, switching to the DFU Workunits tab would show an empty list and switching to the Files tab would crash ECL Watch with an unhandled JavaScript exception Signed-off-by: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> --- esp/src/eclwatch/GetDFUWorkunitsWidget.js | 4 --- .../eclwatch/templates/DFUQueryWidget.html | 2 +- esp/src/src-react/components/DFUWorkunits.tsx | 4 +-- esp/src/src/ESPDFUWorkunit.ts | 32 +++++++++++++++++-- esp/src/src/ESPLogicalFile.ts | 2 +- esp/src/src/FileSpray.ts | 2 ++ 6 files changed, 35 insertions(+), 11 deletions(-) diff --git a/esp/src/eclwatch/GetDFUWorkunitsWidget.js b/esp/src/eclwatch/GetDFUWorkunitsWidget.js index dc320468733..ea5285824ce 100644 --- a/esp/src/eclwatch/GetDFUWorkunitsWidget.js +++ b/esp/src/eclwatch/GetDFUWorkunitsWidget.js @@ -369,8 +369,6 @@ define([ initWorkunitsGrid: function () { var context = this; var filter = this.filter.toObject(); - filter.includeTimings = true; - filter.includeTransferRate = true; var store = this.params.searchResults ? 
this.params.searchResults : new ESPDFUWorkunit.CreateWUQueryStore(); this.workunitsGrid = new declare([ESPUtil.Grid(true, true, false, false, "GetDFUWorkunitsWidget")])({ store: store, @@ -477,8 +475,6 @@ define([ refreshGrid: function (clearSelection) { var filter = this.filter.toObject(); - filter.includeTimings = true; - filter.includeTransferRate = true; this.workunitsGrid.set("query", filter); if (clearSelection) { this.workunitsGrid.clearSelection(); diff --git a/esp/src/eclwatch/templates/DFUQueryWidget.html b/esp/src/eclwatch/templates/DFUQueryWidget.html index b45187e2be7..0eb44da4b31 100644 --- a/esp/src/eclwatch/templates/DFUQueryWidget.html +++ b/esp/src/eclwatch/templates/DFUQueryWidget.html @@ -69,7 +69,7 @@ - + diff --git a/esp/src/src-react/components/DFUWorkunits.tsx b/esp/src/src-react/components/DFUWorkunits.tsx index 39170270844..a8379f14aae 100644 --- a/esp/src/src-react/components/DFUWorkunits.tsx +++ b/esp/src/src-react/components/DFUWorkunits.tsx @@ -39,8 +39,6 @@ function formatQuery(_filter): { [id: string]: any } { if (filter.Type === true) { filter.Type = "archived workunits"; } - filter.includeTimings = true; - filter.includeTransferRate = true; return filter; } @@ -83,7 +81,7 @@ export const DFUWorkunits: React.FunctionComponent = ({ // Grid --- const gridStore = React.useMemo(() => { - return store || ESPDFUWorkunit.CreateWUQueryStore({}); + return store || ESPDFUWorkunit.CreateWUQueryStore(); }, [store]); const query = React.useMemo(() => { diff --git a/esp/src/src/ESPDFUWorkunit.ts b/esp/src/src/ESPDFUWorkunit.ts index f9d3fc7165c..3de6c1ba48a 100644 --- a/esp/src/src/ESPDFUWorkunit.ts +++ b/esp/src/src/ESPDFUWorkunit.ts @@ -8,6 +8,11 @@ import * as FileSpray from "./FileSpray"; import nlsHPCC from "./nlsHPCC"; import * as Utility from "./Utility"; +import { FileSprayService, FileSpray as FileSprayNS } from "@hpcc-js/comms"; + +import { Paged } from "./store/Paged"; +import { BaseStore } from "./store/Store"; + const i18n = nlsHPCC; class Store extends ESPRequest.Store { @@ -297,14 +302,37 @@ export function isInstanceOfWorkunit(obj) { export function Get(wuid, data?) { const store = new Store(); const retVal = store.get(wuid); - if (data) { + if (data && !retVal.__hpcc_id) { retVal.updateData(data); } return retVal; } -export function CreateWUQueryStore(options) { +export function CreateWUQueryStoreLegacy(options) { let store = new Store(options); store = new Observable(store); return store; } + +const service = new FileSprayService({ baseUrl: "" }); + +export type WUQueryStore = BaseStore; + +export function CreateWUQueryStore(): BaseStore { + const store = new Paged({ + start: "PageStartFrom", + count: "PageSize", + sortBy: "Sortby", + descending: "Descending" + }, "ID", request => { + request.includeTimings = true; + request.includeTransferRate = true; + return service.GetDFUWorkunits(request).then(response => { + return { + data: response.results.DFUWorkunit.map(wu => Get(wu.ID, wu)), + total: response.NumWUs + }; + }); + }); + return new Observable(store); +} diff --git a/esp/src/src/ESPLogicalFile.ts b/esp/src/src/ESPLogicalFile.ts index 62f7a8a8e54..24d7e78720d 100644 --- a/esp/src/src/ESPLogicalFile.ts +++ b/esp/src/src/ESPLogicalFile.ts @@ -472,7 +472,7 @@ export function Get(Cluster, Name, data?) 
{ } const store = new Store(); const retVal = store.get(createID(Cluster, Name)); - if (data) { + if (data && !retVal.__hpcc_id) { lang.mixin(data, { __hpcc_id: createID(data.NodeGroup, data.Name), __hpcc_isDir: false, diff --git a/esp/src/src/FileSpray.ts b/esp/src/src/FileSpray.ts index 1dc36490c37..8263f04345f 100644 --- a/esp/src/src/FileSpray.ts +++ b/esp/src/src/FileSpray.ts @@ -327,6 +327,8 @@ export function CreateLandingZonesFilterStore(options) { } export function GetDFUWorkunits(params) { + params.request.includeTimings = true; + params.request.includeTransferRate = true; return ESPRequest.send("FileSpray", "GetDFUWorkunits", params); } From 0e2fcedea780c4712fa1472451e49e01bdc3c900 Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Fri, 31 May 2024 16:15:05 +0100 Subject: [PATCH 005/151] Split off 9.0.114 Signed-off-by: Jake Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/localroxie.yaml.fixed | 4 ++-- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/roxie.yaml.fixed | 16 ++++++++-------- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 16 files changed, 35 insertions(+), 35 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index f510ae7ad10..e13e0b82478 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.0.113-closedown0 +version: 9.0.115-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.0.113-closedown0 +appVersion: 9.0.115-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index d9efa42079f..5e5f62840e2 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1314,7 +1314,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index cbb749ebc7b..503ce98f06a 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -50,7 +50,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 annotations: checksum/config: {{ $configSHA }} spec: diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index dc7858734d1..b8f744494a1 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -82,7 +82,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 6db713f6534..3cb50bb8770 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -56,7 +56,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 0a4e6a6b28a..1633dc80d63 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -135,7 +135,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index c6b4409a292..a703430ed65 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -57,7 +57,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -142,7 +142,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 6c08b88e79c..c7e902c158a 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -64,7 +64,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 2ce4e023070..781c395abb3 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -117,7 +117,7 @@ spec: server: {{ .name | quote }} accessDali: "yes" app: {{ $application }} - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 3fc9e88cb61..447a38d6189 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -70,7 +70,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml.fixed b/helm/hpcc/templates/localroxie.yaml.fixed index 22a619c9ed4..9cda81d8b8f 100644 --- a/helm/hpcc/templates/localroxie.yaml.fixed +++ b/helm/hpcc/templates/localroxie.yaml.fixed @@ -74,13 +74,13 @@ spec: accessDali: "yes" accessEsp: "yes" <<<<<<< HEAD - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 9c1a21f0f84..0662bd2c291 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -120,7 +120,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -180,7 +180,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -242,7 +242,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -347,7 +347,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/roxie.yaml.fixed b/helm/hpcc/templates/roxie.yaml.fixed index b8af556380a..55a501816f0 100644 --- a/helm/hpcc/templates/roxie.yaml.fixed +++ b/helm/hpcc/templates/roxie.yaml.fixed @@ -126,7 +126,7 @@ spec: run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} <<<<<<< HEAD - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -134,7 +134,7 @@ spec: {{ toYaml $toposerver.labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -193,10 +193,10 @@ metadata: name: {{ $commonCtx.toponame | quote }} labels: <<<<<<< HEAD - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} ======= - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} >>>>>>> origin/candidate-9.6.x spec: @@ -260,7 +260,7 @@ spec: accessDali: "yes" accessEsp: "yes" <<<<<<< HEAD - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ 
-269,7 +269,7 @@ spec: {{ toYaml $roxie.labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -379,7 +379,7 @@ spec: accessDali: "yes" accessEsp: "yes" <<<<<<< HEAD - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -387,7 +387,7 @@ spec: {{ toYaml $roxie.labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index cb8efb2c45b..f21fe53a9e3 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -52,7 +52,7 @@ spec: run: {{ $serviceName | quote }} server: {{ $serviceName | quote }} accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 3a18d9ae781..e889da23c4a 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -82,7 +82,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -147,7 +147,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -214,7 +214,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ -347,7 +347,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -412,7 +412,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.0.113-closedown0 + helmVersion: 9.0.115-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" 
"instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index cc126433607..a4c5c3ed115 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 0 ) -set ( HPCC_POINT 113 ) +set ( HPCC_POINT 115 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-05-23T17:16:46Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-05-31T15:15:05Z" ) ### From 907be4fcd071a532f7304fb02115f006baf6735a Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Fri, 31 May 2024 16:16:03 +0100 Subject: [PATCH 006/151] Split off 9.2.92 Signed-off-by: Jake Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index a6063fa3afd..9bb52d01957 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.2.91-closedown0 +version: 9.2.93-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.2.91-closedown0 +appVersion: 9.2.93-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index e00705a2b1f..0191fd5a91f 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1405,7 +1405,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 44f0c6243f2..7b73ab073ca 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -50,7 +50,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index 876ad3508b0..0eda5998e8b 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -82,7 +82,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 1174af19bd7..4c13930294f 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -56,7 +56,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 7e9e10a5d76..056bf663efc 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -133,7 +133,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index ff50c624e24..305f2bea1d1 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -57,7 +57,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -140,7 +140,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 8a34344ab9d..8d94e37fde9 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -64,7 +64,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 148e95577c0..b9412020b9e 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -120,7 +120,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 159b0f2bb81..10f841e3ee7 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -70,7 +70,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 5d512b8ec13..1983af2627f 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -120,7 +120,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -178,7 +178,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -240,7 +240,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -346,7 +346,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 0aaa81fd5d7..2622fefb98f 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index bbb397994c2..7c0fbee21a3 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -82,7 +82,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -145,7 +145,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -210,7 +210,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-341,7 +341,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -404,7 +404,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.2.91-closedown0 + helmVersion: 9.2.93-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 164d057070d..5626bf07968 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 2 ) -set ( HPCC_POINT 91 ) +set ( HPCC_POINT 93 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-05-23T17:20:41Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-05-31T15:16:03Z" ) ### From 92dab1725e320fca50aa1b7b055c60435bf125fe Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Fri, 31 May 2024 16:17:10 +0100 Subject: [PATCH 007/151] Split off 9.4.66 Signed-off-by: Jake Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index bdc71f2b09b..56229305ed3 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.4.65-closedown0 +version: 9.4.67-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.4.65-closedown0 +appVersion: 9.4.67-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 043b0ff4766..ec33dc62786 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1519,7 +1519,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 9ca7bc5e143..e4a30446fe2 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index 69cea36ac7d..bfc0e3e1540 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 14780166fdd..aeb366bd8bc 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 49b761982c8..96162e8bc8f 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -60,7 +60,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -137,7 +137,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index e8dc0a634b2..c1c96ab2bf1 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -143,7 +143,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 3f2c1d9c6d6..5e7880db618 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index a661da1969b..5d5b9c8f7e1 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -122,7 +122,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index e844550ea24..4ce7bde8f29 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 216a736a819..40be2c586f4 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 95698f31849..3235571027a 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 5806bfcd1fb..b0dcbbfb0f1 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -86,7 +86,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -151,7 +151,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -218,7 +218,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-351,7 +351,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -416,7 +416,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.4.65-closedown0 + helmVersion: 9.4.67-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index aa64c86cdaa..984982439dc 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 4 ) -set ( HPCC_POINT 65 ) +set ( HPCC_POINT 67 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-05-23T17:21:58Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-05-31T15:17:10Z" ) ### From 549d562c668ba9608523bb26dbc03e606e98443e Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Fri, 31 May 2024 16:18:09 +0100 Subject: [PATCH 008/151] Split off 9.6.18 Signed-off-by: Jake Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index f132cdfe59d..2e12be1ab82 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.6.17-closedown0 +version: 9.6.19-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.6.17-closedown0 +appVersion: 9.6.19-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 5d6291ecf01..ff08cdd0e7f 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1523,7 +1523,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 5b04cbedd25..8b7230b2acd 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index a47bf723a7d..bb93c173bab 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index b5f7b9fa0e3..5e061e18697 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 03b850cb4e5..ca4d4e7b8d4 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -139,7 +139,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 7b8362d151e..a3978595efb 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -147,7 +147,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 62ef3edcc94..540178fa8e6 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 411d67f80c4..f0bed901493 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -125,7 +125,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index c938671105c..37c03ea71fd 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 4f2df839f22..c3e555c856d 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 0c2efe892a5..e04067229d1 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 1b6e9a78a4e..5e14f86d71a 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -88,7 +88,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -153,7 +153,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -220,7 +220,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-353,7 +353,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -418,7 +418,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.6.17-closedown0 + helmVersion: 9.6.19-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 8603af30897..53bfbf37dd3 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 6 ) -set ( HPCC_POINT 17 ) +set ( HPCC_POINT 19 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-05-23T17:28:32Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-05-31T15:18:09Z" ) ### From 3b6892597a5d9ba34299618fa836324543129e4e Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Mon, 3 Jun 2024 10:24:50 +0100 Subject: [PATCH 009/151] HPCC-31982 Rename AtomicTimingStressTest class so the test can run Signed-off-by: Gavin Halliday --- testing/unittests/jlibtests.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/testing/unittests/jlibtests.cpp b/testing/unittests/jlibtests.cpp index e935b67cb0e..5331e1eed7e 100644 --- a/testing/unittests/jlibtests.cpp +++ b/testing/unittests/jlibtests.cpp @@ -3095,9 +3095,9 @@ CPPUNIT_TEST_SUITE_NAMED_REGISTRATION(JlibIPTTest, "JlibIPTTest"); #include "jmutex.hpp" -class AtomicTimingTest : public CppUnit::TestFixture +class AtomicTimingStressTest : public CppUnit::TestFixture { - CPPUNIT_TEST_SUITE(AtomicTimingTest); + CPPUNIT_TEST_SUITE(AtomicTimingStressTest); CPPUNIT_TEST(runAllTests); CPPUNIT_TEST_SUITE_END(); @@ -3303,8 +3303,8 @@ class AtomicTimingTest : public CppUnit::TestFixture UInt64Array contendedTimes; }; -CPPUNIT_TEST_SUITE_REGISTRATION(AtomicTimingTest); -CPPUNIT_TEST_SUITE_NAMED_REGISTRATION(AtomicTimingTest, "AtomicTimingStressTest"); +CPPUNIT_TEST_SUITE_REGISTRATION(AtomicTimingStressTest); +CPPUNIT_TEST_SUITE_NAMED_REGISTRATION(AtomicTimingStressTest, "AtomicTimingStressTest"); //===================================================================================================================== From 2c1c7d28a67483fc420617d68799f015aa50fe9b Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Mon, 3 Jun 2024 11:58:39 +0100 Subject: [PATCH 010/151] HPCC-31982 Also add Mutex class to the timing stress tests Signed-off-by: Gavin Halliday --- testing/unittests/jlibtests.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/testing/unittests/jlibtests.cpp b/testing/unittests/jlibtests.cpp index 5331e1eed7e..c98b58521b5 100644 --- a/testing/unittests/jlibtests.cpp +++ b/testing/unittests/jlibtests.cpp @@ -3255,6 +3255,10 @@ class AtomicTimingStressTest : public CppUnit::TestFixture DO_TEST(CriticalSection, CriticalBlock, unsigned __int64, 2, 1); DO_TEST(CriticalSection, CriticalBlock, unsigned __int64, 5, 1); DO_TEST(CriticalSection, CriticalBlock, unsigned __int64, 1, 2); + DO_TEST(Mutex, synchronized, unsigned __int64, 1, 1); + 
DO_TEST(Mutex, synchronized, unsigned __int64, 2, 1);
+ DO_TEST(Mutex, synchronized, unsigned __int64, 5, 1);
+ DO_TEST(Mutex, synchronized, unsigned __int64, 1, 2);
DO_TEST(SpinLock, SpinBlock, unsigned __int64, 1, 1);
DO_TEST(SpinLock, SpinBlock, unsigned __int64, 2, 1);
DO_TEST(SpinLock, SpinBlock, unsigned __int64, 5, 1);
From 1d6a656bd501563ee146389f6af9562edddc920c Mon Sep 17 00:00:00 2001
From: Ken Rowland
Date: Wed, 22 May 2024 15:22:31 -0400
Subject: [PATCH 011/151] HPCC-31556 HIDL compiler not honoring do_not_log attribute for non-inline exceptions

Added code to the compiler to handle the do_not_log attribute for exceptions not handled inline

Signed-Off-By: Kenneth Rowland kenneth.rowland@lexisnexisrisk.com
---
tools/hidl/hidlcomp.cpp | 6 ++++++
1 file changed, 6 insertions(+)

diff --git a/tools/hidl/hidlcomp.cpp b/tools/hidl/hidlcomp.cpp
index 37e22303ae6..08cc6913654 100644
--- a/tools/hidl/hidlcomp.cpp
+++ b/tools/hidl/hidlcomp.cpp
@@ -4246,6 +4246,10 @@ void EspServInfo::write_esp_binding(const char *packagename)
outf("\t\t\tif (clientVer!=-1.0 && clientVer<%s)\n", minVer.str());
outs("\t\t\t\tthrow MakeStringException(-1, \"Client version is too old, please update your client application.\");");
}
+
+ if (mthi->getMetaInt("do_not_log",0))
+ outs(2, "context.queryRequestParameters()->setProp(\"do_not_log\",1);\n");
+
outs("\t\t\tresponse->set_status(SOAP_OK);\n");
if (servicefeatureurl.length() != 0)
@@ -4653,6 +4657,8 @@ void EspServInfo::write_esp_binding(const char *packagename)
{
if (servicefeatureurl.length() != 0)
outf("\t\t\tif(accessmap.ordinality()>0)\n\t\t\t\tonFeaturesAuthorize(context, accessmap, \"%s\", \"%s\");\n", name_, mthi->getName());
+ if (mthi->getMetaInt("do_not_log",0))
+ outf("\t\t\t\tcontext.queryRequestParameters()->setProp(\"do_not_log\",1);\n");
outf("\t\t\tiserv->on%s(*request->queryContext(), *esp_request.get(), *resp);\n", mthi->getName());
if (clearCacheGroupIDs.length() > 0)
outf("\t\t\tclearCacheByGroupID(\"%s\");\n", clearCacheGroupIDs.str());
From ae8337ee1730d24a22822afc8e268aea23a88f4b Mon Sep 17 00:00:00 2001
From: Michael Gardner
Date: Wed, 29 May 2024 11:35:46 -0400
Subject: [PATCH 012/151] HPCC-31901 Add Jfrog internal package releases to build-assets

Signed-off-by: Michael Gardner
---
.github/workflows/build-assets.yml | 43 ++++++++++++++++++++++++++++++
1 file changed, 43 insertions(+)

diff --git a/.github/workflows/build-assets.yml b/.github/workflows/build-assets.yml
index 195034d0efc..4fd56f61eb6 100644
--- a/.github/workflows/build-assets.yml
+++ b/.github/workflows/build-assets.yml
@@ -321,6 +321,29 @@ jobs:
tag: ${{ needs.preamble.outputs.internal_tag }}
artifacts: "${{ needs.preamble.outputs.folder_build }}/hpccsystems-*-internal*.deb,${{ needs.preamble.outputs.folder_build }}/hpccsystems-*-internal*.rpm"
+ - name: Upload Assets to Jfrog (debian internal)
+ if: ${{ matrix.ln && !matrix.container && contains(matrix.os, 'ubuntu') && github.repository_owner == 'hpcc-systems'}}
+ shell: bash
+ run: |
+ cd ${{ needs.preamble.outputs.folder_build }}
+ version=$(echo "${{ needs.preamble.outputs.internal_tag }}" | sed 's/internal_//')
+ packages=($(ls -1 hpccsystems-*.deb ))
+ for _package in ${packages[@]}; do
+ distribution=$( echo "${_package}" | sed "s/^.*${version}//" | awk -F '_' '{print $1;}' )
+ curl -u${{ secrets.JFROG_USERNAME }}:${{ secrets.JFROG_PASSWORD }} -XPUT "https://${{ secrets.JFROG_REGISTRY }}/hpccpl-debian-local/pool/LN/${_package};deb.distribution=${distribution};deb.component=LN;deb.architecture=amd64" -T ${{
needs.preamble.outputs.folder_build }}/${_package}
+ done
+
+ - name: Upload Assets to Jfrog (centos internal)
+ if: ${{ matrix.ln && !matrix.container && !contains(matrix.os, 'ubuntu') && github.repository_owner == 'hpcc-systems'}}
+ shell: bash
+ run: |
+ cd ${{ needs.preamble.outputs.folder_build }}
+ packages=($(ls -1 hpccsystems-*.rpm ))
+ for _package in ${packages[@]}; do
+ distribution=$( echo "${_package}" | awk -F '.' '{print $4;}' )
+ curl -u${{ secrets.JFROG_USERNAME }}:${{ secrets.JFROG_PASSWORD }} -XPUT "https://${{ secrets.JFROG_REGISTRY }}/hpccpl-rpm-local/LN/${distribution}/x86_64/${_package}" -T ${{ needs.preamble.outputs.folder_build }}/${_package}
+ done
+
- name: Locate k8s deb file (internal)
if: ${{ matrix.ln && matrix.container && !matrix.documentation }}
id: ln-container
@@ -470,6 +493,26 @@ jobs:
tag: ${{ needs.preamble.outputs.internal_tag }}
artifacts: "./build/hpccsystems-clienttools-internal*.exe,./build/hpccsystems-clienttools-internal*.msi,./build/hpccsystems-clienttools-internal*.dmg,./build/hpccsystems-clienttools-internal*.pkg,./build/hpccsystems-clienttools-internal*.tar.gz"
+ - name: Upload Assets to Jfrog (windows)
+ if: ${{ contains(matrix.os, 'windows') && github.repository_owner == 'hpcc-systems' }}
+ shell: bash
+ run: |
+ cd ./build
+ packages=($(ls -1 hpccsystems-*.exe ))
+ for _package in ${packages[@]}; do
+ curl -u${{ secrets.JFROG_USERNAME }}:${{ secrets.JFROG_PASSWORD }} "https://${{ secrets.JFROG_REGISTRY }}/hpccpl-windows-local/LN/windows/x86_64/${_package}" -T ${_package}
+ done
+
+ - name: Upload Assets to Jfrog (macos)
+ if: ${{ contains(matrix.os, 'macos') && github.repository_owner == 'hpcc-systems' }}
+ shell: bash
+ run: |
+ cd ./build
+ packages=($(ls -1 hpccsystems-*.pkg ))
+ for _package in ${packages[@]}; do
+ curl -u${{ secrets.JFROG_USERNAME }}:${{ secrets.JFROG_PASSWORD }} "https://${{ secrets.JFROG_REGISTRY }}/hpccpl-macos-local/LN/macos/x86_64/${_package}" -T ${_package}
+ done
+
- name: Upload error logs
if: ${{ failure() || cancelled() }}
uses: actions/upload-artifact@v4
From 7b1115bc4481ebbc744a2e984c317fef445fdaa7 Mon Sep 17 00:00:00 2001
From: g-pan
Date: Thu, 30 May 2024 17:19:38 -0400
Subject: [PATCH 013/151] HPCC-28781 Document PreferredReadPlanes

Signed-off-by: g-pan
---
.../ContainerizedMods/ConfigureValues.xml | 45 ++++++++++++++++---
1 file changed, 40 insertions(+), 5 deletions(-)

diff --git a/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml b/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml
index c4f60001e04..80e62634f7f 100644
--- a/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml
+++ b/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml
@@ -509,7 +509,7 @@ components
- +
@@ -839,8 +839,43 @@ format:
ds := DATASET('~remote::hpcc2::somescope1::somelfn1', rec);
+ +
+
+ Preferred Storage
+
+ The preferredDataReadPlanes option is available
+ for each type of cluster (hThor, Thor, and Roxie).
+
+ This option is only significant for logical files which reside
+ on multiple storage planes. When specified, the HPCC Systems platform
+ will seek to read files from the preferred plane(s) if a file resides
+ on them. These preferred planes must exist and be defined in
+ storage.planes.
+
+ The following is an example of a Thor cluster configured with
+ the preferredDataReadPlanes option.
+
+ thor:
+- name: thor
+ prefix: thor
+ numWorkers: 2
+ maxJobs: 4
+ maxGraphs: 3
+ preferredDataReadPlanes:
+ - data-copy
+ - indexdata-copy
+
+
+ In the above example, running a query that reads a file that
+ resides on both "data" and "data-copy" (in that order) would
+ normally read the copy on "data". With
+ preferredDataReadPlanes specified as above,
+ Thor reads the copy on "data-copy" instead.
- This can be useful when there are multiple copies of files on
+ different planes with different characteristics that can impact
+ performance.
@@ -1031,7 +1066,7 @@ thor:
integrating applications in different domains. CORS defines how
client web applications in one domain can interact with resources in
another domain. You can configure CORS support settings in the ESP section of
- the values.yaml file as illustrated below:
+ the values.yaml file as illustrated below:
esp:
- name: eclwatch
@@ -1297,9 +1332,9 @@ thor:
- +
- +
From eea9ede1a0ab61e0b3f94cff4869c25726870e82 Mon Sep 17 00:00:00 2001
From: Kunal Aswani
Date: Wed, 20 Mar 2024 14:52:51 -0400
Subject: [PATCH 014/151] HPCC-31506 WuSnapShot Duplicate in Translation Files removed

WuSnapshot is to be replaced by WuSnapShot in the translation files, along
with the corresponding name change in QuerySetDetailsWidget.html.

Signed-off-by: Kunal Aswani
---
esp/src/src/nls/es/hpcc.ts | 1 +
esp/src/src/nls/hpcc.ts | 1 +
2 files changed, 2 insertions(+)

diff --git a/esp/src/src/nls/es/hpcc.ts b/esp/src/src/nls/es/hpcc.ts
index 06acad47549..9e2ac8135d5 100644
--- a/esp/src/src/nls/es/hpcc.ts
+++ b/esp/src/src/nls/es/hpcc.ts
@@ -1110,6 +1110,7 @@ export = {
WSDL: "WSDL",
WUID: "WUID",
Wuidcannotbeempty: "Wuid no puede estar vacío.",
+ WuSnapshot: "Captura de Unidad de trabajo",
WUSnapShot: "Captura de Unidad de trabajo",
WUSnapshots: "Captura de Unidades de trabajo",
XGMML: "XGMML",
diff --git a/esp/src/src/nls/hpcc.ts b/esp/src/src/nls/hpcc.ts
index df59de5add6..24874cee160 100644
--- a/esp/src/src/nls/hpcc.ts
+++ b/esp/src/src/nls/hpcc.ts
@@ -1131,6 +1131,7 @@ export = {
WUID: "WUID",
Wuidcannotbeempty: "Wuid Cannot Be Empty.",
WUSnapshot: "WU Snapshot",
+ WUSnapShot: "WU Snapshot",
WUSnapshots: "WU Snapshots",
XGMML: "XGMML",
XLS: "XLS",
From 2f399ca9e56e12857c15e0853a6ad14843c714c9 Mon Sep 17 00:00:00 2001
From: Terrence Asselin
Date: Fri, 31 May 2024 10:48:43 -0500
Subject: [PATCH 015/151] HPCC-31968 ECLWatch file upload buffer size increased to 1MB

In HttpTransport, update the readContentToBuffer function to read chunks of
up to 1MB directly into the MemoryBuffer rather than into a temporary stack
buffer. The former 1K chunk size was a likely cause of slow uploads to cloud
storage, if not increased cost in some cases.

A subsequent ticket will use the configured preferred size per landing zone.
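For illustration, the shape of the new read path (a simplified sketch of the
change in the diff below; MemoryBuffer and BufferedSocket are the existing
jlib/ESP types, and the +1 on the reserve follows the BufferedSocket::read
contract noted in the code):

    constexpr size32_t readChunkSize = 0x100000;   // 1MB per read
    size32_t sizeToRead = bytesNotRead > readChunkSize ? readChunkSize : (size32_t)bytesNotRead;
    size32_t prevLen = buffer.length();
    char *target = (char *)buffer.reserve(sizeToRead + 1); // read() requires maxlen+1
    int readLen = m_bufferedsocket->read(target, sizeToRead);
    if (readLen <= 0)
        buffer.setLength(prevLen);             // error/EOF: discard the reserved space
    else
        buffer.setLength(prevLen + readLen);   // keep exactly what was read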
Signed-off-by: Terrence Asselin
---
esp/bindings/http/platform/httptransport.cpp | 31 ++++++++++----------
1 file changed, 16 insertions(+), 15 deletions(-)

diff --git a/esp/bindings/http/platform/httptransport.cpp b/esp/bindings/http/platform/httptransport.cpp
index 6ffb35755c8..15113cccdd4 100644
--- a/esp/bindings/http/platform/httptransport.cpp
+++ b/esp/bindings/http/platform/httptransport.cpp
@@ -2123,22 +2123,23 @@ int CHttpRequest::processHeaders(IMultiException *me)
bool CHttpRequest::readContentToBuffer(MemoryBuffer& buffer, __int64& bytesNotRead)
{
- char buf[1024 + 1];
- __int64 buflen = 1024;
- if (buflen > bytesNotRead)
- buflen = bytesNotRead;
-
- int readlen = m_bufferedsocket->read(buf, (int) buflen);
- if(readlen < 0)
- DBGLOG("Failed to read from socket");
-
- if(readlen <= 0)
- return false;
-
- buf[readlen] = 0;
- buffer.append(readlen, buf);//'buffer' may have some left-over from previous read
+ constexpr size32_t readChunkSize = 0x100000;
+ size32_t sizeToRead = bytesNotRead > readChunkSize ? readChunkSize: (size32_t)bytesNotRead;
+ size32_t prevLen = buffer.length();
+
+ // BufferedSocket::read buffer must be at least one larger than its maxlen argument
+ char * target = (char *)buffer.reserve(sizeToRead + 1);
+ int readLen = m_bufferedsocket->read(target, sizeToRead);
+ if(readLen <= 0)
+ {
+ if(readLen < 0)
+ DBGLOG("Failed to read from socket");
+ buffer.setLength(prevLen);
+ return false;
+ }

- bytesNotRead -= readlen;
+ buffer.setLength(prevLen + readLen);
+ bytesNotRead -= readLen;
return true;
}
From 9eb6e4a521e00dd2c0fcb4749183af767a9326aa Mon Sep 17 00:00:00 2001
From: Gavin Halliday
Date: Thu, 6 Jun 2024 12:38:39 +0100
Subject: [PATCH 016/151] HPCC-32001 Fix uninitialised variables in the new cache metrics

Signed-off-by: Gavin Halliday
---
system/jhtree/jhtree.cpp | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/system/jhtree/jhtree.cpp b/system/jhtree/jhtree.cpp
index 292f2e1dcd5..024cbd59b4a 100644
--- a/system/jhtree/jhtree.cpp
+++ b/system/jhtree/jhtree.cpp
@@ -674,10 +674,10 @@ class CNodeMRUCache final : public CMRUCacheOf>> pNumDups = nullptr;
std::shared_ptr>> pNumEvicts = nullptr;
public:
- RelaxedAtomic<__uint64> numHits;
- RelaxedAtomic<__uint64> numAdds;
- RelaxedAtomic<__uint64> numDups;
- RelaxedAtomic<__uint64> numEvicts;
+ RelaxedAtomic<__uint64> numHits{0};
+ RelaxedAtomic<__uint64> numAdds{0};
+ RelaxedAtomic<__uint64> numDups{0};
+ RelaxedAtomic<__uint64> numEvicts{0};
bool enabled = false;
CNodeMRUCache(CacheType cacheType)
{
From 4a5338d96148266ab9868c22b69a7679b0329807 Mon Sep 17 00:00:00 2001
From: Jake Smith
Date: Thu, 6 Jun 2024 18:27:55 +0100
Subject: [PATCH 017/151] HPCC-32010 Guard against kubectl check command failures (retry)

Signed-off-by: Jake Smith
---
system/jlib/jcontainerized.cpp | 48 ++++++++++++++++++++++++++--------
system/jlib/jcontainerized.hpp | 3 ++-
2 files changed, 39 insertions(+), 12 deletions(-)

diff --git a/system/jlib/jcontainerized.cpp b/system/jlib/jcontainerized.cpp
index e80154ed7e6..dfb0630d07f 100644
--- a/system/jlib/jcontainerized.cpp
+++ b/system/jlib/jcontainerized.cpp
@@ -215,7 +215,8 @@ bool applyYaml(const char *componentName, const char *wuid, const char *job, con
}
jobYaml.replaceString("_HPCC_ARGS_", args.str());

- runKubectlCommand(componentName, "kubectl replace --force -f -", jobYaml, nullptr);
+ // retrySecs=0 - I am not sure we want to retry this command systematically..
+ runKubectlCommand(componentName, "kubectl replace --force -f -", jobYaml, nullptr, 0); if (autoCleanup) { @@ -307,7 +308,7 @@ std::vector> getPodNodes(const char *selector) } } -void runKubectlCommand(const char *title, const char *cmd, const char *input, StringBuffer *output) +void runKubectlCommand(const char *title, const char *cmd, const char *input, StringBuffer *output, unsigned retrySecs) { #ifndef _CONTAINERIZED UNIMPLEMENTED_X("runKubectlCommand"); @@ -317,16 +318,41 @@ void runKubectlCommand(const char *title, const char *cmd, const char *input, St StringBuffer _output, error; if (!output) output = &_output; - unsigned ret = runExternalCommand(title, *output, error, cmd, input, ".", nullptr); - if (output->length()) - MLOG(MCdebugInfo, unknownJob, "%s: ret=%u, stdout=%s", cmd, ret, output->trimRight().str()); - if (error.length()) - MLOG(MCdebugError, unknownJob, "%s: ret=%u, stderr=%s", cmd, ret, error.trimRight().str()); - if (ret) + CTimeMon tm(retrySecs * 1000); + unsigned remainingMs = 0; + Owned exception; + while (true) { - if (input) - MLOG(MCdebugError, unknownJob, "Using input %s", input); - throw makeStringExceptionV(0, "Failed to run %s: error %u: %s", cmd, ret, error.str()); + try + { + unsigned ret = runExternalCommand(title, *output, error, cmd, input, ".", nullptr); + if (output->length()) + MLOG(MCdebugInfo, unknownJob, "%s: ret=%u, stdout=%s", cmd, ret, output->trimRight().str()); + if (error.length()) + MLOG(MCdebugError, unknownJob, "%s: ret=%u, stderr=%s", cmd, ret, error.trimRight().str()); + if (ret) + { + if (input) + MLOG(MCdebugError, unknownJob, "Using input %s", input); + throw makeStringExceptionV(0, "Failed to run %s: error %u: %s", cmd, ret, error.str()); + } + return; + } + catch (IException *e) + { + if (0 == retrySecs || tm.timedout(&remainingMs)) + throw; + exception.setown(e); + } + unsigned sleepMs = remainingMs; + // sleep for 10s (or remaining time) + if (sleepMs > 10000) + sleepMs = 10000; + VStringBuffer msg("retrying %s in %u ms", cmd, sleepMs); + OWARNLOG(exception, msg); + MilliSleep(sleepMs); + error.clear(); + output->clear(); } } diff --git a/system/jlib/jcontainerized.hpp b/system/jlib/jcontainerized.hpp index 065ea44518f..fab75264072 100644 --- a/system/jlib/jcontainerized.hpp +++ b/system/jlib/jcontainerized.hpp @@ -37,7 +37,8 @@ jlib_decl void waitJob(const char *componentName, const char *resourceType, cons jlib_decl bool applyYaml(const char *componentName, const char *wuid, const char *job, const char *resourceType, const std::list> &extraParams, bool optional, bool autoCleanup); jlib_decl void runJob(const char *componentName, const char *wuid, const char *job, const std::list> &extraParams={}); -extern jlib_decl void runKubectlCommand(const char *title, const char *cmd, const char *input, StringBuffer *output); +constexpr unsigned defaultKubectlRetrySecs = 60; +extern jlib_decl void runKubectlCommand(const char *title, const char *cmd, const char *input, StringBuffer *output, unsigned retrySecs=defaultKubectlRetrySecs); // return the k8s external host and port for serviceName extern jlib_decl std::pair getExternalService(const char *serviceName); From 720a901a2e4485dcf9c677fd8f7aec450d463cda Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Thu, 6 Jun 2024 19:10:14 +0100 Subject: [PATCH 018/151] HPCC-32014 Add .git to image.sh safe.directory Signed-off-by: Jake Smith --- dockerfiles/image.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/dockerfiles/image.sh b/dockerfiles/image.sh index 9ff1cda4a92..ecc9a34dd13 100755 --- 
a/dockerfiles/image.sh
+++ b/dockerfiles/image.sh
@@ -97,6 +97,7 @@ finalize_platform_core_image() {
hpccsystems/platform-core:$GIT_BRANCH-$MODE-$crc "tail -f /dev/null")
docker exec --user root --workdir /hpcc-dev $CONTAINER /bin/bash -c "rm -rf /hpcc-dev/HPCC-Platform && mkdir /hpcc-dev/HPCC-Platform && chown -R hpcc:hpcc /hpcc-dev/HPCC-Platform"
docker exec --workdir /hpcc-dev $CONTAINER /bin/bash -c "git config --global --add safe.directory /hpcc-dev/HPCC-Platform-local"
+ docker exec --workdir /hpcc-dev $CONTAINER /bin/bash -c "git config --global --add safe.directory /hpcc-dev/HPCC-Platform-local/.git"
docker exec --workdir /hpcc-dev $CONTAINER /bin/bash -c "git clone --single-branch file:///hpcc-dev/HPCC-Platform-local /hpcc-dev/HPCC-Platform"
docker exec --workdir /hpcc-dev/HPCC-Platform $CONTAINER /bin/bash -c "git reset --hard --recurse-submodules"
docker exec --workdir /hpcc-dev/HPCC-Platform-local $CONTAINER /bin/bash -c "git ls-files --modified --exclude-standard -z | xargs -0 -I {} cp {} /hpcc-dev/HPCC-Platform/{}"
From f24321eb12d4bbb067941c7e6a2c79f1d2c381f3 Mon Sep 17 00:00:00 2001
From: Jake Smith
Date: Thu, 6 Jun 2024 19:18:07 +0100
Subject: [PATCH 019/151] HPCC-32013 Allow dfuserver replicas to be set

The dfuserver.yaml had a hardcoded replicas of 1, meaning any values yaml
override of replicas was ignored.

Signed-off-by: Jake Smith
---
helm/hpcc/templates/dfuserver.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml
index aeb366bd8bc..2bd09732105 100644
--- a/helm/hpcc/templates/dfuserver.yaml
+++ b/helm/hpcc/templates/dfuserver.yaml
@@ -47,7 +47,7 @@ kind: Deployment
metadata:
name: {{ .name | quote }}
spec:
- replicas: 1
+ replicas: {{ .replicas | default 1 }}
selector:
matchLabels:
run: {{ .name | quote }}
From 1c3642e0aad5562d2e5b182d7495b540ea73e07e Mon Sep 17 00:00:00 2001
From: Jake Smith
Date: Thu, 6 Jun 2024 19:50:24 +0100
Subject: [PATCH 020/151] HPCC-32015 Rationalize Dali operator messages

Some messages (one in particular, "Waiting for Dali ..") were inappropriately
being output as operator errors. Others should have been operator warnings,
errors, or disasters.
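For reference, the jlib logging macros involved, as used in this change
(operator-audience variants carry the 'O' prefix; DISLOG is the operator
disaster class):

    WARNLOG("...");      // warning, default audience
    OWARNLOG(e, "...");  // operator warning
    OERRLOG(e, "...");   // operator error
    DISLOG(e, "...");    // operator disaster, e.g. store load/save failure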
Signed-off-by: Jake Smith --- dali/base/dasds.cpp | 52 ++++++++++++++++++++++---------------------- dali/base/dasess.cpp | 6 ++--- 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/dali/base/dasds.cpp b/dali/base/dasds.cpp index a8e1a0e0b06..489612a06a8 100644 --- a/dali/base/dasds.cpp +++ b/dali/base/dasds.cpp @@ -1027,7 +1027,7 @@ void writeDelta(StringBuffer &xml, IFile &iFile, const char *msg="", unsigned re { exception.setown(e); StringBuffer s(msg); - IERRLOG(e, s.append("writeDelta, failed").str()); + OWARNLOG(e, s.append("writeDelta, failed").str()); } if (!exception.get()) break; @@ -1035,7 +1035,7 @@ void writeDelta(StringBuffer &xml, IFile &iFile, const char *msg="", unsigned re return; if (0 == --_retryAttempts) { - IWARNLOG("writeDelta, too many retry attempts [%d]", retryAttempts); + DISLOG("writeDelta, too many retry attempts [%d]", retryAttempts); return; } exception.clear(); @@ -1218,7 +1218,7 @@ class CDeltaWriter : implements IThreaded { exception.setown(e); StringBuffer err("Saving external (backup): "); - LOG(MCoperatorError, unknownJob, e, err.append(rL).str()); + OERRLOG(e, err.append(rL).str()); } if (!exception.get()) break; @@ -1254,7 +1254,7 @@ class CDeltaWriter : implements IThreaded { exception.setown(e); StringBuffer err("Removing external (backup): "); - LOG(MCoperatorWarning, unknownJob, e, err.append(rL).str()); + OWARNLOG(e, err.append(rL).str()); } if (!exception.get()) break; @@ -1492,7 +1492,7 @@ class CLegacyBinaryFileExternal : public CExternalFile, implements IExternalHand if (*_name) s.append("in property ").append(_name); Owned e = MakeSDSException(SDSExcpt_MissingExternalFile, "%s", filename.str()); - LOG(MCoperatorWarning, unknownJob, e, s.str()); + OWARNLOG(e, s.str()); if (withValue) { StringBuffer str("EXTERNAL BINARY FILE: \""); @@ -1574,7 +1574,7 @@ class CXMLFileExternal : public CExternalFile, implements IExternalHandler if (name && *name) s.append("in property ").append(name); Owned e = MakeSDSException(SDSExcpt_MissingExternalFile, "%s", filename.str()); - LOG(MCoperatorWarning, unknownJob, e, s.str()); + OWARNLOG(e, s.str()); StringBuffer str("EXTERNAL XML FILE: \""); str.append(filename.str()).append("\" MISSING"); tree.setown(createPTree(owner.queryName())); @@ -2102,7 +2102,7 @@ void CBinaryFileExternal::readValue(const char *name, MemoryBuffer &mb) { StringBuffer s("Missing external file "); Owned e = MakeSDSException(SDSExcpt_MissingExternalFile, "%s", filename.str()); - LOG(MCoperatorWarning, unknownJob, e, s.str()); + OWARNLOG(e, s.str()); StringBuffer str("EXTERNAL BINARY FILE: \""); str.append(filename.str()).append("\" MISSING"); CPTValue v(str.length()+1, str.str(), false); @@ -2148,7 +2148,7 @@ void CBinaryFileExternal::read(const char *name, IPropertyTree &owner, MemoryBuf if (*_name) s.append("in property ").append(_name); Owned e = MakeSDSException(SDSExcpt_MissingExternalFile, "%s", filename.str()); - LOG(MCoperatorWarning, unknownJob, e, s.str()); + OWARNLOG(e, s.str()); StringBuffer str("EXTERNAL BINARY FILE: \""); str.append(filename.str()).append("\" MISSING"); CPTValue v(str.length()+1, str.str(), false); @@ -2597,7 +2597,7 @@ class CServerRemoteTree : public CRemoteTreeBase try { SDSManager->deleteExternal(index); } catch (IException *e) { - LOG(MCoperatorWarning, unknownJob, e, StringBuffer("Deleting external reference for ").append(queryName()).str()); + OWARNLOG(e, StringBuffer("Deleting external reference for ").append(queryName()).str()); e->Release(); } } @@ -4723,7 +4723,7 @@ void 
CSDSTransactionServer::processMessage(CMessageBuffer &mb) mb.append(e->errorMessage(s).str()); StringBuffer clientUrl("EXCEPTION in reply to client "); mb.getSender().getEndpointHostText(clientUrl); - LOG(MCoperatorError, unknownJob, e); + OERRLOG(e); } try { CheckTime block10("DAMP_REQUEST reply"); @@ -4959,7 +4959,7 @@ IPropertyTree *loadStore(const char *storeFilename, unsigned edition, IPTreeMake catch (DALI_CATCHALL) { IException *e = MakeStringException(0, "Unknown exception - loading store file : %s", storeFilename); - LOG(MCoperatorDisaster, unknownJob, e, ""); + DISLOG(e); if (!logErrorsOnly) throw; e->Release(); @@ -5109,7 +5109,7 @@ class CLightCoalesceThread : implements ICoalesce, public CInterface t += idlePeriodSecs; if (t/3600 >= STORENOTSAVE_WARNING_PERIOD && ((t-lastWarning)/3600>(STORENOTSAVE_WARNING_PERIOD/2))) { - OWARNLOG("Store has not been saved for %d hours", t/3600); + OERRLOG("Store has not been saved for %d hours", t/3600); lastWarning = t; } } @@ -5662,7 +5662,7 @@ class CStoreHelper : implements IStoreHelper, public CInterface } catch (IException *e) { - OERRLOG(e, "Exception(1) - Error saving store file"); + DISLOG(e, "Exception(1) - Error saving store file"); iFileIOTmpStore.clear(); iFileTmpStore->remove(); throw; @@ -5675,7 +5675,7 @@ class CStoreHelper : implements IStoreHelper, public CInterface refreshStoreInfo(); if (storeInfo.edition != edition) { - OWARNLOG("Another process has updated the edition whilst saving the store: %s", newStoreNamePath.str()); + WARNLOG("Another process has updated the edition whilst saving the store: %s", newStoreNamePath.str()); iFileTmpStore->remove(); return; } @@ -5701,7 +5701,7 @@ class CStoreHelper : implements IStoreHelper, public CInterface catch (IException *e) { StringBuffer s("Exception(2) - Error saving store file"); - OERRLOG(e, s.str()); + DISLOG(e, s.str()); e->Release(); return; } @@ -5710,7 +5710,7 @@ class CStoreHelper : implements IStoreHelper, public CInterface try { renameDelta(edition, newEdition, remoteBackupLocation); } catch (IException *e) { - LOG(MCoperatorError, unknownJob, e, "Failure handling backup"); + OERRLOG(e, "Failure handling backup"); e->Release(); } } @@ -5740,7 +5740,7 @@ class CStoreHelper : implements IStoreHelper, public CInterface catch (IException *e) { StringBuffer s; - LOG(MCoperatorError, unknownJob, e, s.append("Failure to backup dali to remote location: ").append(remoteBackupLocation)); + OERRLOG(e, s.append("Failure to backup dali to remote location: ").append(remoteBackupLocation)); e->Release(); } @@ -5753,7 +5753,7 @@ class CStoreHelper : implements IStoreHelper, public CInterface catch (IException *e) { StringBuffer s("Exception(3) - Error saving store file"); - OERRLOG(e, s.str()); + DISLOG(e, s.str()); e->Release(); } if (done) @@ -6484,12 +6484,12 @@ void CCovenSDSManager::loadStore(const char *storeName, const bool *abort) } catch (IException *e) { - OERRLOG(e, "Exception - Failed to load main store"); + DISLOG(e, "Exception - Failed to load main store"); throw; } catch (DALI_CATCHALL) { - OERRLOG("Unknown exception - Failed to load main store"); + DISLOG("Unknown exception - Failed to load main store"); throw; } @@ -6503,7 +6503,7 @@ void CCovenSDSManager::loadStore(const char *storeName, const bool *abort) if (remoteBackupLocation.length()) { try { validateBackup(); } - catch (IException *e) { LOG(MCoperatorError, unknownJob, e, "Validating backup"); e->Release(); } + catch (IException *e) { OERRLOG(e, "Validating backup"); e->Release(); } StringBuffer 
deltaFilename(dataPath);
iStoreHelper->getCurrentDeltaFilename(deltaFilename);
@@ -8700,7 +8700,7 @@ bool CCovenSDSManager::fireException(IException *e)
{
if (handled)
{
- LOG(MCoperatorDisaster, unknownJob, e, "FATAL, too many exceptions");
+ DISLOG(e, "FATAL, too many exceptions");
return false; // did not successfully handle.
}
IERRLOG(e, "Exception while restarting or shutting down");
@@ -8737,7 +8737,7 @@ bool CCovenSDSManager::fireException(IException *e)
}
manager.unhandledThread.clear();
}
- catch (IException *_e) { LOG(MCoperatorError, unknownJob, _e, "Exception while restarting or shutting down"); _e->Release(); }
+ catch (IException *_e) { OERRLOG(_e, "Exception while restarting or shutting down"); _e->Release(); }
catch (DALI_CATCHALL) { IERRLOG("Unknown exception while restarting or shutting down"); }
if (!restart)
{
@@ -8846,7 +8846,7 @@ bool CDeltaWriter::save(std::queue> &todo)
}
catch (IException *e)
{
- LOG(MCoperatorWarning, unknownJob, e, "save: failed to touch delta in progress file");
+ OERRLOG(e, "save: failed to touch delta in progress file");
e->Release();
}
// here if exception only
@@ -8909,14 +8909,14 @@ bool CDeltaWriter::save(std::queue> &todo)
}
catch (IException *e)
{
- LOG(MCoperatorWarning, unknownJob, e, "save: failure whilst committing deltas to disk! Remedial action must be taken");
+ OERRLOG(e, "save: failure whilst committing deltas to disk! Remedial action must be taken");
e->Release();
// this is really an attempt at disaster recovery at this point
forceBlockingSave = true;
}
if (forceBlockingSave)
{
- LOG(MCoperatorWarning, unknownJob, "Due to earlier failures, attempting forced/blocking save of Dali store");
+ OWARNLOG("Due to earlier failures, attempting forced/blocking save of Dali store");
while (todo.size())
todo.pop();
SDSManager->saveStore(nullptr, false, false);
diff --git a/dali/base/dasess.cpp b/dali/base/dasess.cpp
index ba4a1e58b57..07b0d2568d7 100644
--- a/dali/base/dasess.cpp
+++ b/dali/base/dasess.cpp
@@ -1963,11 +1963,11 @@ bool registerClientProcess(ICommunicator *comm, IGroup *& retcoven,unsigned time
return true;
}
}
- StringBuffer str;
- OERRLOG("Waiting for Dali to be available - server: %s", comm->queryGroup().queryNode(r).endpoint().getEndpointHostText(str).str());
+ StringBuffer str("Waiting for Dali to be available - server: ");
+ comm->queryGroup().queryNode(r).endpoint().getEndpointHostText(str);
if (tm.timedout())
{
- PROGLOG("%s", str.append(" Timed out.").str());
+ OWARNLOG("%s", str.append(" Timed out.").str());
break;
}
else if (0 == nextLog)
From f777a96a575d52715dd1a463958b4dd23b041ece Mon Sep 17 00:00:00 2001
From: Gavin Halliday
Date: Thu, 30 May 2024 15:15:00 +0100
Subject: [PATCH 021/151] HPCC-31966 Add internal spans for graph execution within roxie

Signed-off-by: Gavin Halliday
---
roxie/ccd/ccdcontext.cpp | 1 +
1 file changed, 1 insertion(+)

diff --git a/roxie/ccd/ccdcontext.cpp b/roxie/ccd/ccdcontext.cpp
index dc6d800b848..e064a6552cb 100644
--- a/roxie/ccd/ccdcontext.cpp
+++ b/roxie/ccd/ccdcontext.cpp
@@ -1658,6 +1658,7 @@ class CRoxieContextBase : implements IRoxieAgentContext, implements ICodeContext
}
else
{
+ OwnedSpanScope graphScope = queryThreadedActiveSpan()->createInternalSpan(name);
ProcessInfo startProcessInfo;
if (workUnit || statsWu)
startProcessInfo.update(ReadAllInfo);
From de18896b7ba9a114493aeceec92d89a02effe47d Mon Sep 17 00:00:00 2001
From: Shamser Ahmed
Date: Mon, 3 Jun 2024 14:09:34 +0100
Subject: [PATCH 022/151] HPCC-31983 Make iFile member of CFileOwner Linked rather than Owned
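With the member changed from Owned to Linked, CFileOwner takes its own
reference on construction, so callers keep their reference and no longer need
an explicit getLink(). An illustrative sketch based on the
thhashdistribslave.cpp change below:

    OwnedIFile iFile = createIFile(tempname.str());
    // before: spillFile.setown(new CFileOwner(iFile.getLink(), tempFileSizeTracker));
    spillFile.setown(new CFileOwner(iFile, tempFileSizeTracker)); // Linked member links internally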
Signed-off-by: Shamser Ahmed --- thorlcr/activities/hashdistrib/thhashdistribslave.cpp | 2 +- thorlcr/activities/lookupjoin/thlookupjoinslave.cpp | 5 +++-- thorlcr/thorutil/thmem.cpp | 2 +- thorlcr/thorutil/thormisc.hpp | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/thorlcr/activities/hashdistrib/thhashdistribslave.cpp b/thorlcr/activities/hashdistrib/thhashdistribslave.cpp index 5f543423889..6fcc98f6903 100644 --- a/thorlcr/activities/hashdistrib/thhashdistribslave.cpp +++ b/thorlcr/activities/hashdistrib/thhashdistribslave.cpp @@ -2727,7 +2727,7 @@ class CSpill : implements IRowWriter, public CSimpleInterface prefix.append(bucketN).append('_').append(desc); GetTempFilePath(tempname, prefix.str()); OwnedIFile iFile = createIFile(tempname.str()); - spillFile.setown(new CFileOwner(iFile.getLink(), tempFileSizeTracker)); + spillFile.setown(new CFileOwner(iFile, tempFileSizeTracker)); if (owner.getOptBool(THOROPT_COMPRESS_SPILLS, true)) { rwFlags |= rw_compress; diff --git a/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp b/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp index 47d4835213f..9465afc6a76 100644 --- a/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp +++ b/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp @@ -1878,7 +1878,8 @@ class CLookupJoinActivityBase : public CInMemJoinBase temp = createIFile(tempName.str()); + file.setown(new CFileOwner(temp)); VStringBuffer spillPrefixStr("clearAllNonLocalRows(%d)", SPILL_PRIORITY_SPILLABLE_STREAM); // 3rd param. is skipNulls = true, the row arrays may have had the non-local rows delete already. rows.save(file->queryIFile(), spillCompInfo, true, spillPrefixStr.str()); // saves committed rows @@ -2944,7 +2945,7 @@ class CLookupJoinActivityBase : public CInMemJoinBase iFile = createIFile(tempName.str()); VStringBuffer spillPrefixStr("%sRowCollector(%d)", tracingPrefix.str(), spillPriority); spillableRows.save(*iFile, spillCompInfo, false, spillPrefixStr.str()); // saves committed rows - spillFiles.append(new CFileOwner(iFile.getLink())); + spillFiles.append(new CFileOwner(iFile)); ++overflowCount; statOverflowCount.fastAdd(1); // NB: this is total over multiple uses of this class statSizeSpill.fastAdd(iFile->size()); diff --git a/thorlcr/thorutil/thormisc.hpp b/thorlcr/thorutil/thormisc.hpp index 73d57dbea31..2bc8f0c776d 100644 --- a/thorlcr/thorutil/thormisc.hpp +++ b/thorlcr/thorutil/thormisc.hpp @@ -340,7 +340,7 @@ class CFileSizeTracker: public CInterface // simple class which takes ownership of the underlying file and deletes it on destruction class graph_decl CFileOwner : public CSimpleInterface, implements IInterface { - OwnedIFile iFile; + Linked iFile; Linked fileSizeTracker; offset_t fileSize = 0; public: From df9c5cfd30d942638e8309701f67ba2986681946 Mon Sep 17 00:00:00 2001 From: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> Date: Fri, 7 Jun 2024 10:33:54 -0400 Subject: [PATCH 023/151] HPCC-32020 ECL Watch v9 fix WU details ECL tab file list links Fixes an issue in ECL Watch v9 where clicking on files in the ECL tab of the Workunit details page would go to an unknown url, causing a JS exception instead of displaying the contents of the selected file. 
Signed-off-by: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com>
---
esp/src/src-react/components/WorkunitDetails.tsx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/esp/src/src-react/components/WorkunitDetails.tsx b/esp/src/src-react/components/WorkunitDetails.tsx
index d75f611490b..6d790b184a8 100644
--- a/esp/src/src-react/components/WorkunitDetails.tsx
+++ b/esp/src/src-react/components/WorkunitDetails.tsx
@@ -221,7 +221,7 @@ export const WorkunitDetails: React.FunctionComponent = ({
- +
From 17b49b1f8e6abd510a67ee27dc777b2e468feeb1 Mon Sep 17 00:00:00 2001
From: Terrence Asselin
Date: Tue, 4 Jun 2024 11:49:32 -0500
Subject: [PATCH 024/151] HPCC-31936 Fix WsEcl sample XML, WSDL and XSD features

Adjust EspHttpBinding and CWsEclBinding to use info from the roxie to generate
WSDL/XSD and sample request/response XML rather than attempting to use ESDL.

This means moving the old implementation of several functions from prior to
HPCC-28978 from EspHttpBinding into CWsEclBinding. Specifically:

- getSchema is overridden in CWsEclBinding, and called when the ESDL-based
generation isn't applicable (there is no xmlServiceFileName).

- getWsdlOrXsd is moved into CWsEclBinding, and the onXSD/onWsdl handlers in
CWsEclBinding call it directly.

Noted that the non-overridden CWsEclBinding member function getSchema was
substantially similar to the overridden version, so renamed it getSimpleSchema
and made it protected. Created ticket HPCC-32016 to refactor that code.

Signed-off-by: Terrence Asselin
---
esp/bindings/http/platform/httpbinding.cpp | 9 +-
esp/services/ws_ecl/ws_ecl_service.cpp | 178 ++++++++++++++++++++-
esp/services/ws_ecl/ws_ecl_service.hpp | 8 +-
3 files changed, 183 insertions(+), 12 deletions(-)

diff --git a/esp/bindings/http/platform/httpbinding.cpp b/esp/bindings/http/platform/httpbinding.cpp
index 884eb0d967f..c5a9e831844 100644
--- a/esp/bindings/http/platform/httpbinding.cpp
+++ b/esp/bindings/http/platform/httpbinding.cpp
@@ -1764,7 +1764,9 @@ void EspHttpBinding::getServiceSchema(IEspContext& context, CHttpRequest* reque
StringBuffer xmlFilename;
if (!getServiceXmlFilename(xmlFilename))
{
- throw MakeStringException(-1, "Unable to get service XML filename");
+ // Allow subclassed specialized implementation that doesn't use ESDL
+ getSchema(schema, context, request, serviceQName, methodQName, true);
+ return;
}

StringBuffer nstr;
@@ -1841,8 +1843,7 @@ int EspHttpBinding::getServiceWsdlOrXsd(IEspContext &context, CHttpRequest* requ
}

StringBuffer schema;
- getServiceSchema(context, request, serviceQName, methodQName,
- version, isWsdl, false, schema);
+ getServiceSchema(context, request, serviceQName, methodQName, version, isWsdl, false, schema);

response->setContent(schema.length(), schema.str());
response->setContentType(HTTP_TYPE_APPLICATION_XML_UTF8);
@@ -1937,7 +1938,7 @@ void EspHttpBinding::generateSampleXml(bool isRequest, IEspContext &context, CHt
content.appendf("generateSampleXml schema error: %s::%s", serv, method);
return;
}
-
+
getServiceSchema(context, request, serviceQName, methodQName, getVersion(context), false, false, schemaXml);

Owned schema;
diff --git a/esp/services/ws_ecl/ws_ecl_service.cpp b/esp/services/ws_ecl/ws_ecl_service.cpp
index 9a2db171a3d..6856305cc8e 100644
--- a/esp/services/ws_ecl/ws_ecl_service.cpp
+++ b/esp/services/ws_ecl/ws_ecl_service.cpp
@@ -1403,7 +1403,7 @@ int CWsEclBinding::getXsdDefinition(IEspContext &context, CHttpRequest *request,
}

-bool CWsEclBinding::getSchema(StringBuffer& schema, IEspContext &ctx,
CHttpRequest* req, WsEclWuInfo &wsinfo) +bool CWsEclBinding::getSimpleSchema(StringBuffer& schema, IEspContext &ctx, CHttpRequest* req, WsEclWuInfo &wsinfo) { Owned namespaces = createPTree(); appendSchemaNamespaces(namespaces, ctx, req, wsinfo); @@ -1453,6 +1453,94 @@ bool CWsEclBinding::getSchema(StringBuffer& schema, IEspContext &ctx, CHttpReque return true; } +// Moved from the prior implementation in EspHttpBinding which now relies on ESDL to generate the schema. +// However, since WsEcl is acting a front-end for roxie queries, it needs this custom implementation +// that uses the information from the roxie instead of ESDL. +bool CWsEclBinding::getSchema(StringBuffer& schema, IEspContext &ctx, CHttpRequest* req, const char *service, const char *method,bool standalone) +{ + StringBuffer serviceQName; + StringBuffer methodQName; + + if (!qualifyServiceName(ctx, service, method, serviceQName, &methodQName)) + return false; + + const char *sqName = serviceQName.str(); + const char *mqName = methodQName.str(); + + Owned namespaces = createPTree(); + appendSchemaNamespaces(namespaces, ctx, req, service, method); + Owned nsiter = namespaces->getElements("namespace"); + + StringBuffer nstr; + generateNamespace(ctx, req, sqName, mqName, nstr); + schema.appendf("query(); + schema.appendf(" xmlns:%s=\"%s\"", ns.queryProp("@nsvar"), ns.queryProp("@ns")); + } + schema.append(">\n"); + ForEach(*nsiter) + { + IPropertyTree &ns = nsiter->query(); + if (ns.hasProp("@import")) + schema.appendf("", ns.queryProp("@ns"), ns.queryProp("@location")); + } + + + schema.append( + "" + "" + "" + "" + "" + "" + "" + "\n" + "" + "" + "" + "" + "" + "\n" + "\n" + ); + + if (ctx.queryOptions()&ESPCTX_WSDL_EXT) + { + schema.append( + "" + "" + "" + "" + "" + "" + "" + "" + "" + "" + "" + "" + "" + "" + "" + "" + "" + "" + "" + "\n" + ); + } + + bool mda=(req->queryParameters()->getPropInt("mda")!=0); + getXsdDefinition(ctx, req, schema, sqName, mqName, mda); + schema.append("\n"); + schema.append(""); + return true; +} + int CWsEclBinding::getGenForm(IEspContext &context, CHttpRequest* request, CHttpResponse* response, WsEclWuInfo &wuinfo, bool box) { IConstWorkUnit *wu = wuinfo.ensureWorkUnit(); @@ -1513,7 +1601,7 @@ int CWsEclBinding::getGenForm(IEspContext &context, CHttpRequest* request, CHttp } } else - getSchema(formxml, context, request, wuinfo); + getSimpleSchema(formxml, context, request, wuinfo); formxml.append(""); if (web) @@ -1621,7 +1709,7 @@ void CWsEclBinding::getWsEcl2XmlRequest(StringBuffer& soapmsg, IEspContext &cont element.append(wsinfo.queryname.str()).append("Request"); StringBuffer schemaXml; - getSchema(schemaXml, context, request, wsinfo); + getSimpleSchema(schemaXml, context, request, wsinfo); ESPLOG(LogMax,"request schema: %s", schemaXml.str()); Owned schema = createXmlSchemaFromString(schemaXml); if (schema.get()) @@ -1657,7 +1745,7 @@ void CWsEclBinding::getWsEclJsonRequest(StringBuffer& jsonmsg, IEspContext &cont element.append("Request"); StringBuffer schemaXml; - getSchema(schemaXml, context, request, wsinfo); + getSimpleSchema(schemaXml, context, request, wsinfo); ESPLOG(LogMax,"request schema: %s", schemaXml.str()); Owned schema = createXmlSchemaFromString(schemaXml); if (schema.get()) @@ -2290,7 +2378,7 @@ int CWsEclBinding::getWsdlBindings(IEspContext &context, CHttpRequest *request, int CWsEclBinding::onGetWsdl(IEspContext &context, CHttpRequest* request, CHttpResponse* response, WsEclWuInfo &wsinfo) { context.setBindingValue(&wsinfo); - EspHttpBinding::onGetWsdl(context, 
request, response, wsinfo.qsetname.str(), wsinfo.queryname.str()); + getWsdlOrXsd(context, request, response, wsinfo.qsetname.str(), wsinfo.queryname.str(), true); context.setBindingValue(NULL); return 0; } @@ -2298,12 +2386,88 @@ int CWsEclBinding::onGetWsdl(IEspContext &context, CHttpRequest* request, CHttpR int CWsEclBinding::onGetXsd(IEspContext &context, CHttpRequest* request, CHttpResponse* response, WsEclWuInfo &wsinfo) { context.setBindingValue(&wsinfo); - EspHttpBinding::onGetXsd(context, request, response, wsinfo.qsetname.str(), wsinfo.queryname.str()); + getWsdlOrXsd(context, request, response, wsinfo.qsetname.str(), wsinfo.queryname.str(), false); context.setBindingValue(NULL); return 0; } +int CWsEclBinding::getWsdlOrXsd(IEspContext &context, CHttpRequest* request, CHttpResponse* response, const char *service, const char *method, bool isWsdl) +{ + bool mda=(request->queryParameters()->getPropInt("mda")!=0); + try + { + StringBuffer serviceQName; + StringBuffer methodQName; + + if (!qualifyServiceName(context, service, method, serviceQName, &methodQName)) + { + return onGetNotFound(context, request, response, service); + } + else + { + const char *sqName = serviceQName.str(); + const char *mqName = methodQName.str(); + StringBuffer ns; + generateNamespace(context, request, serviceQName.str(), methodQName.str(), ns); + + StringBuffer content(""); + if (context.queryRequestParameters()->hasProp("display")) + content.append(""); + else if (isWsdl && context.queryRequestParameters()->hasProp("wsdlviewer")) + content.append(""); + if (isWsdl) + { + content.appendf("", ns.str(), ns.str()); + content.append(""); + } + + getSchema(content,context,request,service,method,!isWsdl); + + if (isWsdl) + { + content.append(""); + + getWsdlMessages(context, request, content, sqName, mqName, mda); + getWsdlPorts(context, request, content, sqName, mqName, mda); + getWsdlBindings(context, request, content, sqName, mqName, mda); + + StringBuffer location(getWsdlAddress()); + if (request->queryParameters()->hasProp("wsdl_destination_path")) + location.append(request->queryParameters()->queryProp("wsdl_destination_path")); + else + location.append('/').append(sqName).appendf("?ver_=%g", context.getClientVersion()); + + if (request->queryParameters()->hasProp("encode_results")) + { + const char *encval = request->queryParameters()->queryProp("encode_results"); + location.append("&").appendf("encode_=%s", (encval && *encval) ? 
encval : "1");
+ }
+
+ content.appendf("", sqName);
+ content.appendf("", sqName, sqName);
+ content.appendf("", location.str());
+ content.append("");
+ content.append("");
+ content.append("");
+ }
+
+ response->setContent(content.length(), content.str());
+ response->setContentType(HTTP_TYPE_APPLICATION_XML_UTF8);
+ response->setStatus(HTTP_STATUS_OK);
+ }
+ }
+ catch (IException *e)
+ {
+ return onGetException(context, request, response, *e);
+ }
+
+ response->send();
+ return 0;
+}
+
int CWsEclBinding::getWsEclDefinition(CHttpRequest* request, CHttpResponse* response, const char *thepath)
{
@@ -2423,7 +2587,7 @@ int CWsEclBinding::getRestURL(IEspContext *ctx, CHttpRequest *request, CHttpResp

StringBuffer schemaXml;
- getSchema(schemaXml, *ctx, request, wsinfo);
+ getSimpleSchema(schemaXml, *ctx, request, wsinfo);
Owned schema = createXmlSchemaFromString(schemaXml);
if (schema.get())
{
diff --git a/esp/services/ws_ecl/ws_ecl_service.hpp b/esp/services/ws_ecl/ws_ecl_service.hpp
index 8d3ce45e1ab..2a8e9edbe82 100644
--- a/esp/services/ws_ecl/ws_ecl_service.hpp
+++ b/esp/services/ws_ecl/ws_ecl_service.hpp
@@ -140,6 +140,12 @@ class CWsEclBinding : public CHttpSoapBinding
private:
CWsEclService *wsecl;

+protected:
+ bool getSchema(StringBuffer& schema, IEspContext &ctx, CHttpRequest* req, const char *service, const char *method, bool standalone) override;
+ int getWsdlOrXsd(IEspContext &context, CHttpRequest* request, CHttpResponse* response, const char *service, const char *method, bool isWsdl);
+ // Does not provide all the flexibility of the getSchema override. Consider refactoring out to use getSchema in its place.
+ bool getSimpleSchema(StringBuffer& schema, IEspContext &ctx, CHttpRequest* req, WsEclWuInfo &wsinfo);
+
public:
CWsEclBinding(IPropertyTree *cfg, const char *bindname, const char *procname)
: CHttpSoapBinding(cfg, bindname, procname), wsecl(NULL)
@@ -199,7 +205,7 @@ class CWsEclBinding : public CHttpSoapBinding
bool qualifyServiceName(IEspContext &context, const char *servname, const char *methname, StringBuffer &servQName, StringBuffer *methQName){servQName.clear().append(servname); if (methQName) methQName->clear().append(methname); return true;}
int getXsdDefinition(IEspContext &context, CHttpRequest *request, StringBuffer &content, WsEclWuInfo &wsinfo);
- bool getSchema(StringBuffer& schema, IEspContext &ctx, CHttpRequest* req, WsEclWuInfo &wsinfo) ;
+
void appendSchemaNamespaces(IPropertyTree *namespaces, IEspContext &ctx, CHttpRequest* req, WsEclWuInfo &wsinfo);
void appendSchemaNamespaces(IPropertyTree *namespaces, IEspContext &ctx, CHttpRequest* req, const char *service, const char *method);
From b507640173bd3a27131349272b4800ee128f0fa0 Mon Sep 17 00:00:00 2001
From: Rodrigo Pastrana
Date: Fri, 17 May 2024 17:52:41 -0400
Subject: [PATCH 025/151] HPCC-31886 LogAccess Trace/Span ID filtering

- Exposes trace/span filter logic in wslogaccess
- Adds logic to generate trace/span ID filter commands

Signed-off-by: Rodrigo Pastrana
---
esp/scm/ws_logaccess.ecm | 14 +++++--
.../ws_logaccess/WsLogAccessService.cpp | 15 ++++++-
system/jlib/jlog.cpp | 10 +++++
system/jlib/jlog.hpp | 14 +++++++
.../AzureLogAnalyticsCurlClient.cpp | 32 +++++++++++++++
.../AzureLogAnalyticsCurlClient.hpp | 7 ++++
.../ElasticStack/ElasticStackLogAccess.cpp | 40 +++++++++++++++++++
.../ElasticStack/ElasticStackLogAccess.hpp | 6 +++
8 files changed, 133 insertions(+), 5 deletions(-)

diff --git a/esp/scm/ws_logaccess.ecm b/esp/scm/ws_logaccess.ecm
index 71717e9ada4..bd214a668fa 100644
---
a/esp/scm/ws_logaccess.ecm +++ b/esp/scm/ws_logaccess.ecm @@ -25,7 +25,9 @@ ESPenum LogAccessType : int BySourceInstance(5, "BySourceInstance"), BySourceNode(6, "BySourceNode"), ByFieldName(7, "ByFieldName"), - ByPod(8, "ByPod") + ByPod(8, "ByPod"), + ByTraceID(9, "ByTraceID"), + BySpanID(10, "BySpanID") }; ESPenum LogAccessLogFormat : int @@ -67,7 +69,9 @@ ESPenum LogColumnType : string processid("processid"), threadid("threadid"), timestamp("timestamp"), - pod("pod") + pod("pod"), + traceid("traceid"), + spanid("spanid") }; ESPenum LogColumnValueType : string @@ -191,7 +195,9 @@ ESPenum SortColumType : int BySourceInstance(5, "BySourceInstance"), BySourceNode(6, "BySourceNode"), ByFieldName(7, "ByFieldName"), - ByPod(8, "ByPod") + ByPod(8, "ByPod"), + ByTraceID(9, "ByTraceID"), + BySpanID(10, "BySpanID") }; ESPStruct SortCondition @@ -222,7 +228,7 @@ ESPResponse GetLogsResponse [min_ver("1.02")] unsigned int TotalLogLinesAvailable; }; -ESPservice [auth_feature("WsLogAccess:READ"), version("1.05"), default_client_version("1.05"), exceptions_inline("xslt/exceptions.xslt")] ws_logaccess +ESPservice [auth_feature("WsLogAccess:READ"), version("1.06"), default_client_version("1.06"), exceptions_inline("xslt/exceptions.xslt")] ws_logaccess { ESPmethod GetLogAccessInfo(GetLogAccessInfoRequest, GetLogAccessInfoResponse); ESPmethod GetLogs(GetLogsRequest, GetLogsResponse); diff --git a/esp/services/ws_logaccess/WsLogAccessService.cpp b/esp/services/ws_logaccess/WsLogAccessService.cpp index 97c035f6ab0..0e8b12c329d 100644 --- a/esp/services/ws_logaccess/WsLogAccessService.cpp +++ b/esp/services/ws_logaccess/WsLogAccessService.cpp @@ -164,7 +164,14 @@ ILogAccessFilter * buildLogFilterByFields(CLogAccessType searchByCategory, const case CLogAccessType_BySourceNode: { return getHostLogAccessFilter(searchByValue); - break; + } + case CLogAccessType_ByTraceID: + { + return getTraceIDLogAccessFilter(searchByValue); + } + case CLogAccessType_BySpanID: + { + return getSpanIDLogAccessFilter(searchByValue); } case CLogAccessType_ByFieldName: { @@ -343,6 +350,12 @@ bool Cws_logaccessEx::onGetLogs(IEspContext &context, IEspGetLogsRequest &req, I case CSortColumType_BySourceNode: mappedField = LOGACCESS_MAPPEDFIELD_host; break; + case CSortColumType_ByTraceID: + mappedField = LOGACCESS_MAPPEDFIELD_traceid; + break; + case CSortColumType_BySpanID: + mappedField = LOGACCESS_MAPPEDFIELD_spanid; + break; case CSortColumType_ByFieldName: if (isEmptyString(condition.getColumnName())) throw makeStringExceptionV(-1, "WsLogAccess: SortByFieldName option requires ColumnName!"); diff --git a/system/jlib/jlog.cpp b/system/jlib/jlog.cpp index 6039e30ba71..4747c853759 100644 --- a/system/jlib/jlog.cpp +++ b/system/jlib/jlog.cpp @@ -3194,6 +3194,16 @@ ILogAccessFilter * getJobIDLogAccessFilter(const char * jobId) return new FieldLogAccessFilter(jobId, LOGACCESS_FILTER_jobid); } +ILogAccessFilter * getTraceIDLogAccessFilter(const char * traceId) +{ + return new FieldLogAccessFilter(traceId, LOGACCESS_FILTER_trace); +} + +ILogAccessFilter * getSpanIDLogAccessFilter(const char * spanId) +{ + return new FieldLogAccessFilter(spanId, LOGACCESS_FILTER_span); +} + ILogAccessFilter * getColumnLogAccessFilter(const char * columnName, const char * value) { return new ColumnLogAccessFilter(columnName, value, LOGACCESS_FILTER_column); diff --git a/system/jlib/jlog.hpp b/system/jlib/jlog.hpp index 02d5dfff2e6..29c37c4164b 100644 --- a/system/jlib/jlog.hpp +++ b/system/jlib/jlog.hpp @@ -1382,6 +1382,8 @@ typedef enum LOGACCESS_FILTER_host, 
LOGACCESS_FILTER_column, LOGACCESS_FILTER_pod, + LOGACCESS_FILTER_trace, + LOGACCESS_FILTER_span, LOGACCESS_FILTER_unknown } LogAccessFilterType; @@ -1403,6 +1405,10 @@ inline const char * logAccessFilterTypeToString(LogAccessFilterType field) return "instance"; case LOGACCESS_FILTER_host: return "host"; + case LOGACCESS_FILTER_trace: + return "trace"; + case LOGACCESS_FILTER_span: + return "span"; case LOGACCESS_FILTER_or: return "OR"; case LOGACCESS_FILTER_and: @@ -1431,6 +1437,10 @@ inline unsigned logAccessFilterTypeFromName(char const * name) return LOGACCESS_FILTER_pod; if(strieq(name, "instance")) return LOGACCESS_FILTER_instance; + if(strieq(name, "trace")) + return LOGACCESS_FILTER_trace; + if(strieq(name, "span")) + return LOGACCESS_FILTER_span; if(strieq(name, "host")) return LOGACCESS_FILTER_host; if(strieq(name, "OR")) @@ -1481,6 +1491,8 @@ enum LogAccessMappedField LOGACCESS_MAPPEDFIELD_instance, LOGACCESS_MAPPEDFIELD_pod, LOGACCESS_MAPPEDFIELD_host, + LOGACCESS_MAPPEDFIELD_traceid, + LOGACCESS_MAPPEDFIELD_spanid, LOGACCESS_MAPPEDFIELD_unmapped }; @@ -1680,6 +1692,8 @@ extern jlib_decl ILogAccessFilter * getHostLogAccessFilter(const char * host); extern jlib_decl ILogAccessFilter * getJobIDLogAccessFilter(const char * jobId); extern jlib_decl ILogAccessFilter * getComponentLogAccessFilter(const char * component); extern jlib_decl ILogAccessFilter * getPodLogAccessFilter(const char * podName); +extern jlib_decl ILogAccessFilter * getTraceIDLogAccessFilter(const char * traceId); +extern jlib_decl ILogAccessFilter * getSpanIDLogAccessFilter(const char * spanId); extern jlib_decl ILogAccessFilter * getAudienceLogAccessFilter(MessageAudience audience); extern jlib_decl ILogAccessFilter * getClassLogAccessFilter(LogMsgClass logclass); extern jlib_decl ILogAccessFilter * getBinaryLogAccessFilter(ILogAccessFilter * arg1, ILogAccessFilter * arg2, LogAccessFilterType type); diff --git a/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.cpp b/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.cpp index 92a92d3931f..3c3fd413d20 100644 --- a/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.cpp +++ b/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.cpp @@ -672,6 +672,38 @@ void AzureLogAnalyticsCurlClient::populateKQLQueryString(StringBuffer & queryStr DBGLOG("%s: Searching log entries by class: '%s'...", COMPONENT_NAME, queryValue.str()); break; } + case LOGACCESS_FILTER_trace: + { + if (m_traceSearchColName.isEmpty()) + throw makeStringExceptionV(-1, "%s: 'Trace' log entry field not configured", COMPONENT_NAME); + + queryField = m_traceSearchColName.str(); + + if (!m_traceIndexSearchPattern.isEmpty()) + { + throwIfMultiIndexDetected(queryIndex.str(), m_traceIndexSearchPattern.str()); + queryIndex = m_traceIndexSearchPattern.str(); + } + + DBGLOG("%s: Searching log entries by traceid: '%s'...", COMPONENT_NAME, queryValue.str()); + break; + } + case LOGACCESS_FILTER_span: + { + if (m_spanSearchColName.isEmpty()) + throw makeStringExceptionV(-1, "%s: 'Span' log entry field not configured", COMPONENT_NAME); + + queryField = m_spanSearchColName.str(); + + if (!m_spanIndexSearchPattern.isEmpty()) + { + throwIfMultiIndexDetected(queryIndex.str(), m_spanIndexSearchPattern.str()); + queryIndex = m_spanIndexSearchPattern.str(); + } + + DBGLOG("%s: Searching log entries by spanid: '%s'...", COMPONENT_NAME, queryValue.str()); + break; + } case LOGACCESS_FILTER_audience: { if 
(m_audienceSearchColName.isEmpty()) diff --git a/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.hpp b/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.hpp index edc0c3d84cb..1f7e9d040b5 100644 --- a/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.hpp +++ b/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.hpp @@ -69,6 +69,13 @@ class AzureLogAnalyticsCurlClient : public CInterfaceOf StringBuffer m_componentsLookupKeyColumn; StringBuffer m_instanceLookupKeyColumn; + + StringBuffer m_spanSearchColName; + StringBuffer m_spanIndexSearchPattern; + + StringBuffer m_traceSearchColName; + StringBuffer m_traceIndexSearchPattern; + bool targetIsContainerLogV2 = false; public: diff --git a/system/logaccess/ElasticStack/ElasticStackLogAccess.cpp b/system/logaccess/ElasticStack/ElasticStackLogAccess.cpp index 9724a6c10ad..0540845764c 100644 --- a/system/logaccess/ElasticStack/ElasticStackLogAccess.cpp +++ b/system/logaccess/ElasticStack/ElasticStackLogAccess.cpp @@ -560,6 +560,12 @@ void ElasticStackLogAccess::esSearchMetaData(std::string & search, const LogAcce case LOGACCESS_MAPPEDFIELD_host: sortByFieldName = m_hostSearchColName.str(); break; + case LOGACCESS_MAPPEDFIELD_traceid: + sortByFieldName = m_traceSearchColName.str(); + break; + case LOGACCESS_MAPPEDFIELD_spanid: + sortByFieldName = m_spanSearchColName.str(); + break; case LOGACCESS_MAPPEDFIELD_unmapped: default: sortByFieldName = condition.fieldName.get(); @@ -630,6 +636,40 @@ void ElasticStackLogAccess::populateESQueryQueryString(std::string & queryString DBGLOG("%s: Searching log entries by jobid: '%s'...", COMPONENT_NAME, queryValue.str() ); break; } + case LOGACCESS_FILTER_trace: + { + if (m_traceSearchColName.isEmpty()) + throw makeStringExceptionV(-1, "%s: 'traceid' log entry field not configured", COMPONENT_NAME); + + queryField = m_traceSearchColName.str(); + + if (!m_traceIndexSearchPattern.isEmpty()) + { + if (!queryIndex.empty() && queryIndex != m_traceIndexSearchPattern.str()) + throw makeStringExceptionV(-1, "%s: Multi-index query not supported: '%s' - '%s'", COMPONENT_NAME, queryIndex.c_str(), m_traceIndexSearchPattern.str()); + queryIndex = m_traceIndexSearchPattern; + } + + DBGLOG("%s: Searching log entries by traceid: '%s'...", COMPONENT_NAME, queryValue.str() ); + break; + } + case LOGACCESS_FILTER_span: + { + if (m_spanSearchColName.isEmpty()) + throw makeStringExceptionV(-1, "%s: 'spanid' log entry field not configured", COMPONENT_NAME); + + queryField = m_spanSearchColName.str(); + + if (!m_spanIndexSearchPattern.isEmpty()) + { + if (!queryIndex.empty() && queryIndex != m_spanIndexSearchPattern.str()) + throw makeStringExceptionV(-1, "%s: Multi-index query not supported: '%s' - '%s'", COMPONENT_NAME, queryIndex.c_str(), m_spanIndexSearchPattern.str()); + queryIndex = m_spanIndexSearchPattern; + } + + DBGLOG("%s: Searching log entries by spanid: '%s'...", COMPONENT_NAME, queryValue.str() ); + break; + } case LOGACCESS_FILTER_class: { if (m_classSearchColName.isEmpty()) diff --git a/system/logaccess/ElasticStack/ElasticStackLogAccess.hpp b/system/logaccess/ElasticStack/ElasticStackLogAccess.hpp index 0e78b20d821..922f3706b41 100644 --- a/system/logaccess/ElasticStack/ElasticStackLogAccess.hpp +++ b/system/logaccess/ElasticStack/ElasticStackLogAccess.hpp @@ -65,6 +65,12 @@ class ElasticStackLogAccess : public CInterfaceOf StringBuffer m_hostSearchColName; StringBuffer m_hostIndexSearchPattern; +
StringBuffer m_spanSearchColName; + StringBuffer m_spanIndexSearchPattern; + + StringBuffer m_traceSearchColName; + StringBuffer m_traceIndexSearchPattern; + StringBuffer m_defaultDocType; //default doc type to query elasticlient::Client m_esClient; From 6c4f9a10e511964d7d7524d222c226c92ffde793 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Fri, 7 Jun 2024 17:28:53 +0100 Subject: [PATCH 026/151] Remove invalid artifacts from helm directory Signed-off-by: Gavin Halliday --- .github/workflows/build-assets.yml | 43 -- helm/hpcc/Chart.yaml | 4 +- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 +- helm/hpcc/templates/eclccserver.yaml | 4 +- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/localroxie.yaml.fixed | 161 -------- helm/hpcc/templates/roxie.yaml | 8 +- helm/hpcc/templates/roxie.yaml.fixed | 479 ---------------------- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +- system/jlib/jlzw.cpp | 9 +- version.cmake | 4 +- 18 files changed, 28 insertions(+), 714 deletions(-) delete mode 100644 helm/hpcc/templates/localroxie.yaml.fixed delete mode 100644 helm/hpcc/templates/roxie.yaml.fixed diff --git a/.github/workflows/build-assets.yml b/.github/workflows/build-assets.yml index 4fd56f61eb6..195034d0efc 100644 --- a/.github/workflows/build-assets.yml +++ b/.github/workflows/build-assets.yml @@ -321,29 +321,6 @@ jobs: tag: ${{ needs.preamble.outputs.internal_tag }} artifacts: "${{ needs.preamble.outputs.folder_build }}/hpccsystems-*-internal*.deb,${{ needs.preamble.outputs.folder_build }}/hpccsystems-*-internal*.rpm" - - name: Upload Assets to Jfrog (debian internal) - if: ${{ matrix.ln && !matrix.container && contains(matrix.os, 'ubuntu') && github.repository_owner == 'hpcc-systems'}} - shell: bash - run: | - cd ${{ needs.preamble.outputs.folder_build }} - version=$(echo "${{ needs.preamble.outputs.internal_tag }}" | sed 's/internal_//') - packages=($(ls -1 hpccsystems-*.deb )) - for _package in ${packages[@]}; do - distribution=$( echo "${_package}" | sed "s/^.*${version}//" | awk -F '_' '{print $1;}' ) - curl -u${{ secrets.JFROG_USERNAME }}:${{ secrets.JFROG_PASSWORD }} -XPUT "https://${{ secrets.JFROG_REGISTRY }}/hpccpl-debian-local/pool/LN/${_package};deb.distribution=${distribution};deb.component=LN;deb.architecture=amd64" -T ${{ needs.preamble.outputs.folder_build }}/${_package} - done - - - name: Upload Assets to Jfrog (centos internal) - if: ${{ matrix.ln && !matrix.container && !contains(matrix.os, 'ubuntu') && github.repository_owner == 'hpcc-systems'}} - shell: bash - run: | - cd ${{ needs.preamble.outputs.folder_build }} - packages=($(ls -1 hpccsystems-*.rpm )) - for _package in ${packages[@]}; do - distribution=$( echo "${_package}" | awk -F '.' 
'{print $4;}' ) - curl -u${{ secrets.JFROG_USERNAME }}:${{ secrets.JFROG_PASSWORD }} -XPUT "https://${{ secrets.JFROG_REGISTRY }}/hpccpl-rpm-local/LN/${distribution}/x86_64/${_package}" -T ${{ needs.preamble.outputs.folder_build }}/${_package} - done - - name: Locate k8s deb file (internal) if: ${{ matrix.ln && matrix.container && !matrix.documentation }} id: ln-container @@ -493,26 +470,6 @@ jobs: tag: ${{ needs.preamble.outputs.internal_tag }} artifacts: "./build/hpccsystems-clienttools-internal*.exe,./build/hpccsystems-clienttools-internal*.msi,./build/hpccsystems-clienttools-internal*.dmg,./build/hpccsystems-clienttools-internal*.pkg,./build/hpccsystems-clienttools-internal*.tar.gz" - - name: Upload Assets to Jfrog (windows) - if: ${{ contains(matrix.os, 'windows') && github.repository_owner == 'hpcc-systems' }} - shell: bash - run: | - cd ./build - packages=($(ls -1 hpccsystems-*.exe )) - for _package in ${packages[@]}; do - curl -u${{ secrets.JFROG_USERNAME }}:${{ secrets.JFROG_PASSWORD }} "https://${{ secrets.JFROG_REGISTRY }}/hpccpl-windows-local/LN/windows/x86_64/${_package}" -T ${_package} - done - - - name: Upload Assets to Jfrog (macos) - if: ${{ contains(matrix.os, 'macos') && github.repository_owner == 'hpcc-systems' }} - shell: bash - run: | - cd ./build - packages=($(ls -1 hpccsystems-*.pkg )) - for _package in ${packages[@]}; do - curl -u${{ secrets.JFROG_USERNAME }}:${{ secrets.JFROG_PASSWORD }} "https://${{ secrets.JFROG_REGISTRY }}/hpccpl-macos-local/LN/macos/x86_64/${_package}" -T ${_package} - done - - name: Upload error logs if: ${{ failure() || cancelled() }} uses: actions/upload-artifact@v4 diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index e13e0b82478..cd689f32b18 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.0.115-closedown0 +version: 9.0.111-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.0.115-closedown0 +appVersion: 9.0.111-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 5e5f62840e2..34fe972ae12 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1314,7 +1314,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 503ce98f06a..478354bde26 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -50,7 +50,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 annotations: checksum/config: {{ $configSHA }} spec: diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index b8f744494a1..c73d45c4185 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -82,7 +82,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 3cb50bb8770..9e3ee1607ba 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -56,7 +56,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 1633dc80d63..067dddd5bdb 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -135,7 +135,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index a703430ed65..b269a2030f7 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -57,7 +57,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -142,7 +142,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index c7e902c158a..a051e0a480e 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -64,7 +64,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 781c395abb3..60668168a9a 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -117,7 +117,7 @@ spec: server: {{ .name | quote }} accessDali: "yes" app: {{ $application }} - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 447a38d6189..37f2da967e7 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -70,7 +70,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml.fixed b/helm/hpcc/templates/localroxie.yaml.fixed deleted file mode 100644 index 9cda81d8b8f..00000000000 --- a/helm/hpcc/templates/localroxie.yaml.fixed +++ /dev/null @@ -1,161 +0,0 @@ -{{/* - ---- DO NOT EDIT THIS FILE - all configuration of HPCC platform should be done via values.yaml ---- - -############################################################################## - - HPCC SYSTEMS software Copyright (C) 2021 HPCC Systems®. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - -############################################################################## - -*/}} -{{/* -localroxie configmap -Pass in dict with root and me -*/}} -{{- define "hpcc.localroxieConfigMap" }} -apiVersion: v1 -metadata: - name: {{ .me.name }}-configmap -data: - {{ .me.name }}.yaml: - version: 1.0 - roxie: -{{ toYaml (omit .me "logging" "tracing" "env") | indent 6 }} -{{- include "hpcc.generateLoggingConfig" . | indent 6 }} -{{- include "hpcc.generateTracingConfig" . | indent 6 }} -{{ include "hpcc.generateVaultConfig" . | indent 6 }} - global: -{{ include "hpcc.generateGlobalConfigMap" .root | indent 6 }} -{{- end -}}{{/* define "hpcc.localroxieConfigMap */}} - -{{ range $roxie := $.Values.roxie -}} -{{- if not $roxie.disabled -}} -{{- $env := concat ($.Values.global.env | default list) (.env | default list) -}} -{{- $secretsCategories := list "system" "eclUser" "ecl" "storage" }} -{{- $enginePlaneDetails := dict -}} -{{- $_ := include "hpcc.getEnginePlanes" (dict "root" $ "me" . "result" $enginePlaneDetails) -}} -{{- $commonCtx := dict "root" $ "me" $roxie "includeCategories" $enginePlaneDetails.planeCategories "includeNames" $enginePlaneDetails.namedPlanes "secretsCategories" $secretsCategories "env" $env }} -{{- $configSHA := include "hpcc.getConfigSHA" ($commonCtx | merge (dict "configMapHelper" "hpcc.localroxieConfigMap" "component" "roxie" "excludeKeys" "global")) }} -{{- include "hpcc.checkDefaultStoragePlane" $commonCtx }} -{{- $singleNode := (hasKey $roxie "singleNode") | ternary $roxie.singleNode ((hasKey $roxie "localAgent") | ternary $roxie.localAgent false) }} -{{- if $singleNode -}} -{{- $localAgent := ((hasKey $roxie "localAgent") | ternary $roxie.localAgent true) -}} -{{- $name := $roxie.name -}} -{{- $servername := printf "%s-server" $roxie.name -}} - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ $roxie.name | quote }} -spec: - replicas: {{ $roxie.replicas | default 1 }} - selector: - matchLabels: - run: {{ $roxie.name | quote }} - server: {{ $servername | quote }} - template: - metadata: - labels: - run: {{ $roxie.name | quote }} - server: {{ $servername | quote }} - accessDali: "yes" - accessEsp: "yes" -<<<<<<< HEAD - helmVersion: 9.0.115-closedown0 -{{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} -{{- if hasKey . "labels" }} -{{ toYaml .labels | indent 8 }} -{{- end }} -======= - helmVersion: 9.0.115-closedown0 - {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} -{{- if hasKey . 
"labels" }} -{{ toYaml .labels | indent 8 }} -{{- end }} ->>>>>>> origin/candidate-9.6.x - annotations: - checksum/config: {{ $configSHA }} -{{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} - spec: -{{- include "hpcc.placementsByPodTargetType" (dict "root" $ "pod" $roxie.name "target" $roxie.name "type" "roxie") | indent 6 }} - serviceAccountName: "hpcc-default" - initContainers: -{{- include "hpcc.createConfigInitContainers" $commonCtx | indent 6 }} -{{- include "hpcc.addImagePullSecrets" $commonCtx | nindent 6 -}} - containers: - - name: {{ $roxie.name | quote }} - workingDir: /var/lib/HPCCSystems - command: [ {{ include "hpcc.componentCommand" (dict "me" $roxie "root" $ "process" "roxie") }} ] - args: [ {{- include "hpcc.componentStartArgs" (dict "me" $roxie "root" $ "process" "roxie") | nindent 16 }} - {{ include "hpcc.configArg" $roxie }}, - {{ include "hpcc.daliArg" (dict "root" $ "component" "Local Roxie" "optional" false) }}, - "--server=true", - "--localAgent={{ $localAgent }}", - "--resolveLocally=false" - ] - env: -{{ include "hpcc.mergeEnvironments" $env | indent 8 -}} - - name: "SENTINEL" - value: "/tmp/{{ $roxie.name }}.sentinel" -{{- $local := dict "first" true }} -{{- range $service := $roxie.services }} -{{- if ne (int $service.servicePort) 0 }} -{{- if $local.first }} -{{- $_ := set $local "first" false }} - ports: -{{- end }} - - name: {{ $service.name }} - containerPort: {{ $service.servicePort }} -{{- end }} -{{- end }} -{{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} -{{- include "hpcc.addResources" (dict "me" $roxie.resources "root" $) | indent 8 }} -{{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} - volumeMounts: -{{ include "hpcc.addConfigMapVolumeMount" . | indent 8 }} -{{ include "hpcc.addVolumeMounts" $commonCtx | indent 8 }} -{{- include "hpcc.addSecretVolumeMounts" $commonCtx | indent 8 }} -{{ include "hpcc.addVaultClientCertificateVolumeMounts" $commonCtx | indent 8 }} -{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "name" $roxie.name "component" "localroxie" "external" false) | indent 8 }} -{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "name" $roxie.name "component" "localroxie" "external" true "includeRemote" true) | indent 8 }} -{{ include "hpcc.addUDPCertificateVolumeMount" (dict "root" $ "name" $roxie.name "component" "localudpkey" ) | indent 8 }} - volumes: -{{ include "hpcc.addConfigMapVolume" . 
| indent 6 }} -{{ include "hpcc.addVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addSecretVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addVaultClientCertificateVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addCertificateVolume" (dict "root" $ "name" $roxie.name "component" "localroxie" "external" false) | indent 6 }} -{{ include "hpcc.addCertificateVolume" (dict "root" $ "name" $roxie.name "component" "localroxie" "external" true "includeRemote" true) | indent 6 }} -{{ include "hpcc.addUDPCertificateVolume" (dict "root" $ "name" $roxie.name "component" "localudpkey" ) | indent 6 }} ---- -{{- range $service := $roxie.services }} -{{- if ne (int $service.servicePort) 0 }} -{{ include "hpcc.addService" ( dict "root" $ "name" $service.name "service" $service "selector" $servername "defaultPort" $service.servicePort ) }} ---- -{{- end }} -{{- end }} -kind: ConfigMap -{{ include "hpcc.generateConfig" ($commonCtx | merge (dict "configMapHelper" "hpcc.localroxieConfigMap")) }} ---- -{{ include "hpcc.addCertificate" (dict "root" $ "name" $roxie.name "services" $roxie.services "component" "localroxie" "external" false) }} -{{ include "hpcc.addCertificate" (dict "root" $ "name" $roxie.name "services" $roxie.services "component" "localroxie" "external" true "includeRemote" true) }} -{{ include "hpcc.addUDPCertificate" (dict "root" $ "name" $roxie.name "component" "localudpkey") }} ---- -{{ include "hpcc.addEgress" $commonCtx }} - -{{- end }}{{/* if singleNode */}} -{{- end }}{{/* if not disabled */}} -{{- end }}{{/* range */}} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 0662bd2c291..00f0b503d46 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -120,7 +120,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -180,7 +180,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -242,7 +242,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -347,7 +347,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/roxie.yaml.fixed b/helm/hpcc/templates/roxie.yaml.fixed deleted file mode 100644 index 55a501816f0..00000000000 --- a/helm/hpcc/templates/roxie.yaml.fixed +++ /dev/null @@ -1,479 +0,0 @@ -{{/* - ---- DO NOT EDIT THIS FILE - all configuration of HPCC platform 
should be done via values.yaml ---- - -############################################################################## - - HPCC SYSTEMS software Copyright (C) 2021 HPCC Systems®. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -############################################################################## - -*/}} - -{{/* -roxie configmap -Pass in dict with root and me -*/}} -{{- define "hpcc.roxieConfigMap" -}} -apiVersion: v1 -metadata: - name: {{ .me.name }}-configmap -data: - {{ .me.name }}.yaml: - version: 1.0 - roxie: -{{- $root := .root -}} -{{- $component := .me }} - services: -{{- range $service := .me.services }} - - name: {{ $service.name }} -{{ toYaml (omit $service "tls" "name") | indent 8 }} -{{- if ne (int $service.servicePort) 0 }} -{{- include "hpcc.addTLSServiceEntries" (dict "root" $root "service" $service "component" $component "visibility" $service.visibility "remoteClients" $service.remoteClients "trustClients" $service.trustClients "includeTrustedPeers" true "incluedRoxieAndEspServices" true) | indent 6 }} -{{- end }} -{{- end }} -{{ toYaml ( omit .me "logging" "tracing" "topoServer" "encryptInTransit" "env" "services") | indent 6 }} - numChannels: {{ .numChannels }} - topologyServers: "{{ .toponame }}:{{ .topoport }}" - heartbeatInterval: {{ .heartbeatInterval }} - resolveLocally: false -{{- $mtlsEnabled := (eq (include "hpcc.isMtlsEnabled" (dict "root" .root)) "true") -}} -{{/* By default use encryption if local certificates are enabled, but allow it to be turned off via roxie .encryptInTransit value */}} -{{- if (hasKey .me "encryptInTransit") -}} -{{- if and (.me.encryptInTransit) (not $mtlsEnabled) -}} -{{- $_ := fail (printf "Roxie %s encryptInTransit requires local cert-manager configuration." .me.name ) }} -{{- end }} - encryptInTransit: {{ .me.encryptInTransit }} -{{ else }} - encryptInTransit: {{ $mtlsEnabled }} -{{ end -}} -{{- include "hpcc.generateLoggingConfig" (dict "root" .root "me" .me) | indent 6 }} -{{- include "hpcc.generateTracingConfig" (dict "root" .root "me" .me) | indent 6 }} -{{ include "hpcc.generateVaultConfig" . 
| indent 6 }} - global: -{{ include "hpcc.generateGlobalConfigMap" .root | indent 6 }} -{{- end -}}{{/*define "hpcc.roxieConfigMap"*/}} - -{{- define "hpcc.roxieTopoConfigMap" -}} -apiVersion: v1 -metadata: - name: {{ .toponame }}-configmap -data: - {{ .toponame }}.yaml: - version: 1.0 - toposerver: -{{ toYaml ( omit .toposerver "logging" "tracing" "env") | indent 6 }} -{{- include "hpcc.generateLoggingConfig" (dict "root" .root "me" .toposerver) | indent 6 }} -{{- include "hpcc.generateTracingConfig" (dict "root" .root "me" .toposerver) | indent 6 }} - global: -{{ include "hpcc.generateGlobalConfigMap" .root | indent 6 }} -{{- end -}}{{/*define "hpcc.roxieConfigMap"*/}} - -{{ range $roxie := $.Values.roxie -}} -{{- if not $roxie.disabled -}} -{{- $env := concat ($.Values.global.env | default list) (.env | default list) -}} -{{- $secretsCategories := list "system" "eclUser" "ecl" "storage" }} -{{- $toposerver := ($roxie.topoServer | default dict) -}} -{{- $enginePlaneDetails := dict -}} -{{- $_ := include "hpcc.getEnginePlanes" (dict "root" $ "me" . "result" $enginePlaneDetails) -}} -{{- $commonCtx := dict "root" $ "me" $roxie "includeCategories" $enginePlaneDetails.planeCategories "includeNames" $enginePlaneDetails.namedPlanes "secretsCategories" $secretsCategories "toposerver" $toposerver "env" $env }} -{{- $_ := set $commonCtx "toponame" (printf "%s-toposerver" $roxie.name) -}} -{{- $_ := set $commonCtx "numChannels" ($roxie.numChannels | int | default 1) -}} -{{- $_ := set $commonCtx "topoport" ($toposerver.port | int | default 9004) -}} -{{- $_ := set $commonCtx "heartbeatInterval" ($toposerver.heartbeatInterval | int | default 10000) -}} -{{- $_ := set $toposerver "name" $commonCtx.toponame -}} -{{- $configSHA := include "hpcc.getConfigSHA" ($commonCtx | merge (dict "configMapHelper" "hpcc.roxieConfigMap" "component" "roxie" "excludeKeys" "global")) }} -{{- $topoconfigSHA := include "hpcc.getConfigSHA" ($commonCtx | merge (dict "configMapHelper" "hpcc.roxieTopoConfigMap" "component" "toposerver" "excludeKeys" "global")) }} -{{- include "hpcc.checkDefaultStoragePlane" $commonCtx }} -{{- $singleNode := (hasKey $roxie "singleNode") | ternary $roxie.singleNode ((hasKey $roxie "localAgent") | ternary $roxie.localAgent false) }} -{{- if not $singleNode -}} -{{- $servername := printf "%s-server" $roxie.name -}} -{{- $udpkeyname := $roxie.name -}} -{{- range $service := $roxie.services }} -{{- range $remoteClient := $service.remoteClients }} - {{ include "hpcc.addExternalRemoteClientCertificate" (dict "root" $ "client" $remoteClient.name "organization" $remoteClient.organization "instance" $service.name "component" "roxie" "visibility" $service.visibility "secretTemplate" $remoteClient.secretTemplate) }} -{{- end }} -{{- if ne (int $service.servicePort) 0 }} -{{- $_ := set $service "port" $service.servicePort }} -{{- end }} -{{- end }} - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ $commonCtx.toponame | quote }} -spec: - replicas: {{ $toposerver.replicas | default 1 }} - selector: - matchLabels: - run: {{ $commonCtx.toponame | quote }} - template: - metadata: - labels: -{{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} - run: {{ $commonCtx.toponame | quote }} - roxie-cluster: {{ $roxie.name | quote }} -<<<<<<< HEAD - helmVersion: 9.0.115-closedown0 -{{- if hasKey $.Values.global "metrics" }} -{{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} -{{- 
end }} -{{- if hasKey $toposerver "labels" }} -{{ toYaml $toposerver.labels | indent 8 }} -{{- end }} -======= - helmVersion: 9.0.115-closedown0 -{{- if hasKey $.Values.global "metrics" }} - {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} -{{- end }} -{{- if hasKey $toposerver "labels" }} -{{ toYaml $toposerver.labels | indent 8 }} -{{- end }} ->>>>>>> origin/candidate-9.6.x - annotations: - checksum/config: {{ $topoconfigSHA }} -{{- include "hpcc.generateAnnotations" (dict "root" $commonCtx.root "me" $toposerver) | indent 8 }} -{{- if hasKey $.Values.global "metrics" }} -{{- include "hpcc.addPrometheusScrapeAnnotations" $.Values.global.metrics | nindent 8 }} -{{- end }} - spec: -{{- include "hpcc.placementsByPodTargetType" (dict "root" $ "pod" $commonCtx.toponame "target" $roxie.name "type" "roxie") | indent 6 }} - serviceAccountName: "hpcc-default" -{{- include "hpcc.addImagePullSecrets" $commonCtx | nindent 6 -}} - containers: - - name: {{ $commonCtx.toponame | quote }} -{{ include "hpcc.addSentinelProbes" $toposerver | indent 8 }} -{{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} -{{- $defaultResources := dict "cpu" "500m" "memory" "200Mi" }} -{{- include "hpcc.addResources" (dict "me" .topoResources "defaults" $defaultResources "root" $) | indent 8 }} -{{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} - workingDir: /var/lib/HPCCSystems - command: [ {{ include "hpcc.componentCommand" (dict "me" $toposerver "root" $ "process" "toposerver") }} ] - args: [ {{- include "hpcc.componentStartArgs" (dict "me" $toposerver "root" $ "process" "toposerver") | nindent 16 }} - {{ include "hpcc.configArg" $toposerver }} - ] - env: -{{ include "hpcc.mergeEnvironments" $env | indent 8 -}} - - name: "SENTINEL" - value: "/tmp/{{ $commonCtx.toponame }}.sentinel" - volumeMounts: -{{ include "hpcc.addConfigMapVolumeMount" $toposerver | indent 8 }} -{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "component" "topo" "name" $commonCtx.toponame "external" false) | indent 8 }} - volumes: -{{ include "hpcc.addConfigMapVolume" $toposerver | indent 6 }} -{{ include "hpcc.addCertificateVolume" (dict "root" $ "component" "topo" "name" $commonCtx.toponame "external" false) | indent 6 }} - ---- -{{ include "hpcc.addCertificate" (dict "root" $ "name" $commonCtx.toponame "servicename" $commonCtx.toponame "component" "topo" "external" false) }} -{{ include "hpcc.addUDPCertificate" (dict "root" $ "name" $udpkeyname "component" "udpkey") }} - ---- -{{- range $service := $roxie.services }} -{{- if ne (int $service.servicePort) 0 }} -{{ include "hpcc.addService" ( dict "root" $ "name" $service.name "service" $service "selector" $servername "defaultPort" $service.servicePort) }} ---- -{{- end }} -{{- end }} - -apiVersion: v1 -kind: Service -metadata: - name: {{ $commonCtx.toponame | quote }} - labels: -<<<<<<< HEAD - helmVersion: 9.0.115-closedown0 -{{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} -======= - helmVersion: 9.0.115-closedown0 - {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} ->>>>>>> origin/candidate-9.6.x -spec: - ports: - - port: {{ $commonCtx.topoport }} - protocol: TCP - targetPort: {{ $commonCtx.topoport }} - selector: - run: {{ $commonCtx.toponame | quote }} - clusterIP: None # Headless service ---- - -apiVersion: networking.k8s.io/v1 -kind: 
NetworkPolicy -metadata: - name: {{ printf "%s-internal-traffic" $roxie.name }} -spec: - podSelector: - matchLabels: - roxie-cluster: {{ $roxie.name | quote }} - policyTypes: - - Ingress - - Egress - ingress: - - from: - - podSelector: - matchLabels: - roxie-cluster: {{ $roxie.name | quote }} - egress: - - to: - - podSelector: - matchLabels: - roxie-cluster: {{ $roxie.name | quote }} - ---- -kind: ConfigMap -{{ include "hpcc.generateConfig" ($commonCtx | merge (dict "configMapHelper" "hpcc.roxieConfigMap")) }} ---- -kind: ConfigMap -{{ include "hpcc.generateConfig" ($commonCtx | merge (dict "configMapHelper" "hpcc.roxieTopoConfigMap")) }} ---- - -{{- $_ := set $commonCtx "instanceNames" list -}} -{{ if $roxie.serverReplicas -}} -{{ $_ := set $commonCtx "instanceNames" (list $servername) -}} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ $servername | quote }} -spec: - replicas: {{ $roxie.serverReplicas }} - selector: - matchLabels: - run: {{ $servername | quote }} - template: - metadata: - labels: - run: {{ $servername | quote }} - server: {{ $roxie.name | quote }} - roxie-cluster: {{ $roxie.name | quote }} - accessDali: "yes" - accessEsp: "yes" -<<<<<<< HEAD - helmVersion: 9.0.115-closedown0 -{{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} -{{- if hasKey $.Values.global "metrics" }} -{{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} -{{- end }} -{{- if hasKey $roxie "labels" }} -{{ toYaml $roxie.labels | indent 8 }} -{{- end }} -======= - helmVersion: 9.0.115-closedown0 - {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} -{{- if hasKey $.Values.global "metrics" }} - {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} -{{- end }} -{{- if hasKey $roxie "labels" }} -{{ toYaml $roxie.labels | indent 8 }} -{{- end }} ->>>>>>> origin/candidate-9.6.x - annotations: - checksum/config: {{ $configSHA }} -{{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} -{{- if hasKey $.Values.global "metrics" }} -{{- include "hpcc.addPrometheusScrapeAnnotations" $.Values.global.metrics | nindent 8 }} -{{- end }} -{{- if hasKey $roxie "annotations" }} -{{ toYaml $roxie.annotations | indent 8 }} -{{- end }} - spec: -{{- include "hpcc.placementsByPodTargetType" (dict "root" $ "pod" $servername "target" $roxie.name "type" "roxie") | indent 6 }} - serviceAccountName: "hpcc-default" - initContainers: -{{- include "hpcc.createConfigInitContainers" $commonCtx | indent 6 }} -{{- include "hpcc.addImagePullSecrets" $commonCtx | nindent 6 -}} - terminationGracePeriodSeconds: {{ add ($roxie.agentQueryReleaseDelaySeconds | default 60) 30 }} - containers: - - name: {{ $servername | quote }} - workingDir: /var/lib/HPCCSystems - command: [ {{ include "hpcc.componentCommand" (dict "me" $roxie "root" $ "process" "roxie") }} ] - args: [ {{- include "hpcc.componentStartArgs" (dict "me" $roxie "root" $ "process" "roxie") | nindent 16 }} - {{ include "hpcc.daliArg" (dict "root" $ "component" "Roxie" "optional" false) }}, - "--server=true" - ] - env: -{{ include "hpcc.mergeEnvironments" $env | indent 8 -}} - - name: "SENTINEL" - value: "/tmp/{{ $roxie.name }}.sentinel" -{{- $local := dict "first" true }} -{{- range $service := $roxie.services }} -{{- if ne (int $service.servicePort) 0 }} -{{- if $local.first }} -{{- $_ := set $local "first" false }} - ports: -{{- end }} - - 
name: {{ $service.name }} - containerPort: {{ $service.servicePort }} -{{- end }} -{{- end }} - lifecycle: - preStop: - exec: - command: ["testsocket", ".", "control:closedown"] -{{ include "hpcc.addSentinelProbes" ( $roxie | merge (dict "readyProbeName" ".ready" )) | indent 8 }} -{{ include "hpcc.addSecurityContext" (dict "root" $ "me" .) | indent 8 }} -{{- include "hpcc.addResources" (dict "me" ($roxie.serverResources | default $roxie.resources) "root" $) | indent 8 }} -{{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} - volumeMounts: -{{ include "hpcc.addConfigMapVolumeMount" $roxie | indent 8 }} -{{ include "hpcc.addVolumeMounts" $commonCtx | indent 8 }} -{{ include "hpcc.addSecretVolumeMounts" $commonCtx | indent 8 }} -{{ include "hpcc.addVaultClientCertificateVolumeMounts" $commonCtx | indent 8 }} -{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "component" "roxie-server" "name" $servername "external" false) | indent 8 }} -{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "component" "roxie-server" "name" $servername "certificate" $roxie.certificate "external" true "includeRemote" true) | indent 8 }} -{{ include "hpcc.addUDPCertificateVolumeMount" (dict "root" $ "component" "udpkey" "name" $udpkeyname ) | indent 8 }} - volumes: -{{ include "hpcc.addConfigMapVolume" $roxie | indent 6 }} -{{ include "hpcc.addVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addSecretVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addVaultClientCertificateVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addCertificateVolume" (dict "root" $ "component" "roxie-server" "name" $servername "external" false) | indent 6 }} -{{ include "hpcc.addCertificateVolume" (dict "root" $ "component" "roxie-server" "name" $servername "certificate" $roxie.certificate "external" true "includeRemote" true) | indent 6 }} -{{ include "hpcc.addUDPCertificateVolume" (dict "root" $ "component" "udpkey" "name" $udpkeyname) | indent 6 }} - ---- -{{ include "hpcc.addCertificate" (dict "root" $ "name" $servername "services" $roxie.services "component" "roxie-server" "external" false) }} -{{ include "hpcc.addCertificate" (dict "root" $ "name" $servername "services" $roxie.services "component" "roxie-server" "external" true "includeRemote" true) }} ---- -{{ end -}}{{/* if serverReplicas */}} - -{{- $agentPublicCertName := printf "%s-agent" $roxie.name }} -{{ include "hpcc.addCertificate" (dict "root" $ "name" $agentPublicCertName "services" $roxie.services "component" "roxie-agent" "external" true "includeRemote" true) }} - -{{ range $c, $e := until ($commonCtx.numChannels|int) -}} -{{- $channel := add $c 1 -}} -{{- $name := printf "%s-agent-%d" $roxie.name $channel }} -{{- $_ := set $commonCtx "instanceNames" (append $commonCtx.instanceNames $name) }} - -{{ include "hpcc.addCertificate" (dict "root" $ "name" $name "services" $roxie.services "component" "roxie-agent" "external" false) }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ $name | quote}} -spec: - replicas: {{ (hasKey $roxie "replicas") | ternary $roxie.replicas 1 }} - selector: - matchLabels: - run: {{ $name | quote}} - template: - metadata: - labels: -{{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-agent" "name" "roxie" "instance" $name) | indent 8 }} - run: {{ $name | quote}} -{{- if not $roxie.serverReplicas }} - server: {{ $servername | quote }} -{{- end }} - roxie-cluster: {{ $roxie.name | quote }} - accessDali: "yes" - accessEsp: "yes" -<<<<<<< HEAD - helmVersion: 9.0.115-closedown0 -{{- if 
hasKey $.Values.global "metrics" }} -{{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} -{{- end }} -{{- if hasKey $roxie "labels" }} -{{ toYaml $roxie.labels | indent 8 }} -{{- end }} -======= - helmVersion: 9.0.115-closedown0 -{{- if hasKey $.Values.global "metrics" }} - {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} -{{- end }} -{{- if hasKey $roxie "labels" }} -{{ toYaml $roxie.labels | indent 8 }} -{{- end }} ->>>>>>> origin/candidate-9.6.x - annotations: - checksum/config: {{ $configSHA }} -{{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} -{{- if hasKey $.Values.global "metrics" }} -{{- include "hpcc.addPrometheusScrapeAnnotations" $.Values.global.metrics | nindent 8 }} -{{- end }} - spec: -{{- include "hpcc.placementsByPodTargetType" (dict "root" $ "pod" $name "target" $roxie.name "type" "roxie") | indent 6 }} - serviceAccountName: "hpcc-default" - initContainers: -{{- include "hpcc.createConfigInitContainers" $commonCtx | indent 6 }} -{{- include "hpcc.addImagePullSecrets" $commonCtx | nindent 6 -}} - terminationGracePeriodSeconds: {{ add ($roxie.agentQueryReleaseDelaySeconds | default 60) 30 }} - containers: - - name: {{ $name | quote}} - workingDir: /var/lib/HPCCSystems - command: [ {{ include "hpcc.componentCommand" (dict "me" $roxie "root" $ "process" "roxie") }} ] - args: [ {{- include "hpcc.componentStartArgs" (dict "me" $roxie "root" $ "process" "roxie") | nindent 16 }} - {{ include "hpcc.configArg" $roxie }}, - {{ include "hpcc.daliArg" (dict "root" $ "component" "Roxie" "optional" false) }}, - "--channels={{ $channel }}", - "--server={{ not $roxie.serverReplicas }}", - ] - env: -{{ include "hpcc.mergeEnvironments" $env | indent 8 -}} - - name: "SENTINEL" - value: "/tmp/{{ $roxie.name }}.sentinel" -{{- if not $roxie.serverReplicas }} -{{- $local := dict "first" true }} -{{- range $service := $roxie.services }} -{{- if ne (int $service.servicePort) 0 }} -{{- if $local.first }} -{{- $_ := set $local "first" false }} - ports: -{{- end }} - - name: {{ $service.name }} - containerPort: {{ $service.servicePort }} -{{- end }} -{{- end }} - lifecycle: - preStop: - exec: - command: ["testsocket", ".", "control:closedown"] -{{ include "hpcc.addSentinelProbes" ( $roxie | merge (dict "readyProbeName" ".ready" )) | indent 8 }} -{{- end }}{{/* not serverReplicas */}} -{{ include "hpcc.addSecurityContext" (dict "root" $ "me" .) 
| indent 8 }} -{{- include "hpcc.addResources" (dict "me" ($roxie.channelResources | default $roxie.resources) "root" $) | indent 8 }} -{{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} - volumeMounts: -{{ include "hpcc.addConfigMapVolumeMount" $roxie | indent 8 }} -{{ include "hpcc.addVolumeMounts" $commonCtx | indent 8 }} -{{ include "hpcc.addSecretVolumeMounts" $commonCtx | indent 8 }} -{{ include "hpcc.addVaultClientCertificateVolumeMounts" $commonCtx | indent 8 }} -{{- if not $roxie.serverReplicas }} - -{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "component" "roxie-agent" "name" $name "external" false) | indent 8 }} -{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "component" "roxie-agent" "name" $agentPublicCertName "certificate" $roxie.certificate "external" true "includeRemote" true) | indent 8 }} -{{ include "hpcc.addUDPCertificateVolumeMount" (dict "root" $ "component" "udpkey" "name" $udpkeyname ) | indent 8 }} -{{- end }}{{/* not serverReplicas */}} - - volumes: -{{ include "hpcc.addConfigMapVolume" $roxie | indent 6 }} -{{ include "hpcc.addVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addSecretVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addVaultClientCertificateVolumes" $commonCtx | indent 6 }} -{{- if not $roxie.serverReplicas }} -{{ include "hpcc.addCertificateVolume" (dict "root" $ "component" "roxie-agent" "name" $name "external" false) | indent 6 }} -{{ include "hpcc.addCertificateVolume" (dict "root" $ "component" "roxie-agent" "name" $agentPublicCertName "certificate" $roxie.certificate "external" true "includeRemote" true) | indent 6 }} -{{ include "hpcc.addUDPCertificateVolume" (dict "root" $ "component" "udpkey" "name" $udpkeyname) | indent 6 }} -{{- end }}{{/* not serverReplicas */}} ---- - -{{- end }} -{{- end }}{{/* if not singlenode */}} ---- -{{ include "hpcc.addEgress" (dict "root" $ "me" $roxie "labels" $commonCtx.instanceNames) }} -{{- if hasKey . 
"hpa" }} -{{- include "hpcc.addHorizontalPodAutoscaler" (dict "name" $roxie.name "kind" "Deployment" "hpa" $roxie.hpa) }} -{{- end }} -{{- end }}{{/* if not disabled */}} -{{- end }}{{/* range */}} - diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index f21fe53a9e3..0592ec984db 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -52,7 +52,7 @@ spec: run: {{ $serviceName | quote }} server: {{ $serviceName | quote }} accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index e889da23c4a..2ec4ae3ac40 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -82,7 +82,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -147,7 +147,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -214,7 +214,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ -347,7 +347,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -412,7 +412,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.111-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/system/jlib/jlzw.cpp b/system/jlib/jlzw.cpp index 03f1344f510..2dbf48aca62 100644 --- a/system/jlib/jlzw.cpp +++ b/system/jlib/jlzw.cpp @@ -804,13 +804,10 @@ void decompressToBuffer(MemoryBuffer & out, const void * src) void decompressToBuffer(MemoryBuffer & out, MemoryBuffer & in) { - unsigned char method; + bool compressed; size32_t srcLen; - in.read(method).read(srcLen); - if (method > 1) - throw makeStringException(-1, "New compression format is not supported in this version"); - - if (method != 0) + in.read(compressed).read(srcLen); + if (compressed) decompressToBuffer(out, in.readDirect(srcLen)); else out.append(srcLen, in.readDirect(srcLen)); diff --git 
a/version.cmake b/version.cmake index a4c5c3ed115..87c5c4c9492 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 0 ) -set ( HPCC_POINT 115 ) +set ( HPCC_POINT 111 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-05-31T15:15:05Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-05-17T17:04:28Z" ) ### From 4e09ab179bfcd3a07c6c9d432d30ccd47646b995 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Fri, 7 Jun 2024 17:56:53 +0100 Subject: [PATCH 027/151] Fix invalid version numbers Signed-off-by: Gavin Halliday --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 2 +- 14 files changed, 24 insertions(+), 24 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index cd689f32b18..e13e0b82478 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.0.111-closedown0 +version: 9.0.115-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.0.111-closedown0 +appVersion: 9.0.115-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 34fe972ae12..5e5f62840e2 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1314,7 +1314,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 478354bde26..503ce98f06a 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -50,7 +50,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 annotations: checksum/config: {{ $configSHA }} spec: diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index c73d45c4185..b8f744494a1 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -82,7 +82,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 9e3ee1607ba..3cb50bb8770 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -56,7 +56,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 067dddd5bdb..1633dc80d63 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -135,7 +135,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index b269a2030f7..a703430ed65 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -57,7 +57,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -142,7 +142,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index a051e0a480e..c7e902c158a 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -64,7 +64,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 60668168a9a..781c395abb3 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -117,7 +117,7 @@ spec: server: {{ .name | quote }} accessDali: "yes" app: {{ $application }} - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 37f2da967e7..447a38d6189 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -70,7 +70,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 00f0b503d46..0662bd2c291 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -120,7 +120,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -180,7 +180,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -242,7 +242,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -347,7 +347,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 0592ec984db..f21fe53a9e3 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -52,7 +52,7 @@ spec: run: {{ $serviceName | quote }} server: {{ $serviceName | quote }} accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 2ec4ae3ac40..e889da23c4a 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -82,7 +82,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -147,7 +147,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -214,7 +214,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf 
"%s-thorworker-job" .me.name)) | indent 12 }} @@ -347,7 +347,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -412,7 +412,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.0.111-closedown0 + helmVersion: 9.0.115-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 87c5c4c9492..e588462df44 100644 --- a/version.cmake +++ b/version.cmake @@ -5,7 +5,7 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 0 ) -set ( HPCC_POINT 111 ) +set ( HPCC_POINT 115 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) set ( HPCC_TAG_TIMESTAMP "2024-05-17T17:04:28Z" ) From 181e7000d6bb094c477496f3a5c020174b766af4 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Fri, 7 Jun 2024 17:59:28 +0100 Subject: [PATCH 028/151] Split off 9.0.116 Signed-off-by: Gavin Halliday --- helm/hpcc/Chart.yaml | 4 +- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 +- helm/hpcc/templates/eclccserver.yaml | 4 +- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/localroxie.yaml.fixed | 161 ++++++++ helm/hpcc/templates/roxie.yaml | 8 +- helm/hpcc/templates/roxie.yaml.fixed | 479 ++++++++++++++++++++++ helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +- version.cmake | 4 +- 16 files changed, 665 insertions(+), 25 deletions(-) create mode 100644 helm/hpcc/templates/localroxie.yaml.fixed create mode 100644 helm/hpcc/templates/roxie.yaml.fixed diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index e13e0b82478..cfbfe006f2d 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.0.115-closedown0 +version: 9.0.117-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.0.115-closedown0 +appVersion: 9.0.117-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 5e5f62840e2..c84dc4ed448 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1314,7 +1314,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 503ce98f06a..f7329d5447a 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -50,7 +50,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 annotations: checksum/config: {{ $configSHA }} spec: diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index b8f744494a1..d9a3507dd28 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -82,7 +82,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 3cb50bb8770..5976009b5c4 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -56,7 +56,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 1633dc80d63..cbdd2a61145 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -135,7 +135,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index a703430ed65..df4d7a92cbc 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -57,7 +57,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -142,7 +142,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index c7e902c158a..e3cc9b35284 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -64,7 +64,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 781c395abb3..ddfeebe8f12 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -117,7 +117,7 @@ spec: server: {{ .name | quote }} accessDali: "yes" app: {{ $application }} - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 447a38d6189..e634cb96ed2 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -70,7 +70,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml.fixed b/helm/hpcc/templates/localroxie.yaml.fixed new file mode 100644 index 00000000000..099b501840b --- /dev/null +++ b/helm/hpcc/templates/localroxie.yaml.fixed @@ -0,0 +1,161 @@ +{{/* + +--- DO NOT EDIT THIS FILE - all configuration of HPCC platform should be done via values.yaml ---- + +############################################################################## + + HPCC SYSTEMS software Copyright (C) 2021 HPCC Systems®. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + +############################################################################## + +*/}} +{{/* +localroxie configmap +Pass in dict with root and me +*/}} +{{- define "hpcc.localroxieConfigMap" }} +apiVersion: v1 +metadata: + name: {{ .me.name }}-configmap +data: + {{ .me.name }}.yaml: + version: 1.0 + roxie: +{{ toYaml (omit .me "logging" "tracing" "env") | indent 6 }} +{{- include "hpcc.generateLoggingConfig" . | indent 6 }} +{{- include "hpcc.generateTracingConfig" . | indent 6 }} +{{ include "hpcc.generateVaultConfig" . | indent 6 }} + global: +{{ include "hpcc.generateGlobalConfigMap" .root | indent 6 }} +{{- end -}}{{/* define "hpcc.localroxieConfigMap" */}} + +{{ range $roxie := $.Values.roxie -}} +{{- if not $roxie.disabled -}} +{{- $env := concat ($.Values.global.env | default list) (.env | default list) -}} +{{- $secretsCategories := list "system" "eclUser" "ecl" "storage" }} +{{- $enginePlaneDetails := dict -}} +{{- $_ := include "hpcc.getEnginePlanes" (dict "root" $ "me" . "result" $enginePlaneDetails) -}} +{{- $commonCtx := dict "root" $ "me" $roxie "includeCategories" $enginePlaneDetails.planeCategories "includeNames" $enginePlaneDetails.namedPlanes "secretsCategories" $secretsCategories "env" $env }} +{{- $configSHA := include "hpcc.getConfigSHA" ($commonCtx | merge (dict "configMapHelper" "hpcc.localroxieConfigMap" "component" "roxie" "excludeKeys" "global")) }} +{{- include "hpcc.checkDefaultStoragePlane" $commonCtx }} +{{- $singleNode := (hasKey $roxie "singleNode") | ternary $roxie.singleNode ((hasKey $roxie "localAgent") | ternary $roxie.localAgent false) }} +{{- if $singleNode -}} +{{- $localAgent := ((hasKey $roxie "localAgent") | ternary $roxie.localAgent true) -}} +{{- $name := $roxie.name -}} +{{- $servername := printf "%s-server" $roxie.name -}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ $roxie.name | quote }} +spec: + replicas: {{ $roxie.replicas | default 1 }} + selector: + matchLabels: + run: {{ $roxie.name | quote }} + server: {{ $servername | quote }} + template: + metadata: + labels: + run: {{ $roxie.name | quote }} + server: {{ $servername | quote }} + accessDali: "yes" + accessEsp: "yes" + helmVersion: 9.0.117-closedown0 +{{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} +{{- if hasKey . "labels" }} +{{ toYaml .labels | indent 8 }} +{{- end }}
+ annotations: + checksum/config: {{ $configSHA }} +{{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} + spec: +{{- include "hpcc.placementsByPodTargetType" (dict "root" $ "pod" $roxie.name "target" $roxie.name "type" "roxie") | indent 6 }} + serviceAccountName: "hpcc-default" + initContainers: +{{- include "hpcc.createConfigInitContainers" $commonCtx | indent 6 }} +{{- include "hpcc.addImagePullSecrets" $commonCtx | nindent 6 -}} + containers: + - name: {{ $roxie.name | quote }} + workingDir: /var/lib/HPCCSystems + command: [ {{ include "hpcc.componentCommand" (dict "me" $roxie "root" $ "process" "roxie") }} ] + args: [ {{- include "hpcc.componentStartArgs" (dict "me" $roxie "root" $ "process" "roxie") | nindent 16 }} + {{ include "hpcc.configArg" $roxie }}, + {{ include "hpcc.daliArg" (dict "root" $ "component" "Local Roxie" "optional" false) }}, + "--server=true", + "--localAgent={{ $localAgent }}", + "--resolveLocally=false" + ] + env: +{{ include "hpcc.mergeEnvironments" $env | indent 8 -}} + - name: "SENTINEL" + value: "/tmp/{{ $roxie.name }}.sentinel" +{{- $local := dict "first" true }} +{{- range $service := $roxie.services }} +{{- if ne (int $service.servicePort) 0 }} +{{- if $local.first }} +{{- $_ := set $local "first" false }} + ports: +{{- end }} + - name: {{ $service.name }} + containerPort: {{ $service.servicePort }} +{{- end }} +{{- end }} +{{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} +{{- include "hpcc.addResources" (dict "me" $roxie.resources "root" $) | indent 8 }} +{{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} + volumeMounts: +{{ include "hpcc.addConfigMapVolumeMount" . | indent 8 }} +{{ include "hpcc.addVolumeMounts" $commonCtx | indent 8 }} +{{- include "hpcc.addSecretVolumeMounts" $commonCtx | indent 8 }} +{{ include "hpcc.addVaultClientCertificateVolumeMounts" $commonCtx | indent 8 }} +{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "name" $roxie.name "component" "localroxie" "external" false) | indent 8 }} +{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "name" $roxie.name "component" "localroxie" "external" true "includeRemote" true) | indent 8 }} +{{ include "hpcc.addUDPCertificateVolumeMount" (dict "root" $ "name" $roxie.name "component" "localudpkey" ) | indent 8 }} + volumes: +{{ include "hpcc.addConfigMapVolume" .
| indent 6 }} +{{ include "hpcc.addVolumes" $commonCtx | indent 6 }} +{{ include "hpcc.addSecretVolumes" $commonCtx | indent 6 }} +{{ include "hpcc.addVaultClientCertificateVolumes" $commonCtx | indent 6 }} +{{ include "hpcc.addCertificateVolume" (dict "root" $ "name" $roxie.name "component" "localroxie" "external" false) | indent 6 }} +{{ include "hpcc.addCertificateVolume" (dict "root" $ "name" $roxie.name "component" "localroxie" "external" true "includeRemote" true) | indent 6 }} +{{ include "hpcc.addUDPCertificateVolume" (dict "root" $ "name" $roxie.name "component" "localudpkey" ) | indent 6 }} +--- +{{- range $service := $roxie.services }} +{{- if ne (int $service.servicePort) 0 }} +{{ include "hpcc.addService" ( dict "root" $ "name" $service.name "service" $service "selector" $servername "defaultPort" $service.servicePort ) }} +--- +{{- end }} +{{- end }} +kind: ConfigMap +{{ include "hpcc.generateConfig" ($commonCtx | merge (dict "configMapHelper" "hpcc.localroxieConfigMap")) }} +--- +{{ include "hpcc.addCertificate" (dict "root" $ "name" $roxie.name "services" $roxie.services "component" "localroxie" "external" false) }} +{{ include "hpcc.addCertificate" (dict "root" $ "name" $roxie.name "services" $roxie.services "component" "localroxie" "external" true "includeRemote" true) }} +{{ include "hpcc.addUDPCertificate" (dict "root" $ "name" $roxie.name "component" "localudpkey") }} +--- +{{ include "hpcc.addEgress" $commonCtx }} + +{{- end }}{{/* if singleNode */}} +{{- end }}{{/* if not disabled */}} +{{- end }}{{/* range */}} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 0662bd2c291..6cdf0495d85 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -120,7 +120,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -180,7 +180,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -242,7 +242,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -347,7 +347,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/roxie.yaml.fixed b/helm/hpcc/templates/roxie.yaml.fixed new file mode 100644 index 00000000000..3f919f7c048 --- /dev/null +++ b/helm/hpcc/templates/roxie.yaml.fixed @@ -0,0 +1,479 @@ +{{/* + +--- DO NOT EDIT THIS FILE - all configuration of HPCC platform 
should be done via values.yaml ---- + +############################################################################## + + HPCC SYSTEMS software Copyright (C) 2021 HPCC Systems®. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +############################################################################## + +*/}} + +{{/* +roxie configmap +Pass in dict with root and me +*/}} +{{- define "hpcc.roxieConfigMap" -}} +apiVersion: v1 +metadata: + name: {{ .me.name }}-configmap +data: + {{ .me.name }}.yaml: + version: 1.0 + roxie: +{{- $root := .root -}} +{{- $component := .me }} + services: +{{- range $service := .me.services }} + - name: {{ $service.name }} +{{ toYaml (omit $service "tls" "name") | indent 8 }} +{{- if ne (int $service.servicePort) 0 }} +{{- include "hpcc.addTLSServiceEntries" (dict "root" $root "service" $service "component" $component "visibility" $service.visibility "remoteClients" $service.remoteClients "trustClients" $service.trustClients "includeTrustedPeers" true "incluedRoxieAndEspServices" true) | indent 6 }} +{{- end }} +{{- end }} +{{ toYaml ( omit .me "logging" "tracing" "topoServer" "encryptInTransit" "env" "services") | indent 6 }} + numChannels: {{ .numChannels }} + topologyServers: "{{ .toponame }}:{{ .topoport }}" + heartbeatInterval: {{ .heartbeatInterval }} + resolveLocally: false +{{- $mtlsEnabled := (eq (include "hpcc.isMtlsEnabled" (dict "root" .root)) "true") -}} +{{/* By default use encryption if local certificates are enabled, but allow it to be turned off via roxie .encryptInTransit value */}} +{{- if (hasKey .me "encryptInTransit") -}} +{{- if and (.me.encryptInTransit) (not $mtlsEnabled) -}} +{{- $_ := fail (printf "Roxie %s encryptInTransit requires local cert-manager configuration." .me.name ) }} +{{- end }} + encryptInTransit: {{ .me.encryptInTransit }} +{{ else }} + encryptInTransit: {{ $mtlsEnabled }} +{{ end -}} +{{- include "hpcc.generateLoggingConfig" (dict "root" .root "me" .me) | indent 6 }} +{{- include "hpcc.generateTracingConfig" (dict "root" .root "me" .me) | indent 6 }} +{{ include "hpcc.generateVaultConfig" . 
| indent 6 }} + global: +{{ include "hpcc.generateGlobalConfigMap" .root | indent 6 }} +{{- end -}}{{/*define "hpcc.roxieConfigMap"*/}} + +{{- define "hpcc.roxieTopoConfigMap" -}} +apiVersion: v1 +metadata: + name: {{ .toponame }}-configmap +data: + {{ .toponame }}.yaml: + version: 1.0 + toposerver: +{{ toYaml ( omit .toposerver "logging" "tracing" "env") | indent 6 }} +{{- include "hpcc.generateLoggingConfig" (dict "root" .root "me" .toposerver) | indent 6 }} +{{- include "hpcc.generateTracingConfig" (dict "root" .root "me" .toposerver) | indent 6 }} + global: +{{ include "hpcc.generateGlobalConfigMap" .root | indent 6 }} +{{- end -}}{{/*define "hpcc.roxieTopoConfigMap"*/}} + {{ range $roxie := $.Values.roxie -}} +{{- if not $roxie.disabled -}} +{{- $env := concat ($.Values.global.env | default list) (.env | default list) -}} +{{- $secretsCategories := list "system" "eclUser" "ecl" "storage" }} +{{- $toposerver := ($roxie.topoServer | default dict) -}} +{{- $enginePlaneDetails := dict -}} +{{- $_ := include "hpcc.getEnginePlanes" (dict "root" $ "me" . "result" $enginePlaneDetails) -}} +{{- $commonCtx := dict "root" $ "me" $roxie "includeCategories" $enginePlaneDetails.planeCategories "includeNames" $enginePlaneDetails.namedPlanes "secretsCategories" $secretsCategories "toposerver" $toposerver "env" $env }} +{{- $_ := set $commonCtx "toponame" (printf "%s-toposerver" $roxie.name) -}} +{{- $_ := set $commonCtx "numChannels" ($roxie.numChannels | int | default 1) -}} +{{- $_ := set $commonCtx "topoport" ($toposerver.port | int | default 9004) -}} +{{- $_ := set $commonCtx "heartbeatInterval" ($toposerver.heartbeatInterval | int | default 10000) -}} +{{- $_ := set $toposerver "name" $commonCtx.toponame -}} +{{- $configSHA := include "hpcc.getConfigSHA" ($commonCtx | merge (dict "configMapHelper" "hpcc.roxieConfigMap" "component" "roxie" "excludeKeys" "global")) }} +{{- $topoconfigSHA := include "hpcc.getConfigSHA" ($commonCtx | merge (dict "configMapHelper" "hpcc.roxieTopoConfigMap" "component" "toposerver" "excludeKeys" "global")) }} +{{- include "hpcc.checkDefaultStoragePlane" $commonCtx }} +{{- $singleNode := (hasKey $roxie "singleNode") | ternary $roxie.singleNode ((hasKey $roxie "localAgent") | ternary $roxie.localAgent false) }} +{{- if not $singleNode -}} +{{- $servername := printf "%s-server" $roxie.name -}} +{{- $udpkeyname := $roxie.name -}} +{{- range $service := $roxie.services }} +{{- range $remoteClient := $service.remoteClients }} + {{ include "hpcc.addExternalRemoteClientCertificate" (dict "root" $ "client" $remoteClient.name "organization" $remoteClient.organization "instance" $service.name "component" "roxie" "visibility" $service.visibility "secretTemplate" $remoteClient.secretTemplate) }} +{{- end }} +{{- if ne (int $service.servicePort) 0 }} +{{- $_ := set $service "port" $service.servicePort }} +{{- end }} +{{- end }} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ $commonCtx.toponame | quote }} +spec: + replicas: {{ $toposerver.replicas | default 1 }} + selector: + matchLabels: + run: {{ $commonCtx.toponame | quote }} + template: + metadata: + labels: +{{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} + run: {{ $commonCtx.toponame | quote }} + roxie-cluster: {{ $roxie.name | quote }} + helmVersion: 9.0.117-closedown0 +{{- if hasKey $.Values.global "metrics" }} +{{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} +{{- end }} +{{- if hasKey $toposerver "labels" }} +{{ toYaml $toposerver.labels | indent 8 }} +{{- end }}
+ annotations: + checksum/config: {{ $topoconfigSHA }} +{{- include "hpcc.generateAnnotations" (dict "root" $commonCtx.root "me" $toposerver) | indent 8 }} +{{- if hasKey $.Values.global "metrics" }} +{{- include "hpcc.addPrometheusScrapeAnnotations" $.Values.global.metrics | nindent 8 }} +{{- end }} + spec: +{{- include "hpcc.placementsByPodTargetType" (dict "root" $ "pod" $commonCtx.toponame "target" $roxie.name "type" "roxie") | indent 6 }} + serviceAccountName: "hpcc-default" +{{- include "hpcc.addImagePullSecrets" $commonCtx | nindent 6 -}} + containers: + - name: {{ $commonCtx.toponame | quote }} +{{ include "hpcc.addSentinelProbes" $toposerver | indent 8 }} +{{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} +{{- $defaultResources := dict "cpu" "500m" "memory" "200Mi" }} +{{- include "hpcc.addResources" (dict "me" .topoResources "defaults" $defaultResources "root" $) | indent 8 }} +{{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} + workingDir: /var/lib/HPCCSystems + command: [ {{ include "hpcc.componentCommand" (dict "me" $toposerver "root" $ "process" "toposerver") }} ] + args: [ {{- include "hpcc.componentStartArgs" (dict "me" $toposerver "root" $ "process" "toposerver") | nindent 16 }} + {{ include "hpcc.configArg" $toposerver }} + ] + env: +{{ include "hpcc.mergeEnvironments" $env | indent 8 -}} + - name: "SENTINEL" + value: "/tmp/{{ $commonCtx.toponame }}.sentinel" + volumeMounts: +{{ include "hpcc.addConfigMapVolumeMount" $toposerver | indent 8 }} +{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "component" "topo" "name" $commonCtx.toponame "external" false) | indent 8 }} + volumes: +{{ include "hpcc.addConfigMapVolume" $toposerver | indent 6 }} +{{ include "hpcc.addCertificateVolume" (dict "root" $ "component" "topo" "name" $commonCtx.toponame "external" false) | indent 6 }} + +--- +{{ include "hpcc.addCertificate" (dict "root" $ "name" $commonCtx.toponame "servicename" $commonCtx.toponame "component" "topo" "external" false) }} +{{ include "hpcc.addUDPCertificate" (dict "root" $ "name" $udpkeyname "component" "udpkey") }} + +--- +{{- range $service := $roxie.services }} +{{- if ne (int $service.servicePort) 0 }} +{{ include "hpcc.addService" ( dict "root" $ "name" $service.name "service" $service "selector" $servername "defaultPort" $service.servicePort) }} +--- +{{- end }} +{{- end }} + +apiVersion: v1 +kind: Service +metadata: + name: {{ $commonCtx.toponame | quote }} + labels: + helmVersion: 9.0.117-closedown0 +{{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} +spec: + ports: + - port: {{ $commonCtx.topoport }} + protocol: TCP + targetPort: {{ $commonCtx.topoport }} + selector: + run: {{ $commonCtx.toponame | quote }} + clusterIP: None # Headless service +--- + +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ printf "%s-internal-traffic" $roxie.name }} +spec: + podSelector: + matchLabels: + roxie-cluster: {{ $roxie.name | quote }} + policyTypes: + - Ingress + - Egress + ingress: + - from: + - podSelector: + matchLabels: + roxie-cluster: {{ $roxie.name | quote }} + egress: + - to: + - podSelector: + matchLabels: + roxie-cluster: {{ $roxie.name | quote }} + +--- +kind: ConfigMap +{{ include "hpcc.generateConfig" ($commonCtx | merge (dict "configMapHelper" "hpcc.roxieConfigMap")) }} +--- +kind: ConfigMap +{{ include "hpcc.generateConfig" ($commonCtx | merge (dict "configMapHelper" "hpcc.roxieTopoConfigMap")) }} +--- + +{{- $_ := set $commonCtx "instanceNames" list -}} +{{ if $roxie.serverReplicas -}} +{{ $_ := set $commonCtx "instanceNames" (list $servername) -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ $servername | quote }} +spec: + replicas: {{ $roxie.serverReplicas }} + selector: + matchLabels: + run: {{ $servername | quote }} + template: + metadata: + labels: + run: {{ $servername | quote }} + server: {{ $roxie.name | quote }} + roxie-cluster: {{ $roxie.name | quote }} + accessDali: "yes" + accessEsp: "yes" + helmVersion: 9.0.117-closedown0 +{{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} +{{- if hasKey $.Values.global "metrics" }} +{{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} +{{- end }} +{{- if hasKey $roxie "labels" }} +{{ toYaml $roxie.labels | indent 8 }} +{{- end }}
+ annotations: + checksum/config: {{ $configSHA }} +{{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} +{{- if hasKey $.Values.global "metrics" }} +{{- include "hpcc.addPrometheusScrapeAnnotations" $.Values.global.metrics | nindent 8 }} +{{- end }} +{{- if hasKey $roxie "annotations" }} +{{ toYaml $roxie.annotations | indent 8 }} +{{- end }} + spec: +{{- include "hpcc.placementsByPodTargetType" (dict "root" $ "pod" $servername "target" $roxie.name "type" "roxie") | indent 6 }} + serviceAccountName: "hpcc-default" + initContainers: +{{- include "hpcc.createConfigInitContainers" $commonCtx | indent 6 }} +{{- include "hpcc.addImagePullSecrets" $commonCtx | nindent 6 -}} + terminationGracePeriodSeconds: {{ add ($roxie.agentQueryReleaseDelaySeconds | default 60) 30 }} + containers: + - name: {{ $servername | quote }} + workingDir: /var/lib/HPCCSystems + command: [ {{ include "hpcc.componentCommand" (dict "me" $roxie "root" $ "process" "roxie") }} ] + args: [ {{- include "hpcc.componentStartArgs" (dict "me" $roxie "root" $ "process" "roxie") | nindent 16 }} + {{ include "hpcc.daliArg" (dict "root" $ "component" "Roxie" "optional" false) }}, + "--server=true" + ] + env: +{{ include "hpcc.mergeEnvironments" $env | indent 8 -}} + - name: "SENTINEL" + value: "/tmp/{{ $roxie.name }}.sentinel" +{{- $local := dict "first" true }} +{{- range $service := $roxie.services }} +{{- if ne (int $service.servicePort) 0 }} +{{- if $local.first }} +{{- $_ := set $local "first" false }} + ports: +{{- end }} + - name: {{ $service.name }} + containerPort: {{ $service.servicePort }} +{{- end }} +{{- end }} + lifecycle: + preStop: + exec: + command: ["testsocket", ".", "control:closedown"] +{{ include "hpcc.addSentinelProbes" ( $roxie | merge (dict "readyProbeName" ".ready" )) | indent 8 }} +{{ include "hpcc.addSecurityContext" (dict "root" $ "me" .) | indent 8 }} +{{- include "hpcc.addResources" (dict "me" ($roxie.serverResources | default $roxie.resources) "root" $) | indent 8 }} +{{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} + volumeMounts: +{{ include "hpcc.addConfigMapVolumeMount" $roxie | indent 8 }} +{{ include "hpcc.addVolumeMounts" $commonCtx | indent 8 }} +{{ include "hpcc.addSecretVolumeMounts" $commonCtx | indent 8 }} +{{ include "hpcc.addVaultClientCertificateVolumeMounts" $commonCtx | indent 8 }} +{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "component" "roxie-server" "name" $servername "external" false) | indent 8 }} +{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "component" "roxie-server" "name" $servername "certificate" $roxie.certificate "external" true "includeRemote" true) | indent 8 }} +{{ include "hpcc.addUDPCertificateVolumeMount" (dict "root" $ "component" "udpkey" "name" $udpkeyname ) | indent 8 }} + volumes: +{{ include "hpcc.addConfigMapVolume" $roxie | indent 6 }} +{{ include "hpcc.addVolumes" $commonCtx | indent 6 }} +{{ include "hpcc.addSecretVolumes" $commonCtx | indent 6 }} +{{ include "hpcc.addVaultClientCertificateVolumes" $commonCtx | indent 6 }} +{{ include "hpcc.addCertificateVolume" (dict "root" $ "component" "roxie-server" "name" $servername "external" false) | indent 6 }} +{{ include "hpcc.addCertificateVolume" (dict "root" $ "component" "roxie-server" "name" $servername "certificate" $roxie.certificate "external" true "includeRemote" true) | indent 6 }} +{{ include "hpcc.addUDPCertificateVolume" (dict "root" $ "component" "udpkey" "name" $udpkeyname) | indent 6 }} + +--- +{{ include "hpcc.addCertificate" (dict "root" $ "name" $servername "services" $roxie.services "component" "roxie-server" "external" false) }} +{{ include "hpcc.addCertificate" (dict "root" $ "name" $servername "services" $roxie.services "component" "roxie-server" "external" true "includeRemote" true) }} +--- +{{ end -}}{{/* if serverReplicas */}} + +{{- $agentPublicCertName := printf "%s-agent" $roxie.name }} +{{ include "hpcc.addCertificate" (dict "root" $ "name" $agentPublicCertName "services" $roxie.services "component" "roxie-agent" "external" true "includeRemote" true) }} + +{{ range $c, $e := until ($commonCtx.numChannels|int) -}} +{{- $channel := add $c 1 -}} +{{- $name := printf "%s-agent-%d" $roxie.name $channel }} +{{- $_ := set $commonCtx "instanceNames" (append $commonCtx.instanceNames $name) }} + +{{ include "hpcc.addCertificate" (dict "root" $ "name" $name "services" $roxie.services "component" "roxie-agent" "external" false) }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ $name | quote}} +spec: + replicas: {{ (hasKey $roxie "replicas") | ternary $roxie.replicas 1 }} + selector: + matchLabels: + run: {{ $name | quote}} + template: + metadata: + labels: +{{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-agent" "name" "roxie" "instance" $name) | indent 8 }} + run: {{ $name | quote}} +{{- if not $roxie.serverReplicas }} + server: {{ $servername | quote }} +{{- end }} + roxie-cluster: {{ $roxie.name | quote }} + accessDali: "yes" + accessEsp: "yes" + helmVersion: 9.0.117-closedown0 +{{- if hasKey $.Values.global "metrics" }} +{{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} +{{- end }} +{{- if hasKey $roxie "labels" }} +{{ toYaml $roxie.labels | indent 8 }} +{{- end }}
+ annotations: + checksum/config: {{ $configSHA }} +{{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} +{{- if hasKey $.Values.global "metrics" }} +{{- include "hpcc.addPrometheusScrapeAnnotations" $.Values.global.metrics | nindent 8 }} +{{- end }} + spec: +{{- include "hpcc.placementsByPodTargetType" (dict "root" $ "pod" $name "target" $roxie.name "type" "roxie") | indent 6 }} + serviceAccountName: "hpcc-default" + initContainers: +{{- include "hpcc.createConfigInitContainers" $commonCtx | indent 6 }} +{{- include "hpcc.addImagePullSecrets" $commonCtx | nindent 6 -}} + terminationGracePeriodSeconds: {{ add ($roxie.agentQueryReleaseDelaySeconds | default 60) 30 }} + containers: + - name: {{ $name | quote}} + workingDir: /var/lib/HPCCSystems + command: [ {{ include "hpcc.componentCommand" (dict "me" $roxie "root" $ "process" "roxie") }} ] + args: [ {{- include "hpcc.componentStartArgs" (dict "me" $roxie "root" $ "process" "roxie") | nindent 16 }} + {{ include "hpcc.configArg" $roxie }}, + {{ include "hpcc.daliArg" (dict "root" $ "component" "Roxie" "optional" false) }}, + "--channels={{ $channel }}", + "--server={{ not $roxie.serverReplicas }}", + ] + env: +{{ include "hpcc.mergeEnvironments" $env | indent 8 -}} + - name: "SENTINEL" + value: "/tmp/{{ $roxie.name }}.sentinel" +{{- if not $roxie.serverReplicas }} +{{- $local := dict "first" true }} +{{- range $service := $roxie.services }} +{{- if ne (int $service.servicePort) 0 }} +{{- if $local.first }} +{{- $_ := set $local "first" false }} + ports: +{{- end }} + - name: {{ $service.name }} + containerPort: {{ $service.servicePort }} +{{- end }} +{{- end }} + lifecycle: + preStop: + exec: + command: ["testsocket", ".", "control:closedown"] +{{ include "hpcc.addSentinelProbes" ( $roxie | merge (dict "readyProbeName" ".ready" )) | indent 8 }} +{{- end }}{{/* not serverReplicas */}} +{{ include "hpcc.addSecurityContext" (dict "root" $ "me" .)
| indent 8 }} +{{- include "hpcc.addResources" (dict "me" ($roxie.channelResources | default $roxie.resources) "root" $) | indent 8 }} +{{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} + volumeMounts: +{{ include "hpcc.addConfigMapVolumeMount" $roxie | indent 8 }} +{{ include "hpcc.addVolumeMounts" $commonCtx | indent 8 }} +{{ include "hpcc.addSecretVolumeMounts" $commonCtx | indent 8 }} +{{ include "hpcc.addVaultClientCertificateVolumeMounts" $commonCtx | indent 8 }} +{{- if not $roxie.serverReplicas }} + +{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "component" "roxie-agent" "name" $name "external" false) | indent 8 }} +{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "component" "roxie-agent" "name" $agentPublicCertName "certificate" $roxie.certificate "external" true "includeRemote" true) | indent 8 }} +{{ include "hpcc.addUDPCertificateVolumeMount" (dict "root" $ "component" "udpkey" "name" $udpkeyname ) | indent 8 }} +{{- end }}{{/* not serverReplicas */}} + + volumes: +{{ include "hpcc.addConfigMapVolume" $roxie | indent 6 }} +{{ include "hpcc.addVolumes" $commonCtx | indent 6 }} +{{ include "hpcc.addSecretVolumes" $commonCtx | indent 6 }} +{{ include "hpcc.addVaultClientCertificateVolumes" $commonCtx | indent 6 }} +{{- if not $roxie.serverReplicas }} +{{ include "hpcc.addCertificateVolume" (dict "root" $ "component" "roxie-agent" "name" $name "external" false) | indent 6 }} +{{ include "hpcc.addCertificateVolume" (dict "root" $ "component" "roxie-agent" "name" $agentPublicCertName "certificate" $roxie.certificate "external" true "includeRemote" true) | indent 6 }} +{{ include "hpcc.addUDPCertificateVolume" (dict "root" $ "component" "udpkey" "name" $udpkeyname) | indent 6 }} +{{- end }}{{/* not serverReplicas */}} +--- + +{{- end }} +{{- end }}{{/* if not singlenode */}} +--- +{{ include "hpcc.addEgress" (dict "root" $ "me" $roxie "labels" $commonCtx.instanceNames) }} +{{- if hasKey . 
"hpa" }} +{{- include "hpcc.addHorizontalPodAutoscaler" (dict "name" $roxie.name "kind" "Deployment" "hpa" $roxie.hpa) }} +{{- end }} +{{- end }}{{/* if not disabled */}} +{{- end }}{{/* range */}} + diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index f21fe53a9e3..0b87dce862f 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -52,7 +52,7 @@ spec: run: {{ $serviceName | quote }} server: {{ $serviceName | quote }} accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index e889da23c4a..92fe896c3cd 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -82,7 +82,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -147,7 +147,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -214,7 +214,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ -347,7 +347,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -412,7 +412,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.0.115-closedown0 + helmVersion: 9.0.117-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index e588462df44..5eb1da4aa55 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 0 ) -set ( HPCC_POINT 115 ) +set ( HPCC_POINT 117 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-05-17T17:04:28Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-07T16:59:28Z" ) ### From 5a08ab280ce71ed8e88fc27cb03705cebdf30b51 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Fri, 7 Jun 2024 18:01:07 +0100 Subject: [PATCH 029/151] Split off 9.2.94 Signed-off-by: Gavin Halliday --- helm/hpcc/Chart.yaml | 4 
++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 9bb52d01957..cffec699dbc 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.2.93-closedown0 +version: 9.2.95-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. -appVersion: 9.2.93-closedown0 +appVersion: 9.2.95-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 0191fd5a91f..e6dd19d96b6 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1405,7 +1405,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 7b73ab073ca..7bdd3cb8574 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -50,7 +50,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index 0eda5998e8b..8b35e6d018f 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -82,7 +82,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 4c13930294f..0e12a34c332 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -56,7 +56,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 056bf663efc..c7216fa0f66 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -133,7 +133,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 305f2bea1d1..1398ba37599 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -57,7 +57,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -140,7 +140,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 8d94e37fde9..fff53b07f76 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -64,7 +64,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index b9412020b9e..92bebaa579e 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -120,7 +120,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 10f841e3ee7..dee41f099e3 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -70,7 +70,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 1983af2627f..05ef4e57b0a 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -120,7 +120,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -178,7 +178,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -240,7 +240,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -346,7 +346,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 2622fefb98f..0c3f8317753 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 7c0fbee21a3..16d0cec1ba3 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -82,7 +82,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -145,7 +145,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -210,7 +210,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-341,7 +341,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -404,7 +404,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.2.93-closedown0 + helmVersion: 9.2.95-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 5626bf07968..e26c90f5321 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 2 ) -set ( HPCC_POINT 93 ) +set ( HPCC_POINT 95 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-05-31T15:16:03Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-07T17:01:06Z" ) ### From 444974ff3f77d78d4c316d9bc7faa8b5f1c9aa72 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Fri, 7 Jun 2024 18:02:37 +0100 Subject: [PATCH 030/151] Split off 9.4.68 Signed-off-by: Gavin Halliday --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 56229305ed3..139a9b83c03 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.4.67-closedown0 +version: 9.4.69-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.4.67-closedown0 +appVersion: 9.4.69-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index ec33dc62786..33b9199de4d 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1519,7 +1519,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index e4a30446fe2..314832ebffc 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index bfc0e3e1540..1974d20224d 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 2bd09732105..c4e0dc5b0ff 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 96162e8bc8f..e50403c215d 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -60,7 +60,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -137,7 +137,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index c1c96ab2bf1..d1f04144b0a 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -143,7 +143,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 5e7880db618..fb4890e011d 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 5d5b9c8f7e1..5b692cf5102 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -122,7 +122,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 4ce7bde8f29..de404448da2 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 40be2c586f4..073b7570de8 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 3235571027a..df851836973 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index b0dcbbfb0f1..a69918afa2b 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -86,7 +86,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -151,7 +151,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -218,7 +218,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-351,7 +351,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -416,7 +416,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.4.67-closedown0 + helmVersion: 9.4.69-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 984982439dc..c2381512fc8 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 4 ) -set ( HPCC_POINT 67 ) +set ( HPCC_POINT 69 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-05-31T15:17:10Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-07T17:02:37Z" ) ### From 384469ea392db267f5fd6cc9d44961fcb49bc5b3 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Fri, 7 Jun 2024 18:04:00 +0100 Subject: [PATCH 031/151] Split off 9.6.20 Signed-off-by: Gavin Halliday --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 2e12be1ab82..13fae5887dc 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.6.19-closedown0 +version: 9.6.21-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.6.19-closedown0 +appVersion: 9.6.21-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index ff08cdd0e7f..d8543620659 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1523,7 +1523,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 8b7230b2acd..ff382b69d37 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index bb93c173bab..dd60aaedcd8 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 1e83898efa6..f272ffb018d 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index ca4d4e7b8d4..3a4910ed5ac 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -139,7 +139,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index a3978595efb..126e2291259 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -147,7 +147,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 540178fa8e6..28b723dbf1b 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index f0bed901493..85556052525 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -125,7 +125,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 37c03ea71fd..4db571144f9 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index c3e555c856d..933b60193be 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index e04067229d1..72a15b9ec34 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 5e14f86d71a..e16ad6a6be1 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -88,7 +88,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -153,7 +153,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -220,7 +220,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-353,7 +353,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -418,7 +418,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.6.19-closedown0 + helmVersion: 9.6.21-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 53bfbf37dd3..93195ddb8d2 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 6 ) -set ( HPCC_POINT 19 ) +set ( HPCC_POINT 21 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-05-31T15:18:09Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-07T17:04:00Z" ) ### From 7d90f9445479d6745e2da32362f5601d32ac0f37 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Mon, 10 Jun 2024 08:41:21 +0100 Subject: [PATCH 032/151] HPCC-32029 Suppress coverity false-positive about unreachable code Signed-off-by: Gavin Halliday --- testing/unittests/jlibtests.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/unittests/jlibtests.cpp b/testing/unittests/jlibtests.cpp index c98b58521b5..d5cca3b2864 100644 --- a/testing/unittests/jlibtests.cpp +++ b/testing/unittests/jlibtests.cpp @@ -3191,7 +3191,7 @@ class AtomicTimingStressTest : public CppUnit::TestFixture unsigned __int64 run(const char * title, unsigned numThreads, unsigned numIterations) { value1 = 0; - for (unsigned ix = 1; ix < NUMVALUES; ix++) + for (unsigned ix = 0; ix < NUMVALUES; ix++) extraValues[ix] = 0; for (unsigned i = 0; i < numThreads; i++) { From 3d7a9628f3742ad945969b9a6b624549d0e3685a Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Mon, 10 Jun 2024 12:38:48 +0100 Subject: [PATCH 033/151] HPCC-32032 Avoid locking a workunit when resolving logical files Signed-off-by: Gavin Halliday --- .../ws_workunits/ws_workunitsQuerySets.cpp | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/esp/services/ws_workunits/ws_workunitsQuerySets.cpp b/esp/services/ws_workunits/ws_workunitsQuerySets.cpp index 7b4cd4bf358..897d52b65f1 100644 --- a/esp/services/ws_workunits/ws_workunitsQuerySets.cpp +++ b/esp/services/ws_workunits/ws_workunitsQuerySets.cpp @@ -349,20 +349,24 @@ void QueryFilesInUse::loadTarget(IPropertyTree *t, const char *target, unsigned queryTree = NULL; } - Owned factory = getWorkUnitFactory(); - Owned cw = factory->openWorkUnit(wuid); - if (!cw) - continue; + Owned wufiles; + //Only lock the workunit while the information is being gathered - not when files are resolved + { + Owned factory = getWorkUnitFactory(); + Owned cw = factory->openWorkUnit(wuid); + if (!cw) + continue; - queryTree = targetTree->addPropTree("Query", createPTree("Query")); - queryTree->setProp("@target", target); //for reference when searching across targets - queryTree->setProp("@id", queryid); - if (pkgid && *pkgid) - queryTree->setProp("@pkgid", pkgid); + queryTree = targetTree->addPropTree("Query", 
createPTree("Query")); + queryTree->setProp("@target", target); //for reference when searching across targets + queryTree->setProp("@id", queryid); + if (pkgid && *pkgid) + queryTree->setProp("@pkgid", pkgid); - IUserDescriptor **roxieUser = roxieUserMap.getValue(target); - Owned wufiles = createReferencedFileList(roxieUser ? *roxieUser : NULL, true, true); - wufiles->addFilesFromQuery(cw, pm, queryid); + IUserDescriptor **roxieUser = roxieUserMap.getValue(target); + wufiles.setown(createReferencedFileList(roxieUser ? *roxieUser : NULL, true, true)); + wufiles->addFilesFromQuery(cw, pm, queryid); + } if (aborting) return; StringArray locations; From 9cf5fc828c57ffc08955f29657881ae77d499620 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Mon, 10 Jun 2024 13:42:59 +0100 Subject: [PATCH 034/151] HPCC-32033 Cleanup code - #ifndef on mis-spelt identifier Signed-off-by: Gavin Halliday --- thorlcr/master/thmastermain.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/thorlcr/master/thmastermain.cpp b/thorlcr/master/thmastermain.cpp index 15799a5c593..ad0f1dd0f29 100644 --- a/thorlcr/master/thmastermain.cpp +++ b/thorlcr/master/thmastermain.cpp @@ -67,9 +67,7 @@ #include "thexception.hpp" #include "thmem.hpp" -#ifndef _CONTAINERIED #define DEFAULT_QUERY_SO_DIR "sodir" -#endif #define MAX_SLAVEREG_DELAY 60*1000*15 // 15 mins #define SLAVEREG_VERIFY_DELAY 5*1000 #define SHUTDOWN_IN_PARALLEL 20 From b51c1c0702b307c6a8badaad131449656652c21c Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Mon, 10 Jun 2024 13:18:31 +0100 Subject: [PATCH 035/151] HPCC-32021 Bump kubectl version to avoid (spurious) scan warning Signed-off-by: Gavin Halliday --- dockerfiles/platform-build/Dockerfile | 2 +- dockerfiles/vcpkg/platform-core-ubuntu-22.04.dockerfile | 2 +- dockerfiles/vcpkg/platform-core-ubuntu-22.04/Dockerfile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/dockerfiles/platform-build/Dockerfile b/dockerfiles/platform-build/Dockerfile index df535df8a63..e5cdbd6b948 100644 --- a/dockerfiles/platform-build/Dockerfile +++ b/dockerfiles/platform-build/Dockerfile @@ -44,7 +44,7 @@ RUN groupadd -g 10001 hpcc RUN useradd -s /bin/bash -r -m -N -c "hpcc runtime User" -u 10000 -g hpcc hpcc RUN passwd -l hpcc -RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.27.6/bin/linux/amd64/kubectl && chmod +x ./kubectl && mv ./kubectl /usr/local/bin +RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.28.10/bin/linux/amd64/kubectl && chmod +x ./kubectl && mv ./kubectl /usr/local/bin WORKDIR /hpcc-dev RUN mkdir build diff --git a/dockerfiles/vcpkg/platform-core-ubuntu-22.04.dockerfile b/dockerfiles/vcpkg/platform-core-ubuntu-22.04.dockerfile index ff20fac4848..9e76ce3b73f 100644 --- a/dockerfiles/vcpkg/platform-core-ubuntu-22.04.dockerfile +++ b/dockerfiles/vcpkg/platform-core-ubuntu-22.04.dockerfile @@ -59,7 +59,7 @@ RUN apt-get install -y \ gdb \ nano -RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.27.6/bin/linux/amd64/kubectl && \ +RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.28.10/bin/linux/amd64/kubectl && \ chmod +x ./kubectl && \ mv ./kubectl /usr/local/bin diff --git a/dockerfiles/vcpkg/platform-core-ubuntu-22.04/Dockerfile b/dockerfiles/vcpkg/platform-core-ubuntu-22.04/Dockerfile index e5bd56d89f4..af9402919ff 100644 --- a/dockerfiles/vcpkg/platform-core-ubuntu-22.04/Dockerfile +++ b/dockerfiles/vcpkg/platform-core-ubuntu-22.04/Dockerfile @@ -58,7 +58,7 @@ RUN apt-get install -y \ gdb \ nano 
-RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.28.4/bin/linux/amd64/kubectl && \ +RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.28.10/bin/linux/amd64/kubectl && \ chmod +x ./kubectl && \ mv ./kubectl /usr/local/bin From 2717ba5e417542a96a0c2d85ab92c222d5b6805b Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Mon, 10 Jun 2024 17:50:14 +0100 Subject: [PATCH 036/151] HPCC-31844 Fix dll cache in containerized setup The manager's query dll cache was not being initialized in containerized, which meant that it could cache an indeterminate amount of query dlls. However, the workers did initialize their cache, meaning that their copies would be deleted as expected. The net result was that the manager would consider it cached and not resend a dll, causing the slave to fail since its copy had been deleted. Signed-off-by: Jake Smith --- thorlcr/master/thgraphmanager.cpp | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/thorlcr/master/thgraphmanager.cpp b/thorlcr/master/thgraphmanager.cpp index d34ed50c6b8..5eb01feaca4 100644 --- a/thorlcr/master/thgraphmanager.cpp +++ b/thorlcr/master/thgraphmanager.cpp @@ -372,6 +372,16 @@ CJobManager::CJobManager(ILogMsgHandler *_logHandler) : logHandler(_logHandler) activeTasks = 0; setJobManager(this); debugListener.setown(new CThorDebugListener(*this)); + + StringBuffer soPath; + globals->getProp("@query_so_dir", soPath); + StringBuffer soPattern("*."); +#ifdef _WIN32 + soPattern.append("dll"); +#else + soPattern.append("so"); +#endif + querySoCache.init(soPath.str(), DEFAULT_QUERYSO_LIMIT, soPattern); } CJobManager::~CJobManager() @@ -582,16 +592,6 @@ void CJobManager::run() setWuid(NULL); #ifndef _CONTAINERIZED - StringBuffer soPath; - globals->getProp("@query_so_dir", soPath); - StringBuffer soPattern("*."); -#ifdef _WIN32 - soPattern.append("dll"); -#else - soPattern.append("so"); -#endif - querySoCache.init(soPath.str(), DEFAULT_QUERYSO_LIMIT, soPattern); - SCMStringBuffer _queueNames; const char *thorName = globals->queryProp("@name"); if (!thorName) thorName = "thor"; From 9846be0f2a99f3b14d68bab0e6e0241ab658a34c Mon Sep 17 00:00:00 2001 From: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> Date: Mon, 10 Jun 2024 15:09:14 -0400 Subject: [PATCH 037/151] HPCC-32030 ECL Watch v9 fix multi ESP calls on Query details Fix duplicate calls to /WsWorkunits/WUQueryDetails on Query detail page. Also fixes an issue when clicking the Save button, where deactivating a Query failed due to the "Name" param being missing in the request. 
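
Returning to the HPCC-31844 commit above: the fix moves querySoCache.init() out of a bare-metal-only section of run() and into the CJobManager constructor, so the cache is bounded in containerized deployments too. A minimal sketch of why an uninitialised limit matters follows; this QuerySoCache and its limit value are hypothetical stand-ins for the real classes.

    #include <cstddef>
    #include <deque>
    #include <iostream>
    #include <string>

    class QuerySoCache
    {
        std::deque<std::string> entries;
        std::size_t limit = 0;    // stays 0 until init() is called
    public:
        void init(std::size_t maxEntries) { limit = maxEntries; }
        void add(const std::string &dll)
        {
            entries.push_back(dll);
            // If init() was never called, limit is 0 and nothing is ever
            // evicted -- the "indeterminate amount of query dlls" described
            // in the commit message above.
            while (limit && entries.size() > limit)
            {
                std::cout << "evicting " << entries.front() << "\n";
                entries.pop_front();
            }
        }
    };

    class JobManager
    {
        QuerySoCache querySoCache;
    public:
        JobManager()
        {
            // Initialise in the constructor so every deployment mode -- not
            // just the bare-metal path through run() -- gets a bounded cache.
            querySoCache.init(10);
        }
        void onQuery(const std::string &dll) { querySoCache.add(dll); }
    };

    int main()
    {
        JobManager mgr;
        for (int i = 0; i < 12; i++)
            mgr.onQuery("query" + std::to_string(i) + ".so");
        return 0;
    }

The failure itself came from the disagreement between the two sides: the workers' initialised caches evicted on schedule, the manager's unbounded one never did, so the manager skipped resending dlls the workers had already deleted.
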
Signed-off-by: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> --- esp/src/src-react/components/QueryDetails.tsx | 10 +++++--- esp/src/src-react/components/QuerySummary.tsx | 24 +++++++++---------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/esp/src/src-react/components/QueryDetails.tsx b/esp/src/src-react/components/QueryDetails.tsx index af9ef819876..bbd2e3e969c 100644 --- a/esp/src/src-react/components/QueryDetails.tsx +++ b/esp/src/src-react/components/QueryDetails.tsx @@ -36,19 +36,23 @@ export const QueryDetails: React.FunctionComponent = ({ const [logicalFileCount, setLogicalFileCount] = React.useState(0); const [superFileCount, setSuperFileCount] = React.useState(0); const [libsUsedCount, setLibsUsedCount] = React.useState(0); + const [suspended, setSuspended] = React.useState(false); + const [activated, setActivated] = React.useState(false); React.useEffect(() => { setQuery(ESPQuery.Get(querySet, queryId)); }, [setQuery, queryId, querySet]); React.useEffect(() => { - query?.getDetails().then(({ WUQueryDetailsResponse }) => { + query?.getDetails().then(() => { setWuid(query.Wuid); setLogicalFileCount(query.LogicalFiles?.Item?.length); setSuperFileCount(query.SuperFiles?.SuperFile?.length); setLibsUsedCount(query.LibrariesUsed?.Item?.length); + setActivated(query.Activated); + setSuspended(query.Suspended); }); - }, [query, setLogicalFileCount, setSuperFileCount, setLibsUsedCount]); + }, [query]); const onTabSelect = React.useCallback((tab: TabInfo) => { switch (tab.id) { @@ -100,7 +104,7 @@ export const QueryDetails: React.FunctionComponent = ({
- + diff --git a/esp/src/src-react/components/QuerySummary.tsx b/esp/src/src-react/components/QuerySummary.tsx index 9e9869ddb95..1ce2df2109c 100644 --- a/esp/src/src-react/components/QuerySummary.tsx +++ b/esp/src/src-react/components/QuerySummary.tsx @@ -15,17 +15,20 @@ const logger = scopedLogger("../components/QuerySummary.tsx"); interface QuerySummaryProps { querySet: string; queryId: string; + isSuspended?: boolean; + isActivated?: boolean; } export const QuerySummary: React.FunctionComponent = ({ querySet, - queryId + queryId, + isSuspended = false, + isActivated = false }) => { const [query, setQuery] = React.useState(); - const [wuid, setWuid] = React.useState(""); - const [suspended, setSuspended] = React.useState(false); - const [activated, setActivated] = React.useState(false); + const [suspended, setSuspended] = React.useState(isSuspended); + const [activated, setActivated] = React.useState(isActivated); const [DeleteConfirm, setShowDeleteConfirm] = useConfirm({ title: nlsHPCC.Delete, @@ -57,12 +60,9 @@ export const QuerySummary: React.FunctionComponent = ({ }, [queryId, querySet]); React.useEffect(() => { - query?.getDetails().then(({ WUQueryDetailsResponse }) => { - setWuid(query?.Wuid); - setSuspended(query.Suspended); - setActivated(query.Activated); - }); - }, [query]); + setActivated(isActivated); + setSuspended(isSuspended); + }, [isActivated, isSuspended]); const buttons = React.useMemo((): ICommandBarItemProps[] => [ { @@ -73,7 +73,7 @@ export const QuerySummary: React.FunctionComponent = ({ { key: "save", text: nlsHPCC.Save, iconProps: { iconName: "Save" }, disabled: !canSave, onClick: () => { - const selection = [{ QuerySetId: querySet, Id: queryId }]; + const selection = [{ QuerySetId: querySet, Id: queryId, Name: query?.QueryName }]; const actions = []; if (suspended !== query?.Suspended) { actions.push(WsWorkunits.WUQuerysetQueryAction(selection, suspended ? "Suspend" : "Unsuspend")); @@ -125,7 +125,7 @@ export const QuerySummary: React.FunctionComponent = ({ }} /> {nlsHPCC.Workunit} {nlsHPCC.Other} From f46cadacd0174a50a1165a4959c5ddcaf04b6706 Mon Sep 17 00:00:00 2001 From: M Kelly Date: Mon, 10 Jun 2024 16:03:17 -0400 Subject: [PATCH 038/151] HPCC-28461 Add listen queue limit to dafilesrv Signed-off-by: M Kelly --- fs/dafilesrv/dafilesrv.cpp | 8 ++++++-- fs/dafsserver/dafsserver.cpp | 16 ++++++++-------- fs/dafsserver/dafsserver.hpp | 18 +----------------- system/jlib/jsocket.hpp | 4 ++++ 4 files changed, 19 insertions(+), 27 deletions(-) diff --git a/fs/dafilesrv/dafilesrv.cpp b/fs/dafilesrv/dafilesrv.cpp index 48e06a24458..34e875fa315 100644 --- a/fs/dafilesrv/dafilesrv.cpp +++ b/fs/dafilesrv/dafilesrv.cpp @@ -707,6 +707,10 @@ int main(int argc, const char* argv[]) dedicatedRowServiceSSL = dafileSrvInstance->getPropBool("@rowServiceSSL", dedicatedRowServiceSSL); rowServiceOnStdPort = dafileSrvInstance->getPropBool("@rowServiceOnStdPort", rowServiceOnStdPort); + unsigned listenQueueLimit = dafileSrvInstance->getPropInt("@maxBacklogQueueSize", DEFAULT_LISTEN_QUEUE_SIZE); + // NB: could check getComponentConfig()->getPropInt("expert/@maxBacklogQueueSize", DEFAULT_LISTEN_QUEUE_SIZE); + // but many other components have their own explcit setting for this ... 
+ installDefaultFileHooks(dafileSrvInstance); #ifndef _CONTAINERIZED @@ -843,7 +847,7 @@ int main(int argc, const char* argv[]) { SocketEndpoint rowServiceEp(listenep); // copy listenep, incase bound by -addr rowServiceEp.port = dedicatedRowServicePort; - server->run(config, connectMethod, listenep, sslport, &rowServiceEp, dedicatedRowServiceSSL, rowServiceOnStdPort); + server->run(config, connectMethod, listenep, sslport, listenQueueLimit, &rowServiceEp, dedicatedRowServiceSSL, rowServiceOnStdPort); } else server->run(config, connectMethod, listenep, sslport); @@ -931,7 +935,7 @@ int main(int argc, const char* argv[]) { SocketEndpoint rowServiceEp(listenep); // copy listenep, incase bound by -addr rowServiceEp.port = dedicatedRowServicePort; - server->run(config, connectMethod, listenep, sslport, &rowServiceEp, dedicatedRowServiceSSL, rowServiceOnStdPort); + server->run(config, connectMethod, listenep, sslport, listenQueueLimit, &rowServiceEp, dedicatedRowServiceSSL, rowServiceOnStdPort); } else server->run(config, connectMethod, listenep, sslport); diff --git a/fs/dafsserver/dafsserver.cpp b/fs/dafsserver/dafsserver.cpp index 9e788fe4447..375f7e03e70 100644 --- a/fs/dafsserver/dafsserver.cpp +++ b/fs/dafsserver/dafsserver.cpp @@ -3629,7 +3629,7 @@ class CRemoteFileServer : implements IRemoteFileServer, public CInterface IMPLEMENT_IINTERFACE CRemoteFileServer(unsigned maxThreads, unsigned maxThreadsDelayMs, unsigned maxAsyncCopy, IPropertyTree *_keyPairInfo) - : asyncCommandManager(maxAsyncCopy), stdCmdThrottler("stdCmdThrotlter"), slowCmdThrottler("slowCmdThrotlter"), keyPairInfo(_keyPairInfo) + : asyncCommandManager(maxAsyncCopy), stdCmdThrottler("stdCmdThrottler"), slowCmdThrottler("slowCmdThrottler"), keyPairInfo(_keyPairInfo) { lasthandle = 0; selecthandler.setown(createSocketSelectHandler(NULL)); @@ -5288,7 +5288,7 @@ class CRemoteFileServer : implements IRemoteFileServer, public CInterface handleTracer.traceIfReady(); } - virtual void run(IPropertyTree *componentConfig, DAFSConnectCfg _connectMethod, const SocketEndpoint &listenep, unsigned sslPort, const SocketEndpoint *rowServiceEp, bool _rowServiceSSL, bool _rowServiceOnStdPort) override + virtual void run(IPropertyTree *componentConfig, DAFSConnectCfg _connectMethod, const SocketEndpoint &listenep, unsigned sslPort, unsigned listenQueueLimit, const SocketEndpoint *rowServiceEp, bool _rowServiceSSL, bool _rowServiceOnStdPort) override { SocketEndpoint sslep(listenep); #ifndef _CONTAINERIZED @@ -5305,12 +5305,12 @@ class CRemoteFileServer : implements IRemoteFileServer, public CInterface throw createDafsException(DAFSERR_serverinit_failed, "dafilesrv port not specified"); if (listenep.isNull()) - acceptSock.setown(ISocket::create(listenep.port)); + acceptSock.setown(ISocket::create(listenep.port, listenQueueLimit)); else { StringBuffer ips; listenep.getHostText(ips); - acceptSock.setown(ISocket::create_ip(listenep.port,ips.str())); + acceptSock.setown(ISocket::create_ip(listenep.port, ips.str(), listenQueueLimit)); } } @@ -5342,12 +5342,12 @@ class CRemoteFileServer : implements IRemoteFileServer, public CInterface #endif if (sslep.isNull()) - secureSock.setown(ISocket::create(sslep.port)); + secureSock.setown(ISocket::create(sslep.port, listenQueueLimit)); else { StringBuffer ips; sslep.getHostText(ips); - secureSock.setown(ISocket::create_ip(sslep.port,ips.str())); + secureSock.setown(ISocket::create_ip(sslep.port, ips.str(), listenQueueLimit)); } } @@ -5357,12 +5357,12 @@ class CRemoteFileServer : implements IRemoteFileServer, 
public CInterface rowServiceOnStdPort = _rowServiceOnStdPort; if (rowServiceEp->isNull()) - rowServiceSock.setown(ISocket::create(rowServiceEp->port)); + rowServiceSock.setown(ISocket::create(rowServiceEp->port, listenQueueLimit)); else { StringBuffer ips; rowServiceEp->getHostText(ips); - rowServiceSock.setown(ISocket::create_ip(rowServiceEp->port, ips.str())); + rowServiceSock.setown(ISocket::create_ip(rowServiceEp->port, ips.str(), listenQueueLimit)); } #ifndef _CONTAINERIZED diff --git a/fs/dafsserver/dafsserver.hpp b/fs/dafsserver/dafsserver.hpp index f5cafb7abac..9d239a15116 100644 --- a/fs/dafsserver/dafsserver.hpp +++ b/fs/dafsserver/dafsserver.hpp @@ -31,22 +31,6 @@ #define DAFILESRV_STREAMREAD_MINVERSION 22 #define DAFILESRV_STREAMGENERAL_MINVERSION 25 -// RemoteFileServer throttling defaults -#define DEFAULT_THREADLIMIT 100 -#define DEFAULT_THREADLIMITDELAYMS (60*1000) -#define DEFAULT_ASYNCCOPYMAX 10 - -#define DEFAULT_STDCMD_PARALLELREQUESTLIMIT 80 -#define DEFAULT_STDCMD_THROTTLEDELAYMS 1000 -#define DEFAULT_STDCMD_THROTTLECPULIMIT 85 -#define DEFAULT_STDCMD_THROTTLEQUEUELIMIT 1000 - -#define DEFAULT_SLOWCMD_PARALLELREQUESTLIMIT 20 -#define DEFAULT_SLOWCMD_THROTTLEDELAYMS 5000 -#define DEFAULT_SLOWCMD_THROTTLECPULIMIT 75 -#define DEFAULT_SLOWCMD_THROTTLEQUEUELIMIT 1000 - - enum RowServiceCfg { rs_off, // No dedicated row service, allows row service commands on std. dafilesrv port. @@ -58,7 +42,7 @@ enum RowServiceCfg interface IRemoteFileServer : extends IInterface { - virtual void run(IPropertyTree *componentConfig, DAFSConnectCfg connectMethod, const SocketEndpoint &listenep, unsigned sslPort=0, const SocketEndpoint *rowServiceEp=nullptr, bool rowServiceSSL=false, bool rowServiceOnStdPort=true) = 0; + virtual void run(IPropertyTree *componentConfig, DAFSConnectCfg connectMethod, const SocketEndpoint &listenep, unsigned sslPort=0, unsigned listenQueueLimit=DEFAULT_LISTEN_QUEUE_SIZE, const SocketEndpoint *rowServiceEp=nullptr, bool rowServiceSSL=false, bool rowServiceOnStdPort=true) = 0; virtual void run(IPropertyTree *componentConfig, DAFSConnectCfg _connectMethod, ISocket *listenSocket, ISocket *secureSocket, ISocket *rowServiceSocket) = 0; virtual void stop() = 0; virtual unsigned idleTime() = 0; // in ms diff --git a/system/jlib/jsocket.hpp b/system/jlib/jsocket.hpp index dec2268a56b..bf91c86a3b7 100644 --- a/system/jlib/jsocket.hpp +++ b/system/jlib/jsocket.hpp @@ -34,7 +34,11 @@ # define _TRACELINKCLOSED #endif +#ifdef _WIN32 #define DEFAULT_LISTEN_QUEUE_SIZE 200 // maximum for windows 2000 server +#else +#define DEFAULT_LISTEN_QUEUE_SIZE 600 +#endif #define DEFAULT_LINGER_TIME 1000 // seconds #ifndef WAIT_FOREVER #define WAIT_FOREVER ((unsigned)-1) From 14357e88c53bb18273581e1a1aa2ea8e703efa45 Mon Sep 17 00:00:00 2001 From: Terrence Asselin Date: Mon, 11 Mar 2024 11:40:10 -0500 Subject: [PATCH 039/151] HPCC-31394 WsSMC send roxie control cmd to ssl port if available Rather than default to port 9876, use the roxie farmer ports configured for sending control messages to. Prefer an ssl port if available. Update the smart pointer factory and ctors where required to accept a tls config from bare-metal configurations. These configurations using transitional APIs don't support all the properties and sync features of the containerized versions. A separate ticket would be required to determine if and how to add the full complement of tls related configuration, behavior and secrets support to bare metal that is currently in place in containerized code. 
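
For readers unfamiliar with the knob HPCC-28461 above exposes: the listen queue (backlog) is the second argument to listen(2), the number of pending connections the kernel will queue on an accepting socket before new ones may be dropped or refused. A minimal POSIX sketch follows; the port number is chosen purely for illustration.

    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <cstdio>

    int main()
    {
        int listenQueueLimit = 600;   // cf. the new non-Windows default above

        int sock = socket(AF_INET, SOCK_STREAM, 0);
        if (sock < 0)
            { perror("socket"); return 1; }

        sockaddr_in addr{};
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(7100);  // illustrative port only

        if (bind(sock, (sockaddr *)&addr, sizeof(addr)) < 0)
            { perror("bind"); close(sock); return 1; }

        // The second argument is the backlog that @maxBacklogQueueSize now
        // controls; connection attempts beyond it wait, retry, or fail until
        // accept() drains the queue.
        if (listen(sock, listenQueueLimit) < 0)
            { perror("listen"); close(sock); return 1; }

        printf("listening with backlog %d\n", listenQueueLimit);
        close(sock);
        return 0;
    }
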
Signed-off-by: Terrence Asselin --- esp/services/ws_ecl/ws_ecl_service.cpp | 14 ++-- esp/services/ws_smc/ws_smcService.cpp | 14 ++-- esp/smc/SMCLib/TpContainer.cpp | 5 ++ esp/smc/SMCLib/TpWrapper.cpp | 70 +++++++++++++++++++ esp/smc/SMCLib/TpWrapper.hpp | 1 + roxie/roxiepipe/roxiepipe.cpp | 2 +- system/jlib/jsmartsock.cpp | 12 +++- system/jlib/jsmartsock.ipp | 2 +- system/security/securesocket/securesocket.cpp | 6 +- system/security/securesocket/securesocket.hpp | 2 +- 10 files changed, 107 insertions(+), 21 deletions(-) diff --git a/esp/services/ws_ecl/ws_ecl_service.cpp b/esp/services/ws_ecl/ws_ecl_service.cpp index 9a2db171a3d..61fc71b08ac 100644 --- a/esp/services/ws_ecl/ws_ecl_service.cpp +++ b/esp/services/ws_ecl/ws_ecl_service.cpp @@ -11,6 +11,7 @@ #include "xsdparser.hpp" #include "httpclient.hpp" #include "jsonhelpers.hpp" +#include "securesocket.hpp" #define SDS_LOCK_TIMEOUT (5*60*1000) // 5mins, 30s a bit short @@ -211,9 +212,8 @@ class WsEclSocketFactory : public CSmartSocketFactory { } - WsEclSocketFactory(const char *_socklist, bool _retry, bool includeTarget, const char *_alias, unsigned _dnsInterval, bool useTls) : CSmartSocketFactory(_socklist, _retry, 60, _dnsInterval), includeTargetInURL(includeTarget), alias(_alias) + WsEclSocketFactory(const char *_socklist, IPropertyTree *_tlsConfig, bool _retry, bool includeTarget, const char *_alias, unsigned _dnsInterval) : CSmartSocketFactory(_socklist, _tlsConfig, _retry, 60, _dnsInterval), includeTargetInURL(includeTarget), alias(_alias) { - tlsService = useTls; } }; @@ -260,7 +260,8 @@ void initBareMetalRoxieTargets(MapStringToMyClass &connMap, const char *vip = NULL; bool includeTargetInURL = true; unsigned dnsInterval = (unsigned) -1; - bool useTls = false; + + Owned tlsConfig; if (vips) { IPropertyTree *pc = vips->queryPropTree(xpath.clear().appendf("ProcessCluster[@name='%s']", process.str())); @@ -269,7 +270,8 @@ void initBareMetalRoxieTargets(MapStringToMyClass &connMap, vip = pc->queryProp("@vip"); includeTargetInURL = pc->getPropBool("@includeTargetInURL", true); dnsInterval = (unsigned) pc->getPropInt("@dnsInterval", -1); - useTls = pc->getPropBool("@tls", false); + if (pc->getPropBool("@tls", false)) + tlsConfig.setown(createSecureSocketConfig(nullptr, nullptr, nullptr)); } } StringBuffer list; @@ -297,7 +299,7 @@ void initBareMetalRoxieTargets(MapStringToMyClass &connMap, farmerPort = port; const char *protocol = farmer.queryProp("@protocol"); if (protocol && streq(protocol, "ssl")) - useTls = true; + tlsConfig.setown(createSecureSocketConfig(farmer.queryProp("@certificateFileName"), farmer.queryProp("@privateKeyFileName"), nullptr)); break; //use the first one without port==0 } Owned servers = roxieCluster->getElements("RoxieServerProcess"); @@ -308,7 +310,7 @@ void initBareMetalRoxieTargets(MapStringToMyClass &connMap, if (list.length()) { StringAttr alias(clusterInfo->getAlias()); - Owned sf = new WsEclSocketFactory(list.str(), !loadBalanced, includeTargetInURL, loadBalanced ? alias.str() : NULL, dnsInterval, useTls); + Owned sf = new WsEclSocketFactory(list.str(), tlsConfig, !loadBalanced, includeTargetInURL, loadBalanced ? 
alias.str() : NULL, dnsInterval); connMap.setValue(target.str(), sf.get()); if (alias.length() && !connMap.getValue(alias.str())) //only need one vip per alias for routing purposes connMap.setValue(alias.str(), sf.get()); diff --git a/esp/services/ws_smc/ws_smcService.cpp b/esp/services/ws_smc/ws_smcService.cpp index e7b35f99a4b..89169dc29b9 100644 --- a/esp/services/ws_smc/ws_smcService.cpp +++ b/esp/services/ws_smc/ws_smcService.cpp @@ -143,6 +143,8 @@ void CWsSMCEx::init(IPropertyTree *cfg, const char *process, const char *service #ifdef _CONTAINERIZED initContainerRoxieTargets(roxieConnMap); +#else + initBareMetalRoxieTargets(roxieConnMap); #endif xpath.setf("Software/EspProcess[@name=\"%s\"]/EspService[@name=\"%s\"]/ActivityInfoCacheSeconds", process, service); @@ -2305,18 +2307,18 @@ bool CWsSMCEx::onRoxieControlCmd(IEspContext &context, IEspRoxieControlCmdReques if (isEmptyString(process)) throw makeStringException(ECLWATCH_MISSING_PARAMS, "Process cluster not specified."); - SocketEndpointArray addrs; - getRoxieProcessServers(process, addrs); - if (!addrs.length()) - throw makeStringException(ECLWATCH_CANNOT_GET_ENV_INFO, "Process cluster not found."); - Owned controlResp = sendRoxieControlAllNodes(addrs.item(0), controlReq, true, req.getWait()); + ISmartSocketFactory *conn = roxieConnMap.getValue(process); + if (!conn) + throw makeStringExceptionV(ECLWATCH_CANNOT_GET_ENV_INFO, "Connection info for '%s' process cluster not found.", process); + + Owned controlResp = sendRoxieControlAllNodes(conn, controlReq, true, req.getWait(), ROXIECONNECTIONTIMEOUT); #else const char *target = req.getTargetCluster(); if (isEmptyString(target)) target = req.getProcessCluster(); //backward compatible if (isEmptyString(target)) throw makeStringException(ECLWATCH_MISSING_PARAMS, "Target cluster not specified."); - + ISmartSocketFactory *conn = roxieConnMap.getValue(target); if (!conn) throw makeStringExceptionV(ECLWATCH_CANNOT_GET_ENV_INFO, "roxie target cluster not mapped: %s", target); diff --git a/esp/smc/SMCLib/TpContainer.cpp b/esp/smc/SMCLib/TpContainer.cpp index e93ce41a33f..22762414256 100644 --- a/esp/smc/SMCLib/TpContainer.cpp +++ b/esp/smc/SMCLib/TpContainer.cpp @@ -694,6 +694,11 @@ extern TPWRAPPER_API void initContainerRoxieTargets(MapStringToMyClass& connMap) +{ + IWARNLOG("UNIMPLEMENTED: CONTAINERIZED(CTpWrapper::initBareMetalRoxieTargets)"); +} + extern TPWRAPPER_API void getRoxieTargetsSupportingPublishedQueries(StringArray& names) { Owned queues = getComponentConfigSP()->getElements("queues[@type='roxie']"); diff --git a/esp/smc/SMCLib/TpWrapper.cpp b/esp/smc/SMCLib/TpWrapper.cpp index 61f1bda67d4..1ad7022709d 100644 --- a/esp/smc/SMCLib/TpWrapper.cpp +++ b/esp/smc/SMCLib/TpWrapper.cpp @@ -2093,6 +2093,76 @@ extern TPWRAPPER_API void initContainerRoxieTargets(MapStringToMyClass& connMap) +{ + Owned factory = getEnvironmentFactory(false); + Owned env = factory->openEnvironment(); + Owned envRoot = &env->getPTree(); + + Owned roxieClusters = envRoot->getElements("Software/RoxieCluster"); + ForEach(*roxieClusters) + { + IPropertyTree& roxieCluster = roxieClusters->query(); + const char* name = roxieCluster.queryProp("@name"); + if (isEmptyString(name)) + continue; + + StringBuffer addressList; + StringBuffer port(""); + Owned tlsConfig = createPTree("none"); + + Owned roxieFarms = roxieCluster.getElements("RoxieFarmProcess"); + ForEach(*roxieFarms) + { + IPropertyTree& farm = roxieFarms->query(); + const char* farmPort = farm.queryProp("@port"); + if (!isEmptyString(farmPort) && 
!streq(farmPort, "0")) + { + const char *protocol = farm.queryProp("@protocol"); + if (!isEmptyString(protocol) && strieq(protocol, "ssl")) + { + port.set(farmPort); + tlsConfig.setown(createSecureSocketConfig(farm.queryProp("@certificateFileName"), farm.queryProp("@privateKeyFileName"), nullptr)); + break; + } + else if (isEmptyString(port.str())) + { + port.set(farmPort); + } + } + } + + Owned roxieServers = roxieCluster.getElements("RoxieServerProcess"); + ForEach(*roxieServers) + appendServerAddress(addressList, *envRoot, roxieServers->query(), port.str()); + + Owned sf = streq(tlsConfig->queryName(), "ssl") ? createSecureSmartSocketFactory(addressList, tlsConfig) : createSmartSocketFactory(addressList); + connMap.setValue(name, sf.get()); + } +} + extern TPWRAPPER_API void getRoxieTargetsSupportingPublishedQueries(StringArray& names) { CConstWUClusterInfoArray clusters; diff --git a/esp/smc/SMCLib/TpWrapper.hpp b/esp/smc/SMCLib/TpWrapper.hpp index 25853a633ed..e239a920b4d 100644 --- a/esp/smc/SMCLib/TpWrapper.hpp +++ b/esp/smc/SMCLib/TpWrapper.hpp @@ -228,6 +228,7 @@ extern TPWRAPPER_API unsigned getWUClusterInfo(CConstWUClusterInfoArray& cluster extern TPWRAPPER_API IConstWUClusterInfo* getWUClusterInfoByName(const char* clustName); extern TPWRAPPER_API void initContainerRoxieTargets(MapStringToMyClass& connMap); +extern TPWRAPPER_API void initBareMetalRoxieTargets(MapStringToMyClass& connMap); extern TPWRAPPER_API unsigned getThorClusterNames(StringArray& targetNames, StringArray& queueNames); extern TPWRAPPER_API void getRoxieTargetsSupportingPublishedQueries(StringArray& names); extern TPWRAPPER_API void validateTargetName(const char* target); diff --git a/roxie/roxiepipe/roxiepipe.cpp b/roxie/roxiepipe/roxiepipe.cpp index 4a9e700b6a0..5599cbd2df4 100644 --- a/roxie/roxiepipe/roxiepipe.cpp +++ b/roxie/roxiepipe/roxiepipe.cpp @@ -677,7 +677,7 @@ int main(int argc, char *argv[]) { #ifdef _USE_OPENSSL if (useSSL) - smartSocketFactory = createSecureSmartSocketFactory(hosts.str(), retryMode); + smartSocketFactory = createSecureSmartSocketFactory(hosts.str(), createSecureSocketConfig(nullptr, nullptr, nullptr), retryMode); else #endif smartSocketFactory = createSmartSocketFactory(hosts.str(), retryMode); diff --git a/system/jlib/jsmartsock.cpp b/system/jlib/jsmartsock.cpp index ce96fe254d9..e2a080e7a51 100644 --- a/system/jlib/jsmartsock.cpp +++ b/system/jlib/jsmartsock.cpp @@ -242,13 +242,19 @@ CSmartSocketFactory::CSmartSocketFactory(IPropertyTree &service, bool _retry, un } } -CSmartSocketFactory::CSmartSocketFactory(const char *_socklist, bool _retry, unsigned _retryInterval, unsigned _dnsInterval) +CSmartSocketFactory::CSmartSocketFactory(const char *_socklist, IPropertyTree* _tlsConfig, bool _retry, unsigned _retryInterval, unsigned _dnsInterval) { - PROGLOG("CSmartSocketFactory::CSmartSocketFactory(%s)",_socklist?_socklist:"NULL"); + PROGLOG("CSmartSocketFactory::CSmartSocketFactory(%s, tlsConfig(%s))",_socklist?_socklist:"NULL", _tlsConfig?"yes":"no"); SmartSocketListParser slp(_socklist); if (slp.getSockets(sockArray) == 0) throw createSmartSocketException(0, "no endpoints defined"); + if (_tlsConfig != nullptr) + { + tlsService = true; + tlsConfig.setown(createSyncedPropertyTree(_tlsConfig)); + } + shuffleEndpoints(); nextEndpointIndex = 0; @@ -491,5 +497,5 @@ ISmartSocketFactory *createSmartSocketFactory(IPropertyTree &service, bool _retr ISmartSocketFactory *createSmartSocketFactory(const char *_socklist, bool _retry, unsigned _retryInterval, unsigned _dnsInterval) { - 
return new CSmartSocketFactory(_socklist, _retry, _retryInterval, _dnsInterval); + return new CSmartSocketFactory(_socklist, nullptr, _retry, _retryInterval, _dnsInterval); } diff --git a/system/jlib/jsmartsock.ipp b/system/jlib/jsmartsock.ipp index 0aa065f95f1..1b3875003c0 100644 --- a/system/jlib/jsmartsock.ipp +++ b/system/jlib/jsmartsock.ipp @@ -80,7 +80,7 @@ protected: public: IMPLEMENT_IINTERFACE_USING(Thread); - CSmartSocketFactory(const char *_socklist, bool _retry = false, unsigned _retryInterval = 60, unsigned _dnsInterval = (unsigned)-1); + CSmartSocketFactory(const char *_socklist, IPropertyTree *_tlsConfig, bool _retry = false, unsigned _retryInterval = 60, unsigned _dnsInterval = (unsigned)-1); CSmartSocketFactory(IPropertyTree &service, bool _retry = false, unsigned _retryInterval = 60, unsigned _dnsInterval = (unsigned)-1); ~CSmartSocketFactory(); int run(); diff --git a/system/security/securesocket/securesocket.cpp b/system/security/securesocket/securesocket.cpp index 2b276bef1cc..4c6e8b10a35 100644 --- a/system/security/securesocket/securesocket.cpp +++ b/system/security/securesocket/securesocket.cpp @@ -2188,7 +2188,7 @@ class CSecureSmartSocketFactory : public CSmartSocketFactory public: Owned secureContext; - CSecureSmartSocketFactory(const char *_socklist, bool _retry, unsigned _retryInterval, unsigned _dnsInterval) : CSmartSocketFactory(_socklist, _retry, _retryInterval, _dnsInterval) + CSecureSmartSocketFactory(const char *_socklist, IPropertyTree *_tlsConfig, bool _retry, unsigned _retryInterval, unsigned _dnsInterval) : CSmartSocketFactory(_socklist, _tlsConfig, _retry, _retryInterval, _dnsInterval) { secureContext.setown(createSecureSocketContext(ClientSocket)); } @@ -2221,9 +2221,9 @@ class CSecureSmartSocketFactory : public CSmartSocketFactory } }; -ISmartSocketFactory *createSecureSmartSocketFactory(const char *_socklist, bool _retry, unsigned _retryInterval, unsigned _dnsInterval) +ISmartSocketFactory *createSecureSmartSocketFactory(const char *_socklist, IPropertyTree* _tlsConfig, bool _retry, unsigned _retryInterval, unsigned _dnsInterval) { - return new CSecureSmartSocketFactory(_socklist, _retry, _retryInterval, _dnsInterval); + return new CSecureSmartSocketFactory(_socklist, _tlsConfig, _retry, _retryInterval, _dnsInterval); } ISmartSocketFactory *createSecureSmartSocketFactory(IPropertyTree &service, bool _retry, unsigned _retryInterval, unsigned _dnsInterval) diff --git a/system/security/securesocket/securesocket.hpp b/system/security/securesocket/securesocket.hpp index 4d75bb56e75..1dac6af9c27 100644 --- a/system/security/securesocket/securesocket.hpp +++ b/system/security/securesocket/securesocket.hpp @@ -105,7 +105,7 @@ SECURESOCKET_API int signCertificate(const char* csr, const char* ca_certificate }; -SECURESOCKET_API ISmartSocketFactory *createSecureSmartSocketFactory(const char *_socklist, bool _retry = false, unsigned _retryInterval = 60, unsigned _dnsInterval = (unsigned) -1); +SECURESOCKET_API ISmartSocketFactory *createSecureSmartSocketFactory(const char *_socklist, IPropertyTree* _tlsConfig, bool _retry = false, unsigned _retryInterval = 60, unsigned _dnsInterval = (unsigned) -1); SECURESOCKET_API ISmartSocketFactory *createSecureSmartSocketFactory(IPropertyTree &service, bool _retry = false, unsigned _retryInterval = 60, unsigned _dnsInterval = (unsigned) -1); SECURESOCKET_API IConversation *createSingletonSecureSocketConnection(unsigned short port,SocketEndpoint *_ep=nullptr); From 6af95780f1ad5a8a51bcdab2f69af32b30e6dbb2 Mon Sep 17 
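
The port-selection rule that HPCC-31394 above adds to initBareMetalRoxieTargets is worth spelling out: scan the configured farmer ports, take an SSL farmer as soon as one appears, and otherwise remember the first usable plain port. Here is a compilable sketch of just that rule; the Farmer and Choice types are hypothetical stand-ins for the environment-tree entries, not HPCC source.

    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    struct Farmer { unsigned port = 0; std::string protocol; };
    struct Choice { unsigned port = 0; bool tls = false; };

    std::optional<Choice> choosePort(const std::vector<Farmer> &farmers)
    {
        std::optional<Choice> plain;
        for (const auto &f : farmers)
        {
            if (f.port == 0)
                continue;                       // unconfigured: skip
            if (f.protocol == "ssl")
                return Choice{f.port, true};    // prefer TLS and stop looking
            if (!plain)
                plain = Choice{f.port, false};  // remember first plain port
        }
        return plain;                           // empty if no usable port
    }

    int main()
    {
        std::vector<Farmer> farmers{{9876, ""}, {19876, "ssl"}};
        if (auto c = choosePort(farmers))
            std::cout << "send control messages to port " << c->port
                      << (c->tls ? " over TLS" : " in the clear") << "\n";
        return 0;
    }

The chosen flavour then decides the factory, mirroring the createSecureSmartSocketFactory versus createSmartSocketFactory split in the TpWrapper.cpp hunk above.
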
00:00:00 2001 From: Gavin Halliday Date: Tue, 11 Jun 2024 10:23:05 +0100 Subject: [PATCH 040/151] HPCC-32037 Remove logging lines before log output format is configured Signed-off-by: Gavin Halliday --- system/jlib/jlog.cpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/system/jlib/jlog.cpp b/system/jlib/jlog.cpp index 714c46251c4..d9063ec018a 100644 --- a/system/jlib/jlog.cpp +++ b/system/jlib/jlog.cpp @@ -2403,8 +2403,6 @@ void setupContainerizedLogMsgHandler() if (logConfig->hasProp(logFormatAtt)) { const char *logFormat = logConfig->queryProp(logFormatAtt); - LOG(MCdebugInfo, "JLog: log format configuration detected '%s'!", logFormat); - bool newFormatDetected = false; if (streq(logFormat, "xml")) { @@ -2416,9 +2414,7 @@ void setupContainerizedLogMsgHandler() theStderrHandler = new HandleLogMsgHandlerJSON(stderr, MSGFIELD_STANDARD); newFormatDetected = true; } - else if (streq(logFormat, "table")) - LOG(MCdebugInfo, "JLog: default log format detected: '%s'!", logFormat); - else + else if (!streq(logFormat, "table")) LOG(MCoperatorWarning, "JLog: Invalid log format configuration detected '%s'!", logFormat); if (newFormatDetected) From 4bfc06f9da121b0bc93a6c85fb9fd5380b1d2cab Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Tue, 11 Jun 2024 12:26:24 +0100 Subject: [PATCH 041/151] HPCC-31993 Fix jptree hasProp leak Signed-off-by: Jake Smith --- system/jlib/jptree.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/system/jlib/jptree.cpp b/system/jlib/jptree.cpp index e9f19075d0a..c15302f35ea 100644 --- a/system/jlib/jptree.cpp +++ b/system/jlib/jptree.cpp @@ -1368,9 +1368,8 @@ bool PTree::hasProp(const char * xpath) const } else { - IPropertyTreeIterator *iter = getElements(xpath); + Owned iter = getElements(xpath); bool res = iter->first(); - iter->Release(); return res; } } From 4296177e39425f593a963361bc04f3f915f18388 Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Tue, 11 Jun 2024 12:41:10 +0100 Subject: [PATCH 042/151] HPCC-32040 Fix various IPT leaks Signed-off-by: Jake Smith --- dali/base/dacsds.cpp | 3 +-- dali/base/dafdesc.cpp | 2 +- dali/base/dasds.cpp | 4 +--- dali/base/dasds.ipp | 3 +-- dali/ft/filecopy.cpp | 3 +-- ecl/eclcc/eclcc.cpp | 2 +- esp/bindings/http/platform/msgbuilder.cpp | 11 ++++------- esp/platform/espcfg.cpp | 21 +++++---------------- tools/esdlcmd/esdl2ecl.cpp | 2 +- 9 files changed, 16 insertions(+), 35 deletions(-) diff --git a/dali/base/dacsds.cpp b/dali/base/dacsds.cpp index 80fe9701144..3a23fdf5e4e 100644 --- a/dali/base/dacsds.cpp +++ b/dali/base/dacsds.cpp @@ -1108,13 +1108,12 @@ void CClientRemoteTree::resetState(unsigned _state, bool sub) serverId = 0; if (sub) { - IPropertyTreeIterator *iter = getElements("*"); + Owned iter = getElements("*"); ForEach(*iter) { CClientRemoteTree &child = (CClientRemoteTree &)iter->query(); child.resetState(state, sub); } - iter->Release(); } } diff --git a/dali/base/dafdesc.cpp b/dali/base/dafdesc.cpp index 5d672a16d78..825833e086a 100644 --- a/dali/base/dafdesc.cpp +++ b/dali/base/dafdesc.cpp @@ -3845,7 +3845,7 @@ class CStorageApiInfo : public CInterfaceOf throw makeStringExceptionV(-1, "No container provided: path %s", path.str()); return container; } - Linked xml; + Owned xml; }; class CStoragePlaneInfo : public CInterfaceOf diff --git a/dali/base/dasds.cpp b/dali/base/dasds.cpp index 489612a06a8..3e35e9a08b8 100644 --- a/dali/base/dasds.cpp +++ b/dali/base/dasds.cpp @@ -2699,7 +2699,7 @@ class CServerRemoteTree : public CRemoteTreeBase if (cutoff < 0 || depth iter = 
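
The leak fixes in HPCC-31993 and HPCC-32040 above all convert raw, reference-counted pointers to jlib's Owned<>. Some call sites never called Release() at all, others released manually but only on some paths; wrapping the pointer makes the release unconditional at scope exit. A minimal sketch follows, with a deliberately simplified Owned stand-in (the real jlib template is richer).

    #include <iostream>

    struct Iterator
    {
        bool first() { return true; }
        void Release() { std::cout << "released\n"; delete this; }
    };

    Iterator *getElements() { return new Iterator; }   // caller must Release()

    template <class T>
    class Owned                    // simplified stand-in for jlib's Owned<T>
    {
        T *ptr;
    public:
        explicit Owned(T *p) : ptr(p) {}
        ~Owned() { if (ptr) ptr->Release(); }   // released on every exit path
        T *operator->() const { return ptr; }
        Owned(const Owned &) = delete;
        Owned &operator=(const Owned &) = delete;
    };

    bool hasPropLeaky()
    {
        Iterator *iter = getElements();
        return iter->first();      // Release() never runs: leaked
    }

    bool hasPropFixed()
    {
        Owned<Iterator> iter(getElements());
        return iter->first();      // destructor releases, even on exceptions
    }

    int main()
    {
        hasPropLeaky();            // prints nothing
        hasPropFixed();            // prints "released"
        return 0;
    }
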
getElements("*"); iter->first(); while (iter->isValid()) { @@ -2708,8 +2708,6 @@ class CServerRemoteTree : public CRemoteTreeBase child->serializeCutOffRT(tgt, cutoff, depth+1, extValues); iter->next(); } - iter->Release(); - } tgt.append(""); // element terminator. i.e. blank child name. } diff --git a/dali/base/dasds.ipp b/dali/base/dasds.ipp index 9ea156b471c..b58e2fff0f9 100644 --- a/dali/base/dasds.ipp +++ b/dali/base/dasds.ipp @@ -166,7 +166,7 @@ public: bool res = true; if (op.applyTop(node)) { - IPropertyTreeIterator *iter = node.getElements("*"); + Owned iter = node.getElements("*"); if (iter->first()) { bool levelBreak = false; @@ -180,7 +180,6 @@ public: iter->next(); } } - iter->Release(); } return res; } diff --git a/dali/ft/filecopy.cpp b/dali/ft/filecopy.cpp index ec6f2923559..35df79c7d0b 100644 --- a/dali/ft/filecopy.cpp +++ b/dali/ft/filecopy.cpp @@ -2790,14 +2790,13 @@ bool FileSprayer::restorePartition() { if (allowRecovery && progressTree->getPropBool(ANhasPartition)) { - IPropertyTreeIterator * iter = progressTree->getElements(PNpartition); + Owned iter = progressTree->getElements(PNpartition); ForEach(*iter) { PartitionPoint & next = * new PartitionPoint; next.restore(&iter->query()); partition.append(next); } - iter->Release(); return (partition.ordinality() != 0); } return false; diff --git a/ecl/eclcc/eclcc.cpp b/ecl/eclcc/eclcc.cpp index 3ce11eb8e8d..ce0977aa0bb 100644 --- a/ecl/eclcc/eclcc.cpp +++ b/ecl/eclcc/eclcc.cpp @@ -3286,7 +3286,7 @@ int EclCC::parseCommandLineOptions(int argc, const char* argv[]) void EclCC::setSecurityOptions() { - IPropertyTree *eclSecurity = configuration->getPropTree("eclSecurity"); + Owned eclSecurity = configuration->getPropTree("eclSecurity"); if (eclSecurity) { // Name of security option in configuration yaml diff --git a/esp/bindings/http/platform/msgbuilder.cpp b/esp/bindings/http/platform/msgbuilder.cpp index b1cc23cbb35..a15d1ba132a 100644 --- a/esp/bindings/http/platform/msgbuilder.cpp +++ b/esp/bindings/http/platform/msgbuilder.cpp @@ -66,7 +66,7 @@ void CSoapMsgBuilder::setPropertyValueBool(const char * key, bool val) StringBuffer & CSoapMsgBuilder::getSoapResponse(StringBuffer & soapResponse) { - IPropertyTreeIterator * itr = m_properties->getElements("*"); + Owned itr = m_properties->getElements("*"); itr->first(); while(itr->isValid()) { @@ -77,7 +77,6 @@ StringBuffer & CSoapMsgBuilder::getSoapResponse(StringBuffer & soapResponse) soapResponse.appendf("<%s>%s", key.str(), val.str(), key.str()); itr->next(); } - itr->Release(); return soapResponse; } @@ -140,7 +139,7 @@ StringBuffer & CSoapMsgXsdBuilder::getXsd(StringBuffer & wsdlSchema) wsdlSchema.appendf("<%s:complexType name=\"%s\" >", m_var.str(), m_structLabel.str()); wsdlSchema.appendf("<%s:sequence>", m_var.str()); - IPropertyTreeIterator * itr = m_properties->getElements("*"); + Owned itr = m_properties->getElements("*"); itr->first(); while(itr->isValid()) { @@ -150,7 +149,6 @@ StringBuffer & CSoapMsgXsdBuilder::getXsd(StringBuffer & wsdlSchema) wsdlSchema.appendf("<%s:element minOccurs=\"0\" maxOccurs=\"1\" name=\"%s\" type=\"xsd:%s\"/>", m_var.str(), name.str(), getXsdTypeLabel(static_cast(m_properties->getPropInt(name.str())))); itr->next(); } - itr->Release(); wsdlSchema.appendf("", m_var.str()); wsdlSchema.appendf("", m_var.str()); @@ -162,7 +160,7 @@ StringBuffer & CSoapMsgXsdBuilder::getXsd(StringBuffer & wsdlSchema) wsdlSchema.appendf("<%s:element name=\"%s\" nillable=\"true\" type=\"tns:%s\" />", m_var.str(), m_structLabel.str(), m_structLabel.str()); 
wsdlSchema.appendf("<%s:element name=\"ArrayOf%s\" nillable=\"true\" type=\"tns:ArrayOf%s\" />", m_var.str(), m_structLabel.str(), m_structLabel.str()); - return wsdlSchema; + return wsdlSchema; } const char * const XSD_STRING_DESC = "string"; @@ -188,7 +186,7 @@ CSoapMsgBuilder * CSoapMsgXsdBuilder::newMsgBuilder() { OwnednewXml = createPTree(m_structLabel.str()); - IPropertyTreeIterator * itr = m_properties->getElements("*"); + Owned itr = m_properties->getElements("*"); itr->first(); while(itr->isValid()) { @@ -208,7 +206,6 @@ CSoapMsgBuilder * CSoapMsgXsdBuilder::newMsgBuilder() } itr->next(); } - itr->Release(); StringBuffer xml; return new CSoapMsgBuilder(toXML(newXml, xml).str()); diff --git a/esp/platform/espcfg.cpp b/esp/platform/espcfg.cpp index 21a0fb87422..39b98561349 100644 --- a/esp/platform/espcfg.cpp +++ b/esp/platform/espcfg.cpp @@ -411,7 +411,7 @@ CEspConfig::CEspConfig(IProperties* inputs, IPropertyTree* envpt, IPropertyTree* #endif #endif - IPropertyTreeIterator *pt_iter = NULL; + Owned pt_iter; StringBuffer xpath; if (m_inputs->hasProp("SingleUserPass")) @@ -421,7 +421,7 @@ CEspConfig::CEspConfig(IProperties* inputs, IPropertyTree* envpt, IPropertyTree* m_inputs->getProp("SingleUserPass", plainesppass); encrypt(encesppass, plainesppass.str()); xpath.setf("SecurityManagers/SecurityManager[@type=\"SingleUserSecurityManager\"]/SingleUserSecurityManager/"); - pt_iter = m_cfg->getElements(xpath.str()); + pt_iter.setown(m_cfg->getElements(xpath.str())); if (pt_iter!=NULL) { IPropertyTree *ptree = NULL; @@ -442,8 +442,6 @@ CEspConfig::CEspConfig(IProperties* inputs, IPropertyTree* envpt, IPropertyTree* } pt_iter->next(); } - pt_iter->Release(); - pt_iter=NULL; } } @@ -470,7 +468,7 @@ CEspConfig::CEspConfig(IProperties* inputs, IPropertyTree* envpt, IPropertyTree* xpath.clear(); xpath.append("EspService"); - pt_iter = m_cfg->getElements(xpath.str()); + pt_iter.setown(m_cfg->getElements(xpath.str())); if (pt_iter!=NULL) { @@ -495,15 +493,12 @@ CEspConfig::CEspConfig(IProperties* inputs, IPropertyTree* envpt, IPropertyTree* } pt_iter->next(); } - - pt_iter->Release(); - pt_iter=NULL; } xpath.clear(); xpath.append("EspProtocol"); - pt_iter = m_cfg->getElements(xpath.str()); + pt_iter.setown(m_cfg->getElements(xpath.str())); if (pt_iter!=NULL) { @@ -530,15 +525,12 @@ CEspConfig::CEspConfig(IProperties* inputs, IPropertyTree* envpt, IPropertyTree* pt_iter->next(); } - - pt_iter->Release(); - pt_iter=NULL; } xpath.clear(); xpath.append("EspBinding"); - pt_iter = m_cfg->getElements(xpath.str()); + pt_iter.setown(m_cfg->getElements(xpath.str())); if (pt_iter!=NULL) { @@ -588,9 +580,6 @@ CEspConfig::CEspConfig(IProperties* inputs, IPropertyTree* envpt, IPropertyTree* pt_iter->next(); } - - pt_iter->Release(); - pt_iter=NULL; } } } diff --git a/tools/esdlcmd/esdl2ecl.cpp b/tools/esdlcmd/esdl2ecl.cpp index fa5ff0b7e80..0f6a83e0780 100644 --- a/tools/esdlcmd/esdl2ecl.cpp +++ b/tools/esdlcmd/esdl2ecl.cpp @@ -360,7 +360,7 @@ class Esdl2EclCmd : public EsdlConvertCmd int count = trees.all->getCount("esxdl"); if (trees.all->getCount("esxdl") > 0) { - IPropertyTree *file = trees.all->getPropTree("esxdl[1]"); + Owned file = trees.all->getPropTree("esxdl[1]"); if (file) { StringBuffer xmlfile; From ebe377e75d1cef11c88dee91bb37dffc52241dad Mon Sep 17 00:00:00 2001 From: Michael Gardner Date: Wed, 29 May 2024 11:35:46 -0400 Subject: [PATCH 043/151] HPCC-31901 Add Jfrog internal package releases to build-assets Signed-off-by: Michael Gardner --- .github/workflows/build-assets.yml | 43 
++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/.github/workflows/build-assets.yml b/.github/workflows/build-assets.yml index 195034d0efc..4fd56f61eb6 100644 --- a/.github/workflows/build-assets.yml +++ b/.github/workflows/build-assets.yml @@ -321,6 +321,29 @@ jobs: tag: ${{ needs.preamble.outputs.internal_tag }} artifacts: "${{ needs.preamble.outputs.folder_build }}/hpccsystems-*-internal*.deb,${{ needs.preamble.outputs.folder_build }}/hpccsystems-*-internal*.rpm" + - name: Upload Assets to Jfrog (debian internal) + if: ${{ matrix.ln && !matrix.container && contains(matrix.os, 'ubuntu') && github.repository_owner == 'hpcc-systems'}} + shell: bash + run: | + cd ${{ needs.preamble.outputs.folder_build }} + version=$(echo "${{ needs.preamble.outputs.internal_tag }}" | sed 's/internal_//') + packages=($(ls -1 hpccsystems-*.deb )) + for _package in ${packages[@]}; do + distribution=$( echo "${_package}" | sed "s/^.*${version}//" | awk -F '_' '{print $1;}' ) + curl -u${{ secrets.JFROG_USERNAME }}:${{ secrets.JFROG_PASSWORD }} -XPUT "https://${{ secrets.JFROG_REGISTRY }}/hpccpl-debian-local/pool/LN/${_package};deb.distribution=${distribution};deb.component=LN;deb.architecture=amd64" -T ${{ needs.preamble.outputs.folder_build }}/${_package} + done + + - name: Upload Assets to Jfrog (centos internal) + if: ${{ matrix.ln && !matrix.container && !contains(matrix.os, 'ubuntu') && github.repository_owner == 'hpcc-systems'}} + shell: bash + run: | + cd ${{ needs.preamble.outputs.folder_build }} + packages=($(ls -1 hpccsystems-*.rpm )) + for _package in ${packages[@]}; do + distribution=$( echo "${_package}" | awk -F '.' '{print $4;}' ) + curl -u${{ secrets.JFROG_USERNAME }}:${{ secrets.JFROG_PASSWORD }} -XPUT "https://${{ secrets.JFROG_REGISTRY }}/hpccpl-rpm-local/LN/${distribution}/x86_64/${_package}" -T ${{ needs.preamble.outputs.folder_build }}/${_package} + done + - name: Locate k8s deb file (internal) if: ${{ matrix.ln && matrix.container && !matrix.documentation }} id: ln-container @@ -470,6 +493,26 @@ jobs: tag: ${{ needs.preamble.outputs.internal_tag }} artifacts: "./build/hpccsystems-clienttools-internal*.exe,./build/hpccsystems-clienttools-internal*.msi,./build/hpccsystems-clienttools-internal*.dmg,./build/hpccsystems-clienttools-internal*.pkg,./build/hpccsystems-clienttools-internal*.tar.gz" + - name: Upload Assets to Jfrog (windows) + if: ${{ contains(matrix.os, 'windows') && github.repository_owner == 'hpcc-systems' }} + shell: bash + run: | + cd ./build + packages=($(ls -1 hpccsystems-*.exe )) + for _package in ${packages[@]}; do + curl -u${{ secrets.JFROG_USERNAME }}:${{ secrets.JFROG_PASSWORD }} "https://${{ secrets.JFROG_REGISTRY }}/hpccpl-windows-local/LN/windows/x86_64/${_package}" -T ${_package} + done + + - name: Upload Assets to Jfrog (macos) + if: ${{ contains(matrix.os, 'macos') && github.repository_owner == 'hpcc-systems' }} + shell: bash + run: | + cd ./build + packages=($(ls -1 hpccsystems-*.pkg )) + for _package in ${packages[@]}; do + curl -u${{ secrets.JFROG_USERNAME }}:${{ secrets.JFROG_PASSWORD }} "https://${{ secrets.JFROG_REGISTRY }}/hpccpl-macos-local/LN/macos/x86_64/${_package}" -T ${_package} + done + - name: Upload error logs if: ${{ failure() || cancelled() }} uses: actions/upload-artifact@v4 From 476c17cc599f69c3911c140c669951d4978faf56 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Tue, 11 Jun 2024 12:21:46 +0100 Subject: [PATCH 044/151] HPCC-32039 Protect against potential leaks in the LDAP code Signed-off-by: Gavin Halliday 
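This is the same RAII conversion applied by HPCC-31993 and HPCC-32040 earlier in the series: a raw IPropertyTreeIterator returned by getElements() leaks whenever an early return or an exception bypasses the manual Release(). A minimal sketch of the before/after idiom, using jlib's Owned smart pointer and ForEach helper (the ".//Location" xpath is taken from this patch; visit() is an illustrative placeholder, not code from the patch):

// leak-prone form being removed - any exception or early return before
// the final Release() leaks the iterator:
//     IPropertyTreeIterator *loc_iter = authconfig->getElements(".//Location");
//     ... use loc_iter ...
//     loc_iter->Release();
// RAII form being introduced - the iterator is released on every exit path:
Owned<IPropertyTreeIterator> loc_iter(authconfig->getElements(".//Location"));
ForEach(*loc_iter)
    visit(loc_iter->query()); // visit() stands in for the per-Location work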
--- system/security/LdapSecurity/ldapsecurity.cpp | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/system/security/LdapSecurity/ldapsecurity.cpp b/system/security/LdapSecurity/ldapsecurity.cpp index 7bd62e4e3d0..d9d1d9d750d 100644 --- a/system/security/LdapSecurity/ldapsecurity.cpp +++ b/system/security/LdapSecurity/ldapsecurity.cpp @@ -1272,8 +1272,7 @@ IAuthMap * CLdapSecManager::createAuthMap(IPropertyTree * authconfig, IEspSecure { CAuthMap* authmap = new CAuthMap(); - IPropertyTreeIterator *loc_iter = NULL; - loc_iter = authconfig->getElements(".//Location"); + Owned loc_iter(authconfig->getElements(".//Location")); if (loc_iter != NULL) { IPropertyTree *location = NULL; @@ -1308,8 +1307,6 @@ IAuthMap * CLdapSecManager::createAuthMap(IPropertyTree * authconfig, IEspSecure } loc_iter->next(); } - loc_iter->Release(); - loc_iter = NULL; } authmap->shareWithManager(*this, secureContext); @@ -1322,8 +1319,7 @@ IAuthMap * CLdapSecManager::createFeatureMap(IPropertyTree * authconfig, IEspSec { CAuthMap* feature_authmap = new CAuthMap(); - IPropertyTreeIterator *feature_iter = NULL; - feature_iter = authconfig->getElements(".//Feature"); + Owned feature_iter(authconfig->getElements(".//Feature")); if (feature_iter != NULL) { IPropertyTree *feature = NULL; @@ -1355,8 +1351,6 @@ IAuthMap * CLdapSecManager::createFeatureMap(IPropertyTree * authconfig, IEspSec } feature_iter->next(); } - feature_iter->Release(); - feature_iter = NULL; } feature_authmap->shareWithManager(*this, secureContext); @@ -1652,9 +1646,7 @@ LDAPSECURITY_API IAuthMap *newDefaultAuthMap(IPropertyTree* config) { CAuthMap* authmap = new CAuthMap(); - IPropertyTreeIterator *loc_iter = NULL; - loc_iter = config->getElements(".//Location"); - + Owned loc_iter(config->getElements(".//Location")); if (loc_iter != NULL) { IPropertyTree *location = NULL; @@ -1676,8 +1668,6 @@ LDAPSECURITY_API IAuthMap *newDefaultAuthMap(IPropertyTree* config) } loc_iter->next(); } - loc_iter->Release(); - loc_iter = NULL; } return authmap; From 7717063c09489a835106a1b21f19b433b803f739 Mon Sep 17 00:00:00 2001 From: El Arbi Belfarsi Date: Thu, 6 Jun 2024 11:00:37 -0400 Subject: [PATCH 045/151] HPCC-32007 Adding MD5 Sum for GitHub Actions Packages Signed-off-by: El Arbi Belfarsi --- .github/workflows/build-assets.yml | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build-assets.yml b/.github/workflows/build-assets.yml index 4fd56f61eb6..1d2dc3efac3 100644 --- a/.github/workflows/build-assets.yml +++ b/.github/workflows/build-assets.yml @@ -199,6 +199,11 @@ jobs: # docker run --rm --mount ${{ needs.preamble.outputs.mount_platform }} --mount ${{ needs.preamble.outputs.mount_build }} ${{ steps.vars.outputs.docker_tag_candidate_base }} "cmake -S /hpcc-dev/HPCC-Platform -B /hpcc-dev/build ${{ needs.preamble.outputs.cmake_docker_config }} -D$plugin=ON -DCONTAINERIZED=OFF -DCPACK_STRIP_FILES=ON" # docker run --rm --mount ${{ needs.preamble.outputs.mount_platform }} --mount ${{ needs.preamble.outputs.mount_build }} ${{ steps.vars.outputs.docker_tag_candidate_base }} "cmake --build /hpcc-dev/build --parallel $(nproc) --target package" done + cd ${{ needs.preamble.outputs.folder_build }} + packages=($(ls -1 hpccsystems-*)) + for i in "${packages[@]}"; do + md5sum "$i" > "$i.md5sum" + done - name: CMake Containerized Packages (community) if: ${{ !matrix.ln && matrix.container && !matrix.documentation }} @@ -233,7 +238,16 @@ jobs: allowUpdates: true generateReleaseNotes: false 
prerelease: ${{ contains(github.ref, '-rc') }} - artifacts: "${{ needs.preamble.outputs.folder_build }}/*.deb,${{ needs.preamble.outputs.folder_build }}/*.rpm,${{ needs.preamble.outputs.folder_build }}/Release/docs/*.zip,${{ needs.preamble.outputs.folder_build }}/Release/docs/EN_US/*.zip,${{ needs.preamble.outputs.folder_build }}/Release/docs/PT_BR/*.zip,${{ needs.preamble.outputs.folder_build }}/docs/EN_US/EclipseHelp/*.zip,${{ needs.preamble.outputs.folder_build }}/docs/EN_US/HTMLHelp/*.zip,${{ needs.preamble.outputs.folder_build }}/docs/PT_BR/HTMLHelp/*.zip" + artifacts: | + ${{ needs.preamble.outputs.folder_build }}/*.deb, + ${{ needs.preamble.outputs.folder_build }}/*.rpm, + ${{ needs.preamble.outputs.folder_build }}/Release/docs/*.zip, + ${{ needs.preamble.outputs.folder_build }}/Release/docs/EN_US/*.zip, + ${{ needs.preamble.outputs.folder_build }}/Release/docs/PT_BR/*.zip, + ${{ needs.preamble.outputs.folder_build }}/docs/EN_US/EclipseHelp/*.zip, + ${{ needs.preamble.outputs.folder_build }}/docs/EN_US/HTMLHelp/*.zip, + ${{ needs.preamble.outputs.folder_build }}/docs/PT_BR/HTMLHelp/*.zip, + ${{ needs.preamble.outputs.folder_build }}/*.md5sum - name: Locate k8s deb file (community) if: ${{ !matrix.ln && matrix.container && !matrix.documentation }} From 8ccdd7ff8de00518c33e82a69fb292f5d42d1908 Mon Sep 17 00:00:00 2001 From: Shamser Ahmed Date: Mon, 3 Jun 2024 11:13:50 +0100 Subject: [PATCH 046/151] HPCC-31895 Fix deserialization and merging of nested stats Signed-off-by: Shamser Ahmed --- system/jlib/jstats.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/jlib/jstats.cpp b/system/jlib/jstats.cpp index 12f156a5fc1..023a5b8ffda 100644 --- a/system/jlib/jstats.cpp +++ b/system/jlib/jstats.cpp @@ -2709,7 +2709,7 @@ void CRuntimeStatisticCollection::deserialize(MemoryBuffer& in) in.read(hasNested); if (hasNested) { - ensureNested().deserializeMerge(in); + ensureNested().deserialize(in); } } @@ -2730,7 +2730,7 @@ void CRuntimeStatisticCollection::deserializeMerge(MemoryBuffer& in) in.read(hasNested); if (hasNested) { - ensureNested().deserialize(in); + ensureNested().deserializeMerge(in); } } From 917dcf28ba58b3fa106cd6037654d16a456862cc Mon Sep 17 00:00:00 2001 From: g-pan Date: Tue, 11 Jun 2024 20:33:25 -0400 Subject: [PATCH 047/151] HPCC-32045 Fix URLs in source XML documents Signed-off-by: g-pan --- .../ContainerizedMods/ConfigureValues.xml | 2 +- .../ContainerizedMods/ContainerLogging.xml | 25 ++++++++++--------- docs/EN_US/JDBC-driver/hpcc-jdbc-driver.xml | 2 +- .../ContainerizedMods/ConfigureValues.xml | 2 +- .../ContainerizedMods/ContainerLogging.xml | 4 +-- docs/PT_BR/JDBC-driver/hpcc-jdbc-driver.xml | 2 +- 6 files changed, 19 insertions(+), 18 deletions(-) diff --git a/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml b/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml index 80e62634f7f..f3b52e78f51 100644 --- a/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml +++ b/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml @@ -1527,7 +1527,7 @@ thor: and Placements please review our developer documentation: https://github.com/hpcc-systems/HPCC-Platform/blob/master/helm/hpcc/docs/placements.md + url="https://github.com/hpcc-systems/HPCC-Platform/blob/master/helm/hpcc/docs/placements.md">https://github.com/hpcc-systems/HPCC-Platform/blob/master/helm/hpcc/docs/placements.md Taints and Tolerations Examples diff --git a/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ContainerLogging.xml 
b/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ContainerLogging.xml index f623a8668fe..3bcbccdd27c 100644 --- a/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ContainerLogging.xml +++ b/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ContainerLogging.xml @@ -112,20 +112,20 @@ - + - + + fileref="../../images/caution.png"/> NOTE: The elastic4hpcclogs chart does not enable any security. The responsibility of determining the need for security and enabling security on any deployed Elastic Stack instance or - components is up to you and your organization. + components is up to you and your organization. @@ -152,7 +152,7 @@ Systems charts and repositories. The elastic4hpcclogs chart is among them. - + @@ -186,14 +186,14 @@ PLEASE NOTE: Elastic Search declares PVC(s) which might require explicit manual - + - + + fileref="../../images/caution.png"/> IMPORTANT: PLEASE NOTE: Elastic Search declares PVC(s) which might require @@ -230,7 +230,7 @@ PLEASE NOTE: Elastic Search declares PVC(s) which might require explicit manual myelk-filebeat-6wd2g 1/1 Running 0 myelk-kibana-68688b4d4d-d489b 1/1 Running 0 - + Once all the pods are indicating a 'ready' state and 'Running', including the three components for filebeats, Elastic Search, and @@ -282,12 +282,13 @@ myelk-kibana LoadBalancer 10.110.129.199 localhost 5601:31465/TCP 68m interface. For more information about using the Elastic-Kibana interface please refer to the corresponding documentation: - https://www.elastic.co/ + https://www.elastic.co/ and https://www.elastic.co/elastic-stack/ + url="https://www.elastic.co/elastic-stack/">https://www.elastic.co/elastic-stack/ @@ -584,7 +585,7 @@ ContainerLog Sample output - + More complex queries can be formulated to fetch specific information provided in any of the log columns including unformatted diff --git a/docs/EN_US/JDBC-driver/hpcc-jdbc-driver.xml b/docs/EN_US/JDBC-driver/hpcc-jdbc-driver.xml index 20fcbcef203..5632665623f 100644 --- a/docs/EN_US/JDBC-driver/hpcc-jdbc-driver.xml +++ b/docs/EN_US/JDBC-driver/hpcc-jdbc-driver.xml @@ -156,7 +156,7 @@ Copyright is used for the copyright line on title page. The ID attribute is Copy WsSQL must be installed on the target HPCC Platform to utilize a JDBC connection. See http://hpccsystems.com/permlink/wssql for + url="http://hpccsystems.com/permlink/wssql">http://hpccsystems.com/permlink/wssql for details. diff --git a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml index a73aa50e7b5..bd6474cd740 100644 --- a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml +++ b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml @@ -996,7 +996,7 @@ desenvolvedor: https://github.com/hpcc-systems/HPCC-Platform/blob/master/helm/hpcc/docs/placements.md + url="https://github.com/hpcc-systems/HPCC-Platform/blob/master/helm/hpcc/docs/placements.md">https://github.com/hpcc-systems/HPCC-Platform/blob/master/helm/hpcc/docs/placements.md Placement diff --git a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ContainerLogging.xml b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ContainerLogging.xml index ca7c57a612f..1491815ceda 100644 --- a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ContainerLogging.xml +++ b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ContainerLogging.xml @@ -273,12 +273,12 @@ myelk-kibana LoadBalancer 10.110.129.199 localhost 5601:31465/TCP 68m Kibana deve ser consultada para obter mais informações sobre como usar a interface do Kibana. 
Por favor, veja: - https://www.elastic.co/ + https://www.elastic.co/ e https://www.elastic.co/elastic-stack/ + url="https://www.elastic.co/elastic-stack/">https://www.elastic.co/elastic-stack/ Incluídos na documentação completa também estão vídeos de início rápido e outros recursos úteis. diff --git a/docs/PT_BR/JDBC-driver/hpcc-jdbc-driver.xml b/docs/PT_BR/JDBC-driver/hpcc-jdbc-driver.xml index 5fd9aef7fad..3bc51c2c874 100644 --- a/docs/PT_BR/JDBC-driver/hpcc-jdbc-driver.xml +++ b/docs/PT_BR/JDBC-driver/hpcc-jdbc-driver.xml @@ -160,7 +160,7 @@ Copyright is used for the copyright line on title page. The ID attribute is Copy O WsSQL deve ser instalado na plataforma HPCC de destino para utilizar uma conexão JDBC. Veja http://hpccsystems.com/permlink/wssql para + url="http://hpccsystems.com/permlink/wssql">http://hpccsystems.com/permlink/wssql para detalhes. From 1bb952e2644b4543111271770a372efdf0db940b Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Tue, 28 May 2024 16:01:09 +0100 Subject: [PATCH 048/151] HPCC-31951 Add extra logging and protection to CDeltaWriter Add guards and extra logging to the CDeltaWriter in the event it runs into trouble. Signed-off-by: Jake Smith --- dali/base/dasds.cpp | 101 ++++++++++++++++++++++++++------------------ 1 file changed, 61 insertions(+), 40 deletions(-) diff --git a/dali/base/dasds.cpp b/dali/base/dasds.cpp index 3e35e9a08b8..9478cbe2665 100644 --- a/dali/base/dasds.cpp +++ b/dali/base/dasds.cpp @@ -1351,7 +1351,11 @@ class CDeltaWriter : implements IThreaded if (!writeRequested) { if (pendingSz) // the writer thread is idle, but there are some transactions that the writer hasn't seen yet + { + // NB: will never happen in default configuration (i.e. unless @deltaSaveThresholdSecs is enabled) + // because addToQueue (in default config) will always call requestAsyncWrite() as soon as anything is queued requestAsyncWrite(); + } } // NB: this is not an else, because if called, requestAsyncWrite() above will set writeRequested=true if (writeRequested) @@ -1364,7 +1368,11 @@ class CDeltaWriter : implements IThreaded { // this should not be here long, but log just in case while (!allWrittenSem.wait(10000)) + { + if (aborted) // can only happen if CDeltaWriter thread is stopping + return; WARNLOG("Waiting on CDeltaWriter to flush transactions"); + } } } void stop() @@ -1380,53 +1388,63 @@ class CDeltaWriter : implements IThreaded // IThreaded virtual void threadmain() override { - while (!aborted) + PROGLOG("CDeltaWriter thread started"); + try { - bool semTimedout = false; - if (saveThresholdSecs) - semTimedout = !pendingTransactionsSem.wait(saveThresholdSecs * 1000); - else - pendingTransactionsSem.wait(); - - if (aborted) - break; - // keep going whilst there's things pending - while (true) + while (!aborted) { - CLeavableCriticalBlock b(pendingCrit); - std::queue> todo = std::move(pending); - if (0 == todo.size()) + bool semTimedout = false; + if (saveThresholdSecs) + semTimedout = !pendingTransactionsSem.wait(saveThresholdSecs * 1000); + else + pendingTransactionsSem.wait(); + + if (aborted) + break; + // keep going whilst there's things pending + while (true) { - if (writeRequested) + CLeavableCriticalBlock b(pendingCrit); + std::queue> todo = std::move(pending); + if (0 == todo.size()) { - // NB: if here, implies someone signalled via requestAsyncWrite() + if (writeRequested) + { + // NB: if here, implies someone signalled via requestAsyncWrite() - // if reason we're here is because sem timedout, consume the signal that was sent - if (semTimedout) -
pendingTransactionsSem.wait(); + // if reason we're here is because sem timedout, consume the signal that was sent + if (semTimedout) + pendingTransactionsSem.wait(); - writeRequested = false; - } - if (signalWhenAllWritten) - { - signalWhenAllWritten = false; - allWrittenSem.signal(); + writeRequested = false; + if (signalWhenAllWritten) // can only be true if writeRequested was true + { + signalWhenAllWritten = false; + allWrittenSem.signal(); + } + } + break; } - break; + pendingSz = 0; + // Hold blockedSaveCrit before releasing pendingCrit, because need to ensure this saves ahead + // of other transactions building up in addToQueue + CHECKEDCRITICALBLOCK(blockedSaveCrit, fakeCritTimeout); // because if Dali is saving state (::blockingSave), it will clear pending + b.leave(); + + // Because blockedSaveCrit is held, it will also block 'synchronous save' (see addToQueue) + // i.e. if stuck here, the transactions will start building up, and trigger a 'Forced synchronous save', + // which will in turn block. This must complete! + while (!save(todo)) // if temporarily blocked, wait a bit (blocking window is short) + MilliSleep(1000); } - pendingSz = 0; - // Hold blockedSaveCrit before releasing pendingCrit, because need to ensure this saves ahead - // of other transactions building up in addToQueue - CHECKEDCRITICALBLOCK(blockedSaveCrit, fakeCritTimeout); // because if Dali is saving state (::blockingSave), it will clear pending - b.leave(); - - // Because blockedSaveCrit is held, it will also block 'synchronous save' (see addToQueue) - // i.e. if stuck here, the transactions will start building up, and trigger a 'Forced synchronous save', - // which will in turn block. This must complete! - while (!save(todo)) // if temporarily blocked, wait a bit (blocking window is short) - MilliSleep(1000); } } + catch (IException *e) + { + DISLOG(e, "CDeltaWriter: thread exited. Remedial action must be taken. Save, shutdown or restart ASAP."); + e->Release(); + } + aborted = true; } }; @@ -2198,6 +2216,9 @@ void CDeltaWriter::addToQueue(CTransactionItem *item) } else // here if exceeded transactionQueueLimit, transactionMaxMem or exceeded time threshold (deltaSaveThresholdSecs) { + if (aborted) // critical situation if delta writer is no longer running (there will have been previous errors) + DISLOG("CDeltaWriter thread was aborted! Dali is compromised. Save, shutdown or restart ASAP."); + ++totalQueueLimitHits; // force a synchronous save CCycleTimer timer; @@ -8844,7 +8865,7 @@ bool CDeltaWriter::save(std::queue> &todo) } catch (IException *e) { - OERRLOG(e, "save: failed to touch delta in progress file"); + DISLOG(e, "save: failed to touch delta in progress file"); e->Release(); } // here if exception only @@ -8907,14 +8928,14 @@ bool CDeltaWriter::save(std::queue> &todo) } catch (IException *e) { - OERRLOG("save: failure whilst committing deltas to disk! Remedial action must be taken"); + DISLOG(e, "save: failure whilst committing deltas to disk!
Remedial action must be taken"); e->Release(); // this is really an attempt at disaster recovery at this point forceBlockingSave = true; } if (forceBlockingSave) { - OWARNLOG("Due to earlier failures, attempting forced/blocking save of Dali store"); + DISLOG("Due to earlier failures, attempting forced/blocking save of Dali store"); while (todo.size()) todo.pop(); SDSManager->saveStore(nullptr, false, false); From b2a1475144e52bbbe67855a764dd63b8e3213e6d Mon Sep 17 00:00:00 2001 From: g-pan Date: Tue, 28 May 2024 17:21:37 -0400 Subject: [PATCH 049/151] HPCC-22453 Document Import ZAP Feature Signed-off-by: g-pan --- docs/EN_US/HPCCSystemAdmin/SA-Mods/WUTool.xml | 47 +++++++++++++------ 1 file changed, 33 insertions(+), 14 deletions(-) diff --git a/docs/EN_US/HPCCSystemAdmin/SA-Mods/WUTool.xml b/docs/EN_US/HPCCSystemAdmin/SA-Mods/WUTool.xml index b75c459406d..7b0a33dbbc0 100644 --- a/docs/EN_US/HPCCSystemAdmin/SA-Mods/WUTool.xml +++ b/docs/EN_US/HPCCSystemAdmin/SA-Mods/WUTool.xml @@ -17,15 +17,15 @@ - + - + Actions - + @@ -82,6 +82,25 @@ Restore from xml files. [INCLUDEFILES=1] + + importzap + + Imports ZAP report to be able to recreate a workunit and + replicate the reported issue. Importzap requires the following + parameters. + <zapreport-filename><output-helper-directory> + temporary directory to unpack the zap report + into<zapreport-password> [optional] + + + + + postmortem <workunit> + + <workunit> PMD=<dir> - Add post-mortem + info + + orphans @@ -171,16 +190,16 @@ - + - + info parameters - + @@ -193,7 +212,7 @@ - + Which scopes are matched: @@ -220,7 +239,7 @@ - + NOTE: scope, stype and id cannot be specified in the same filter @@ -249,7 +268,7 @@ - + Which scopes are include in the results: @@ -276,7 +295,7 @@ - + Which information about a scope is reported: @@ -286,25 +305,25 @@ properties[statistics|hints| attributes|scope|all]* - + statistic[<statistic-kind>|none|all]* - + attribute[<attribute-name>|none|all]* - + hint[<hint-name>]* - + From 8e5945a7db1b3f215254185e6190481d8d7b028b Mon Sep 17 00:00:00 2001 From: "Dan S. Camper" Date: Wed, 12 Jun 2024 13:44:52 -0500 Subject: [PATCH 050/151] HPCC-32049 Cleanup old log files in Roxie runtime directory --- initfiles/bin/init_roxie.in | 3 +++ 1 file changed, 3 insertions(+) diff --git a/initfiles/bin/init_roxie.in b/initfiles/bin/init_roxie.in index eadb271a4bb..f86d84b5ff7 100755 --- a/initfiles/bin/init_roxie.in +++ b/initfiles/bin/init_roxie.in @@ -68,6 +68,9 @@ killed() exit 255 } +# Delete older *.stdout and *.stderr log files +find . 
-name "[0-9]*_[0-9]*.std*" -mtime +3 -delete + trap "killed" SIGINT SIGTERM SIGKILL log "Calling nohup roxie --topology=RoxieTopology.xml --logfile --restarts=$restarts --stdlog=0 2>>$logfilename.stderr 1>>$logfilename.stdout &" nohup roxie --topology=RoxieTopology.xml --logfile --restarts=$restarts --stdlog=0 2>>$logfilename.stderr 1>>$logfilename.stdout & From f49686a57f67559e8d3553263e79e70bd69a8ad9 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Mon, 3 Jun 2024 15:22:54 +0100 Subject: [PATCH 051/151] HPCC-31989 Implement new streaming input and output classes for spills Signed-off-by: Gavin Halliday --- common/thorhelper/thorread.cpp | 2 +- ecl/hthor/hthorkey.cpp | 2 +- fs/dafsserver/dafsserver.cpp | 4 +- fs/dafsstream/dafsstream.cpp | 19 + roxie/ccd/ccdactivities.cpp | 2 +- system/jlib/jfcmp.hpp | 6 + system/jlib/jfile.cpp | 66 ++- system/jlib/jfile.hpp | 37 +- system/jlib/jlz4.cpp | 41 ++ system/jlib/jlzw.cpp | 25 + system/jlib/jlzw.hpp | 6 + system/jlib/jlzw.ipp | 1 + system/jlib/jstream.cpp | 920 +++++++++++++++++++++++++++++ system/jlib/jstream.hpp | 87 +++ testing/unittests/CMakeLists.txt | 1 + testing/unittests/jstreamtests.cpp | 817 +++++++++++++++++++++++++ 16 files changed, 1985 insertions(+), 51 deletions(-) create mode 100644 testing/unittests/jstreamtests.cpp diff --git a/common/thorhelper/thorread.cpp b/common/thorhelper/thorread.cpp index cc8386ffd21..2b48cab2fcf 100644 --- a/common/thorhelper/thorread.cpp +++ b/common/thorhelper/thorread.cpp @@ -808,7 +808,7 @@ class ExternalFormatDiskRowReader : public LocalDiskRowReader unsigned __int64 startPos; cursor.read(startPos); if (inputStream->tell() != startPos) - inputStream->reset(startPos); + inputStream->reset(startPos, UnknownOffset); } virtual offset_t getLocalOffset() override diff --git a/ecl/hthor/hthorkey.cpp b/ecl/hthor/hthorkey.cpp index 1c9094a3a63..af16c3ce623 100644 --- a/ecl/hthor/hthorkey.cpp +++ b/ecl/hthor/hthorkey.cpp @@ -2544,7 +2544,7 @@ class CHThorCsvFetchActivity : public CHThorFetchActivityBase, public IFlatFetch virtual void processFetch(FetchRequest const * fetch, offset_t pos, ISerialStream *rawStream) { - rawStream->reset(pos); + rawStream->reset(pos, UnknownOffset); CriticalBlock procedure(transformCrit); size32_t maxRowSize = 10*1024*1024; // MORE - make configurable unsigned thisLineLength = csvSplitter.splitLine(rawStream, maxRowSize); diff --git a/fs/dafsserver/dafsserver.cpp b/fs/dafsserver/dafsserver.cpp index 9e788fe4447..408b253a172 100644 --- a/fs/dafsserver/dafsserver.cpp +++ b/fs/dafsserver/dafsserver.cpp @@ -1301,7 +1301,7 @@ class CRemoteStreamReadBaseActivity : public CRemoteDiskBaseActivity, implements { if (inputStream->tell() != startPos) { - inputStream->reset(startPos); + inputStream->reset(startPos, UnknownOffset); return true; } return false; @@ -1411,7 +1411,7 @@ class CRemoteDiskReadActivity : public CRemoteStreamReadBaseActivity { if (prefetchBuffer.tell() != startPos) { - inputStream->reset(startPos); + inputStream->reset(startPos, UnknownOffset); prefetchBuffer.clearStream(); prefetchBuffer.setStream(inputStream); return true; diff --git a/fs/dafsstream/dafsstream.cpp b/fs/dafsstream/dafsstream.cpp index f0c1ea740be..65ff7148bb1 100644 --- a/fs/dafsstream/dafsstream.cpp +++ b/fs/dafsstream/dafsstream.cpp @@ -879,6 +879,25 @@ class CDFUPartReader : public CDaFileSrvClientBase, implements IDFUFilePartReade currentReadPos += r; } } + virtual size32_t read(size32_t len, void * ptr) override + { + size32_t originalLen = len; + while (len) + { + if (0 == bufRemaining) + 
{ + refill(); + if (0 == bufRemaining) + return originalLen-len; + } + size32_t r = len>bufRemaining ? bufRemaining : len; + memcpy(ptr, replyMb.readDirect(r), r); + len -= r; + bufRemaining -= r; + currentReadPos += r; + } + return originalLen; + } virtual bool eos() override { if (!eoi) diff --git a/roxie/ccd/ccdactivities.cpp b/roxie/ccd/ccdactivities.cpp index 7788c0ca158..01a3fa91d90 100644 --- a/roxie/ccd/ccdactivities.cpp +++ b/roxie/ccd/ccdactivities.cpp @@ -3897,7 +3897,7 @@ class CRoxieCSVFetchActivity : public CRoxieFetchActivityBase virtual size32_t doFetch(ARowBuilder & rowBuilder, offset_t pos, offset_t rawpos, void *inputData) { IHThorCsvFetchArg *h = (IHThorCsvFetchArg *) helper; - rawStream->reset(pos); + rawStream->reset(pos, UnknownOffset); size32_t rowSize = 4096; // MORE - make configurable for (;;) { diff --git a/system/jlib/jfcmp.hpp b/system/jlib/jfcmp.hpp index 49f4dbeb69c..1e355bd4855 100644 --- a/system/jlib/jfcmp.hpp +++ b/system/jlib/jfcmp.hpp @@ -20,6 +20,7 @@ #include "platform.h" #include "jlzw.hpp" +#include "jexcept.hpp" #define COMMITTED ((size32_t)-1) @@ -191,6 +192,11 @@ class jlib_decl CFcmpCompressor : public CSimpleInterfaceOf return 0; } + virtual size32_t compressDirect(size32_t destSize, void * dest, size32_t srcSize, const void * src, size32_t * numCompressed) override + { + throwUnimplemented(); + } + virtual void * bufptr() override { assertex(!inbuf); // i.e. closed diff --git a/system/jlib/jfile.cpp b/system/jlib/jfile.cpp index d4f3689163c..e25bbac59e7 100644 --- a/system/jlib/jfile.cpp +++ b/system/jlib/jfile.cpp @@ -6250,12 +6250,13 @@ class CSerialStreamBase : implements ISerialStream, public CInterface } } - void getreadnext(size32_t len, void * ptr) __attribute__((noinline)) + size32_t getreadnext(size32_t len, void * ptr, bool failAtEnd) __attribute__((noinline)) { bufbase += bufmax; bufpos = 0; bufmax = 0; size32_t rd = 0; + size32_t totalRead = 0; if (!eoinput) { //If reading >= bufsize, read any complete blocks directly into the target if (len>=bufsize) { @@ -6264,25 +6265,33 @@ class CSerialStreamBase : implements ISerialStream, public CInterface bufbase += rd; if (rd!=tord) { eoinput = true; - PrintStackReport(); - IERRLOG("CFileSerialStream::get read past end of stream.1 (%u,%u) %s",rd,tord,eoinput?"eoinput":""); - throw MakeStringException(-1,"CFileSerialStream::get read past end of stream"); + if (failAtEnd) + { + PrintStackReport(); + IERRLOG("CFileSerialStream::get read past end of stream.1 (%u,%u) %s",rd,tord,eoinput?"eoinput":""); + throw MakeStringException(-1,"CFileSerialStream::get read past end of stream"); + } } len -= rd; + totalRead += rd; if (!len) - return; + return totalRead; ptr = (byte *)ptr+rd; } const void *p = dopeek(len,rd); if (len<=rd) { memcpy(ptr,p,len); bufpos += len; - return; + return totalRead + len; } } - PrintStackReport(); - IERRLOG("CFileSerialStream::get read past end of stream.2 (%u,%u) %s",len,rd,eoinput?"eoinput":""); - throw MakeStringException(-1,"CFileSerialStream::get read past end of stream"); + if (failAtEnd) + { + PrintStackReport(); + IERRLOG("CFileSerialStream::get read past end of stream.2 (%u,%u) %s",len,rd,eoinput?"eoinput":""); + throw MakeStringException(-1,"CFileSerialStream::get read past end of stream"); + } + return 0; } protected: @@ -6339,7 +6348,21 @@ class CSerialStreamBase : implements ISerialStream, public CInterface bufpos += cpy; return; } - return getreadnext(len, (byte *)ptr+cpy); + getreadnext(len, (byte *)ptr+cpy, true); + } + + virtual size32_t read(size32_t 
len, void * ptr) override + { + size32_t cpy = bufmax-bufpos; + if (cpy>len) + cpy = len; + memcpy(ptr,(const byte *)buf+bufpos,cpy); + len -= cpy; + if (len==0) { + bufpos += cpy; + return cpy; + } + return getreadnext(len, (byte *)ptr+cpy, false) + cpy; } virtual bool eos() override @@ -6600,6 +6623,18 @@ class CMemoryMappedSerialStream: implements ISerialStream, public CInterface mmofs += len; } + virtual size32_t read(size32_t len, void * ptr) override + { + memsize_t left = mmsize-mmofs; + if (len>left) + len = left; + if (tally) + tally->process(mmofs,len,mmbase+mmofs); + memcpy(ptr,mmbase+mmofs,len); + mmofs += len; + return len; + } + virtual bool eos() override { return (mmsize<=mmofs); @@ -6662,6 +6697,17 @@ class CMemoryBufferSerialStream: implements ISerialStream, public CInterface memcpy(ptr,data,len); } + virtual size32_t read(size32_t len, void * ptr) override + { + if (len>buffer.remaining()) + len = buffer.remaining(); + const void * data = buffer.readDirect(len); + if (tally) + tally->process(buffer.getPos()-len,len,data); + memcpy(ptr,data,len); + return len; + } + virtual bool eos() override { return buffer.remaining() == 0; diff --git a/system/jlib/jfile.hpp b/system/jlib/jfile.hpp index 71e5f7527b3..0020f759ef7 100644 --- a/system/jlib/jfile.hpp +++ b/system/jlib/jfile.hpp @@ -29,6 +29,7 @@ #include "jtime.hpp" #include "jsocket.hpp" #include "jstatcodes.h" +#include "jstream.hpp" interface IFile; interface IFileIO; @@ -309,42 +310,6 @@ extern jlib_decl IDiscretionaryLock *createDiscretionaryLock(IFileIO *fileio); // useful stream based reader -interface ISerialStream: extends IInterface -{ - virtual const void * peek(size32_t wanted,size32_t &got) = 0; // try and ensure wanted bytes are available. - // if gotwanted then got is size available in buffer - - virtual void get(size32_t len, void * ptr) = 0; // exception if no data available - virtual bool eos() = 0; // no more data - virtual void skip(size32_t sz) = 0; - virtual offset_t tell() const = 0; - virtual void reset(offset_t _offset,offset_t _flen=(offset_t)-1) = 0; // input stream has changed - restart reading -}; - -/* example of reading a nul terminated string using ISerialStream peek and skip -{ - for (;;) { - const char *s = peek(1,got); - if (!s) - break; // eof before nul detected; - const char *p = s; - const char *e = p+got; - while (p!=e) { - if (!*p) { - out.append(p-s,s); - skip(p-s+1); // include nul - return; - } - p++; - } - out.append(got,s); - skip(got); - } -} -*/ - - interface IFileSerialStreamCallback // used for CRC tallying { diff --git a/system/jlib/jlz4.cpp b/system/jlib/jlz4.cpp index 59ec73bdf12..2c32f362c15 100644 --- a/system/jlib/jlz4.cpp +++ b/system/jlib/jlz4.cpp @@ -154,6 +154,36 @@ class CLZ4Compressor final : public CFcmpCompressor return compressedSize + 3 * sizeof(size32_t); } + virtual size32_t compressDirect(size32_t destSize, void * dest, size32_t srcSize, const void * src, size32_t * numCompressed) override + { + dbgassertex(srcSize != 0); + int compressedSize; + if (numCompressed) + { + //Write as much data as possible into the target buffer - update numCompressed with size actually written + int numRead = srcSize; + if (hc) + { + MemoryAttr state(LZ4_sizeofStateHC()); + compressedSize = LZ4_compress_HC_destSize(state.mem(), (const char *)src, (char *)dest, &numRead, destSize, hcLevel); + } + else + { + compressedSize = LZ4_compress_destSize((const char *)src, (char *)dest, &numRead, destSize); + } + *numCompressed = numRead; + } + else + { + if (hc) + compressedSize = 
LZ4_compress_HC((const char *)src, (char *)dest, srcSize, destSize, hcLevel); + else + compressedSize = LZ4_compress_default((const char *)src, (char *)dest, srcSize, destSize); + } + + return compressedSize; + } + virtual CompressionMethod getCompressionMethod() const override { return hc ? COMPRESS_METHOD_LZ4HC : COMPRESS_METHOD_LZ4; } public: CLZ4Compressor(const char * options, bool _hc) : hc(_hc) @@ -275,6 +305,17 @@ class jlib_decl CLZ4Expander : public CFcmpExpander throw MakeStringException(0, "LZ4Expander - corrupt data(3) %d %d",written,szchunk); return written; } + + virtual size32_t expandDirect(size32_t destSize, void * dest, size32_t srcSize, const void * src) override + { + assertex(destSize != 0); + return LZ4_decompress_safe((const char *)src, (char *)dest, srcSize, destSize); + } + + virtual bool supportsBlockDecompression() const override + { + return true; + } }; void LZ4CompressToBuffer(MemoryBuffer & out, size32_t len, const void * src) diff --git a/system/jlib/jlzw.cpp b/system/jlib/jlzw.cpp index de2d4ef3cb0..d0fb75762ff 100644 --- a/system/jlib/jlzw.cpp +++ b/system/jlib/jlzw.cpp @@ -463,6 +463,16 @@ size32_t CExpanderBase::expandNext(MemoryBuffer & target) return 0; } +size32_t CExpanderBase::expandDirect(size32_t destSize, void * dest, size32_t srcSize, const void * src) +{ + throwUnimplemented(); +} + +bool CExpanderBase::supportsBlockDecompression() const +{ + return false; +} + CLZWExpander::CLZWExpander(bool _supportbigendian) { outbuf = NULL; @@ -1410,6 +1420,11 @@ class jlib_decl CRDiffCompressor : public ICompressor, public CInterface virtual size32_t compressBlock(size32_t destSize, void * dest, size32_t srcSize, const void * src) override { return 0; } + virtual size32_t compressDirect(size32_t destSize, void * dest, size32_t srcSize, const void * src, size32_t * numCompressed) override + { + throwUnimplemented(); + } + virtual bool adjustLimit(size32_t newLimit) override { assertex(bufalloc == 0 && !outBufMb); // Only supported when a fixed size buffer is provided @@ -1723,6 +1738,11 @@ class jlib_decl CRandRDiffCompressor : public ICompressor, public CInterface virtual size32_t compressBlock(size32_t destSize, void * dest, size32_t srcSize, const void * src) override { return 0; } + virtual size32_t compressDirect(size32_t destSize, void * dest, size32_t srcSize, const void * src, size32_t * numCompressed) override + { + throwUnimplemented(); + } + inline size32_t maxcompsize(size32_t s) { return s+((s+254)/255)*2; } virtual size32_t write(const void *buf,size32_t buflen) override @@ -2721,6 +2741,11 @@ class CAESCompressor : implements ICompressor, public CInterface virtual size32_t compressBlock(size32_t destSize, void * dest, size32_t srcSize, const void * src) override { return 0; } + virtual size32_t compressDirect(size32_t destSize, void * dest, size32_t srcSize, const void * src, size32_t * numCompressed) override + { + throwUnimplemented(); + } + virtual void close() override { comp->close(); diff --git a/system/jlib/jlzw.hpp b/system/jlib/jlzw.hpp index daa3f1614b0..1c3402d3318 100644 --- a/system/jlib/jlzw.hpp +++ b/system/jlib/jlzw.hpp @@ -51,6 +51,8 @@ interface jlib_decl ICompressor : public IInterface virtual void close()=0; virtual size32_t write(const void *buf,size32_t len)=0; virtual size32_t compressBlock(size32_t destSize, void * dest, size32_t srcSize, const void * src) = 0; + // Like compressBlock, but adds no internal header. 
If numCompressed is not null, compress as much as will fit + virtual size32_t compressDirect(size32_t destSize, void * dest, size32_t srcSize, const void * src, size32_t * numCompressed) = 0; virtual void * bufptr()=0; virtual size32_t buflen()=0; @@ -71,6 +73,8 @@ interface jlib_decl IExpander : public IInterface virtual size32_t buflen()=0; virtual size32_t expandFirst(MemoryBuffer & target, const void * src) = 0; virtual size32_t expandNext(MemoryBuffer & target) = 0; + virtual size32_t expandDirect(size32_t destSize, void * dest, size32_t srcSize, const void * src) = 0; + virtual bool supportsBlockDecompression() const = 0; }; @@ -93,6 +97,8 @@ class jlib_decl CExpanderBase : public CInterfaceOf //Provide default implementations virtual size32_t expandFirst(MemoryBuffer & target, const void * src) override; virtual size32_t expandNext(MemoryBuffer & target) override; + virtual size32_t expandDirect(size32_t destSize, void * dest, size32_t srcSize, const void * src) override; + virtual bool supportsBlockDecompression() const override; }; diff --git a/system/jlib/jlzw.ipp b/system/jlib/jlzw.ipp index 1f562b7d6cf..b80b157a677 100644 --- a/system/jlib/jlzw.ipp +++ b/system/jlib/jlzw.ipp @@ -58,6 +58,7 @@ public: virtual bool supportsBlockCompression() const override { return false; } virtual bool supportsIncrementalCompression() const override { return true; } virtual size32_t compressBlock(size32_t destSize, void * dest, size32_t srcSize, const void * src) override { return 0; } + virtual size32_t compressDirect(size32_t destSize, void * dest, size32_t srcSize, const void * src, size32_t * numCompressed) override { throwUnimplemented(); } protected: void flushbuf(); diff --git a/system/jlib/jstream.cpp b/system/jlib/jstream.cpp index c108bbc7a3c..fa76a9632f8 100644 --- a/system/jlib/jstream.cpp +++ b/system/jlib/jstream.cpp @@ -26,6 +26,7 @@ #ifdef _WIN32 #include #endif +#include "jlzw.hpp" CByteInputStream::CByteInputStream() { @@ -282,3 +283,922 @@ IByteInputStream *createInputStream(int handle) return new CFileInputStream(handle); } + +//=========================================================================== + +// This class ensures that data is read in fixed block sizes - NOT a fixed buffer size. +// This means the buffer size is likely to be bigger than the block size - the class is passed +// an initial estimate for the potential overlap. + +class CBlockedSerialInputStream : public CInterfaceOf +{ +public: + CBlockedSerialInputStream(ISerialInputStream * _input, size32_t _blockReadSize) + : input(_input), blockReadSize(_blockReadSize) + { + //Allocate the input buffer slightly bigger than the block read size, so that a small peek at the end of a block + //does not have to expand the block. (Avoid extra allocation for pathological unittests where blockReadSize <= 1024) + size32_t extraSize = (blockReadSize > 1024) ?
1024 : 0; + buffer.allocate(blockReadSize + extraSize); + } + + virtual size32_t read(size32_t len, void * ptr) override + { + size32_t sizeRead = 0; + byte * target = (byte *)ptr; + if (likely(bufferOffset < dataLength)) + { + size32_t toCopy = std::min(len, available()); + memcpy(target, data(bufferOffset), toCopy); + bufferOffset += toCopy; + if (likely(toCopy == len)) + return toCopy; + + sizeRead = toCopy; + } + + //While there are blocks larger than the buffer size read directly into the target buffer + while (sizeRead + blockReadSize <= len) + { + size32_t got = readNextBlock(blockReadSize, target+sizeRead); + if ((got == 0) || (got == BufferTooSmall)) + break; + sizeRead += got; + nextBlockOffset += got; + } + + while ((sizeRead < len) && !endOfStream) + { + assertex(bufferOffset == dataLength); + // NOTE: This could read less than a block, even if a whole block was requested. + // Will set endOfStream if there is no more to read - do not special case end-of-file here. + readNextBlock(); + + size32_t toCopy = std::min(len-sizeRead, available()); + memcpy(target + sizeRead, data(bufferOffset), toCopy); + bufferOffset += toCopy; + sizeRead += toCopy; + } + return sizeRead; + } + + virtual const void * peek(size32_t wanted, size32_t &got) override + { + if (likely(wanted <= available())) + { + got = available(); + return data(bufferOffset); + } + //Split this into a separate non-inlined function so that the fastpath function does not need to protect the stack + return peekAndExpand(wanted, got); + } + + const void * peekAndExpand(size32_t wanted, size32_t &got) __attribute__((noinline)) + { + while (unlikely(wanted > available())) + { + if (endOfStream) + break; + readNextBlock(); // will be appended onto the end of the existing buffer + } + got = available(); + return data(bufferOffset); + } + + virtual void get(size32_t len, void * ptr) override + { + size32_t numRead = read(len, ptr); + if (numRead != len) + throw makeStringExceptionV(-1, "End of input stream for read of %u bytes at offset %llu", len, tell()-numRead); + } + + virtual bool eos() override + { + return endOfStream && (dataLength == bufferOffset); + } + + virtual void skip(size32_t sz) override + { + size32_t remaining = available(); + if (likely(sz <= remaining)) + { + bufferOffset += sz; + } + else + { + bufferOffset = dataLength; + skipInput(sz - remaining); + } + } + virtual offset_t tell() const override + { + dbgassertex(nextBlockOffset == input->tell()); + return nextBlockOffset + bufferOffset - dataLength; + } + + virtual void reset(offset_t _offset, offset_t _flen) + { + endOfStream = false; + nextBlockOffset = _offset; + bufferOffset = 0; + dataLength = 0; + input->reset(_offset, _flen); + } + +protected: + inline byte * data(size32_t offset) { return (byte *)buffer.get() + offset; } + inline size32_t available() const { return dataLength - bufferOffset; } + + size32_t readBuffer(size32_t len, void * ptr) + { + if ((len == 0) || (bufferOffset == dataLength)) + return 0; + + size32_t toCopy = std::min(len, available()); + memcpy(ptr, data(bufferOffset), toCopy); + bufferOffset += toCopy; + return toCopy; + } + + const void * peekBuffer(size32_t &got) + { + got = available(); + return data(bufferOffset); + } + + +private: + void skipInput(size32_t size) + { + input->skip(size); + nextBlockOffset += size; + } + + void readNextBlock() + { + if (endOfStream) + return; + + size32_t remaining = available(); + //If there is remaining data that isn't at the head of the buffer, move it to the head + if (bufferOffset) +
{ + if (remaining) + { + memmove(data(0), data(bufferOffset), remaining); + dataLength = remaining; + } + bufferOffset = 0; + } + + size32_t nextReadSize = blockReadSize; + for (;;) + { + expandBuffer(remaining + nextReadSize); + size32_t got = readNextBlock(nextReadSize, data(remaining)); // will set endOfStream if finished + if (likely(got != BufferTooSmall)) + { + nextBlockOffset += got; + dataLength = remaining + got; + break; + } + + //This can occur when decompressing - if the next block is too big to fit in the requested buffer + nextReadSize += blockReadSize; + } + } + + size32_t readNextBlock(size32_t len, void * ptr) + { + size32_t got = input->read(len, ptr); + if (got == 0) + endOfStream = true; + return got; + } + + void expandBuffer(size32_t newLength) + { + if (buffer.length() < newLength) + { + MemoryAttr expandedBuffer(newLength); + memcpy(expandedBuffer.mem(), data(0), available()); + buffer.swapWith(expandedBuffer); + } + } + + +protected: + Linked input; + MemoryAttr buffer; + offset_t nextBlockOffset = 0; + size32_t blockReadSize = 0; + size32_t bufferOffset = 0; + size32_t dataLength = 0; + bool endOfStream = false; +}; + +IBufferedSerialInputStream * createBufferedInputStream(ISerialInputStream * input, size32_t blockReadSize) +{ + assertex(blockReadSize != 0); + return new CBlockedSerialInputStream(input, blockReadSize); +} + +//--------------------------------------------------------------------------- + +class CDecompressingSerialInputStream final : public CInterfaceOf +{ +public: + CDecompressingSerialInputStream(IBufferedSerialInputStream * _input, IExpander * _decompressor) + : input(_input), decompressor(_decompressor) + { + } + + virtual size32_t read(size32_t len, void * ptr) override + { + size32_t available; + constexpr size32_t sizeHeader = 2 * sizeof(size32_t); + + again: + const byte * next = static_cast(input->peek(sizeHeader, available)); + if (available == 0) + return 0; + if (available < sizeHeader) + throw makeStringExceptionV(-1, "End of input stream for read of %u bytes at offset %llu (%llu)", len, tell(), input->tell()); + + size32_t decompressedSize = *(const size32_t *)next; // Technically illegal - should copy to an aligned object + if (len < decompressedSize) + return BufferTooSmall; // Need to expand the buffer + + size32_t compressedSize = *((const size32_t *)next + 1); // Technically illegal - should copy to an aligned object + if (unlikely(decompressedSize <= skipPending)) + { + input->skip(sizeHeader + compressedSize); + skipPending -= decompressedSize; + goto again; // go around the loop again. - a goto seems the cleanest way to code this. + } + + //If the input buffer is not big enough skip the header so it does not need to be contiguous with the data + //but that means various offsets need adjusting further down. + size32_t sizeNextHeader = sizeHeader; + if (unlikely(available < sizeHeader + compressedSize)) + { + input->skip(sizeHeader); + next = static_cast(input->peek(compressedSize, available)); + if (available < compressedSize) + throw makeStringExceptionV(-1, "End of input stream for read of %u bytes at offset %llu (%llu)", len, tell(), input->tell()); + sizeNextHeader = 0; + } + + size32_t expanded = decompressor->expandDirect(decompressedSize, ptr, compressedSize, next + sizeNextHeader); + assertex(expanded == decompressedSize); + nextOffset += decompressedSize; + input->skip(sizeNextHeader + compressedSize); + + if (skipPending > 0) + { + //Yuk. 
This works, but will require copying the whole buffer + memmove(ptr, (const byte *)ptr+skipPending, decompressedSize - skipPending); + decompressedSize -= skipPending; + skipPending = 0; + } + + return decompressedSize; + } + + virtual void get(size32_t len, void * ptr) override + { + //This function cannot be implemented because the caller would have to request exactly the correct length + throwUnexpected(); + } + + virtual bool eos() override + { + return input->eos(); + } + + virtual void skip(size32_t sz) override + { + skipPending += sz; + } + virtual offset_t tell() const override + { + return nextOffset + skipPending; + } + + virtual void reset(offset_t _offset, offset_t _flen=(offset_t)-1) + { + nextOffset = _offset; + skipPending = 0; + input->reset(_offset, _flen); + } + +protected: + Linked input; + Linked decompressor; + offset_t nextOffset = 0; + offset_t skipPending = 0; +}; + +ISerialInputStream * createDecompressingInputStream(IBufferedSerialInputStream * input, IExpander * decompressor) +{ + assertex(decompressor->supportsBlockDecompression()); + return new CDecompressingSerialInputStream(input, decompressor); +} + +//--------------------------------------------------------------------------- + +class CFileSerialInputStream final : public CInterfaceOf +{ +public: + CFileSerialInputStream(IFileIO * _input) + : input(_input) + { + } + + virtual size32_t read(size32_t len, void * ptr) override + { + if (nextOffset + len > lastOffset) + len = lastOffset - nextOffset; + unsigned numRead = input->read(nextOffset, len, ptr); + nextOffset += numRead; + return numRead; + } + + virtual void get(size32_t len, void * ptr) override + { + size32_t numRead = read(len, ptr); + if (numRead != len) + throw makeStringExceptionV(-1, "End of input stream for read of %u bytes at offset %llu", len, tell()-numRead); + } + + virtual bool eos() override + { + return nextOffset == input->size(); + } + + virtual void skip(size32_t len) override + { + if (nextOffset + len <= lastOffset) + nextOffset += len; + else + nextOffset = lastOffset; + } + + virtual offset_t tell() const override + { + return nextOffset; + } + + virtual void reset(offset_t _offset, offset_t _flen) + { + nextOffset = _offset; + lastOffset = _flen; + assertex(nextOffset <= lastOffset); + } + +protected: + Linked input; + offset_t nextOffset = 0; + offset_t lastOffset = UnknownOffset; // max(offset_t), so input file length is not limited +}; + +//Temporary class - long term goal is to have IFile create this directly and avoid an indirect call. +ISerialInputStream * createSerialInputStream(IFileIO * input) +{ + return new CFileSerialInputStream(input); +} + + + +//=========================================================================== + +// This class ensures that data is written in fixed block sizes - NOT a fixed buffer size. +// This means the buffer size is likely to be bigger than the block size - the class is passed +// an initial estimate for the potential overlap.
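+// Example usage (illustrative only - 'rawOut' stands for any ISerialOutputStream):
+//     Owned<IBufferedSerialOutputStream> out = createBufferedOutputStream(rawOut, 0x10000);
+//     out->put(len, data); // gathered internally and forwarded in 0x10000-byte blocks
+//     out->flush();        // writes any partial final block and flushes rawOut
+// (createBufferedOutputStream() is defined later in this file.)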
+ +class CBlockedSerialOutputStream final : public CInterfaceOf +{ +public: + CBlockedSerialOutputStream(ISerialOutputStream * _output, size32_t _blockWriteSize) + : output(_output), blockWriteSize(_blockWriteSize) + { + size32_t initialBufferSize = blockWriteSize; + buffer.allocate(initialBufferSize); + } + + virtual void flush() override + { + assertex(!isSuspended()); + flushBlocks(true); + output->flush(); + } + + virtual void put(size32_t len, const void * ptr) override + { + //Special case where there is space in the buffer - so the function has much lower overhead + if (likely(!isSuspended())) + { + if (likely(bufferOffset + len <= blockWriteSize)) + { + memcpy(data(bufferOffset), ptr, len); + bufferOffset += len; + return; + } + } + doWrite(len, ptr); + } + + //General purpose write routine. + size32_t doWrite(size32_t len, const void * ptr) __attribute__((noinline)) + { + size32_t sizeWritten = 0; + const byte * src = (const byte *)ptr; + if (likely(!isSuspended())) + { + //NOTE: If we are writing more than blockWriteSize and bufferOffset == 0 then we + //could avoid this first memcpy. However, the tests would slow down the very common cases + //so live with the potential inefficiency + + //First fill up any remaining output block. + //bufferOffset can only be > blockWriteSize if it is suspended + //When a suspended output is resumed all pending blocks will be written out. + assertex(bufferOffset <= blockWriteSize); + size32_t space = (blockWriteSize - bufferOffset); + if (likely(space)) + { + size32_t toCopy = std::min(len, space); + memcpy(data(bufferOffset), src, toCopy); + bufferOffset += toCopy; + sizeWritten += toCopy; + // Otherwise this would have been processed by the fast-path. The following code is still correct if condition is false. 
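+            // (when not suspended, put() only calls doWrite() once bufferOffset+len exceeds blockWriteSize,
+            // so toCopy == space < len here and the assert below always holds)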
+ dbgassertex(sizeWritten < len); + } + + output->put(blockWriteSize, data(0)); + //Revisit when compression can indicate that the buffer is only half consumed + blockOffset += blockWriteSize; + bufferOffset = 0; + + //While there are blocks larger than the block write size write directly to the output + while (unlikely(sizeWritten + blockWriteSize <= len)) + { + output->put(blockWriteSize, src+sizeWritten); + sizeWritten += blockWriteSize; + blockOffset += blockWriteSize; + } + } + + size32_t remaining = len - sizeWritten; + if (likely(remaining)) + { + ensureSpace(remaining); + memcpy(data(bufferOffset), src+sizeWritten, remaining); + bufferOffset += remaining; + } + return sizeWritten + remaining; + } + + inline bool readyToWrite() const + { + return (bufferOffset >= blockWriteSize) && !isSuspended(); + } + + virtual byte * reserve(size32_t wanted, size32_t & got) override + { + ensureSpace(wanted); + got = available(); + return data(bufferOffset); + } + + virtual void commit(size32_t written) override + { + doCommit(written); + checkWriteIfNotSuspended(); + } + + virtual void suspend(size32_t len) override + { + suspendOffsets.append(bufferOffset); + ensureSpace(len); + //Leave space for the data to be written afterwards + bufferOffset += len; + } + + virtual void resume(size32_t len, const void * ptr) override + { + doResume(len, ptr); + checkWriteIfNotSuspended(); + } + + virtual offset_t tell() const override { return blockOffset+bufferOffset; } + +//------------------------------------------------------- +//Helper functions for CThreadedBlockedSerialOutputStream +//doCommit() and doResume are also used by this class + + void doCommit(size32_t written) + { + bufferOffset += written; + } + + void doResume(size32_t len, const void * ptr) + { + size32_t offset = suspendOffsets.tos(); + suspendOffsets.pop(); + if (likely(len)) + memcpy(data(offset), ptr, len); + } + + size32_t writeToBuffer(size32_t len, const void * ptr) + { + if (likely(!isSuspended())) + { + assertex(bufferOffset <= blockWriteSize); + size32_t space = (blockWriteSize - bufferOffset); + if (likely(space)) + { + size32_t toCopy = std::min(len, space); + memcpy(data(bufferOffset), ptr, toCopy); + bufferOffset += toCopy; + return toCopy; + } + return 0; + } + else + { + ensureSpace(len); + memcpy(data(bufferOffset), ptr, len); + bufferOffset += len; + return len; + } + } + + size32_t flushAllButLastBlock() + { + assertex(bufferOffset >= blockWriteSize); + + unsigned from = 0; + for (;;) + { + size32_t remaining = bufferOffset - from; + if (remaining < 2*blockWriteSize) + return from; + + output->put(blockWriteSize, data(from)); + blockOffset += blockWriteSize; + from += blockWriteSize; + } + } + + //This can be executed in parallel with writeBlock() + void copyRemainingAndReset(CBlockedSerialOutputStream & other, size32_t marker) + { + size32_t from = marker + blockWriteSize; + size32_t remaining = bufferOffset - from; + if (remaining) + { + size32_t written = other.writeToBuffer(remaining, data(from)); + assertex(written == remaining); + } + + bufferOffset = 0; + } + + //This can be executed in parallel with copyRemainingAndReset() + void writeBlock(unsigned from) + { + output->put(blockWriteSize, data(from)); + blockOffset += blockWriteSize; + } + + +protected: + inline byte * data(size32_t offset) { return (byte *)buffer.get() + offset; } + inline size32_t available() const { return buffer.length() - bufferOffset; } + inline bool isSuspended() const { return suspendOffsets.ordinality() != 0; } + + void 
ensureSpace(size32_t required) __attribute__((always_inline))
+    {
+        if (unlikely(required > available()))
+            expandBuffer(bufferOffset + required);
+    }
+    void expandBuffer(size32_t newLength)
+    {
+        if (buffer.length() < newLength)
+        {
+            constexpr size32_t alignment = 32;
+            newLength = (newLength + (alignment - 1)) & ~(alignment - 1);
+
+            MemoryAttr expandedBuffer(newLength);
+            memcpy(expandedBuffer.mem(), data(0), bufferOffset);
+            buffer.swapWith(expandedBuffer);
+        }
+    }
+
+    inline void checkWriteIfNotSuspended() __attribute__((always_inline))
+    {
+        if (likely(bufferOffset < blockWriteSize))
+            return;
+
+        if (unlikely(isSuspended()))
+            return;
+
+        flushBlocks(false);
+    }
+
+    void flushBlocks(bool flushLast)
+    {
+        unsigned from = 0;
+        //Write out any pending blocks
+        for (;;)
+        {
+            size32_t remaining = bufferOffset - from;
+            if (remaining == 0)
+                break;
+            if ((remaining < blockWriteSize) && !flushLast)
+                break;
+            size32_t writeSize = std::min(remaining, blockWriteSize);
+            output->put(writeSize, data(from));
+            blockOffset += writeSize;
+            from += writeSize;
+        }
+
+        if ((from != 0) && (from != bufferOffset))
+            memcpy(data(0), data(from), bufferOffset - from);
+
+        bufferOffset -= from;
+    }
+
+protected:
+    Linked<ISerialOutputStream> output;
+    MemoryAttr buffer;
+    UnsignedArray suspendOffsets;
+    offset_t blockOffset = 0;
+    size32_t blockWriteSize = 0;
+    size32_t bufferOffset = 0;
+};
+
+IBufferedSerialOutputStream * createBufferedOutputStream(ISerialOutputStream * output, size32_t blockWriteSize)
+{
+    assertex(blockWriteSize);
+    return new CBlockedSerialOutputStream(output, blockWriteSize);
+}
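A sketch of how the suspend()/resume() contract above is meant to be used (assumed usage, modelled on the interface comments and on VariableDataProvider in the unit tests later in this patch; writeCountPrefixedItems is a hypothetical helper, not part of the change):

#include "jstream.hpp"   // IBufferedSerialOutputStream, from this patch
#include <vector>

static void writeCountPrefixedItems(IBufferedSerialOutputStream & out, const std::vector<unsigned> & items)
{
    size32_t count = 0;
    out.suspend(sizeof(count));            // leave a hole for the count; nothing flushes until resume()
    for (unsigned value : items)
    {
        out.put(sizeof(value), &value);    // stream each item after the hole
        count++;
    }
    out.resume(sizeof(count), &count);     // back-patch the count and allow flushing again
}

The point of the pattern is that the prefix is only known after the payload has been written; suspend() pins the buffered data so the hole can still be filled in before anything reaches the next stage.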
+
+//---------------------------------------------------------------------------
+
+class CThreadedBlockedSerialOutputStream final : public CInterfaceOf<IBufferedSerialOutputStream>, public IThreaded
+{
+public:
+    IMPLEMENT_IINTERFACE_USING(CInterfaceOf<IBufferedSerialOutputStream>)
+
+    CThreadedBlockedSerialOutputStream(ISerialOutputStream * _output, size32_t _blockWriteSize)
+    : threaded("CThreadedBlockedSerialOutputStream"), stream{{_output,_blockWriteSize},{_output,_blockWriteSize}}
+    {
+        active = 0;
+        threaded.init(this, false);
+    }
+    ~CThreadedBlockedSerialOutputStream()
+    {
+        abort = true;
+        go.signal();
+        threaded.join();
+    }
+
+    virtual void flush() override
+    {
+        ensureWriteComplete();
+        stream[active].flush();
+    }
+
+    virtual void put(size32_t len, const void * ptr) override
+    {
+        size32_t sizeWritten = 0;
+        const byte * src = (const byte *)ptr;
+        for (;;)
+        {
+            CBlockedSerialOutputStream & activeStream = stream[active];
+            size32_t written = activeStream.writeToBuffer(len-sizeWritten, src+sizeWritten);
+            sizeWritten += written;
+
+            checkForPendingWrite();
+
+            if (sizeWritten == len)
+                return;
+        }
+    }
+
+    virtual byte * reserve(size32_t wanted, size32_t & got) override
+    {
+        return stream[active].reserve(wanted, got);
+    }
+
+    virtual void commit(size32_t written) override
+    {
+        stream[active].doCommit(written);
+        checkForPendingWrite();
+    }
+    virtual void suspend(size32_t len) override
+    {
+        stream[active].suspend(len);
+    }
+    virtual void resume(size32_t len, const void * ptr) override
+    {
+        stream[active].doResume(len, ptr);
+        checkForPendingWrite();
+    }
+
+protected:
+    virtual offset_t tell() const override
+    {
+        return stream[0].tell() + stream[1].tell();
+    }
+
+    inline void checkForPendingWrite() __attribute__((always_inline))
+    {
+        if (unlikely(stream[active].readyToWrite()))
+        {
+            startActiveWrite(active);
+            active = 1-active;
+        }
+    }
+
+    void ensureWriteComplete()
+    {
+        if (running)
+        {
+            done.wait();
+            running = false;
+            if (pendingException)
+                throw pendingException.getClear();
+        }
+    }
+
+    void startActiveWrite(unsigned whichStream) __attribute__((noinline))
+    {
+        ensureWriteComplete();
+        //write out all but the last block
+        size32_t marker = stream[whichStream].flushAllButLastBlock();
+
+        //The following calls can be executed in parallel.  Copy remaining afterwards to allow for better thread overlap
+        startActiveWrite(whichStream, marker);
+        stream[whichStream].copyRemainingAndReset(stream[1-whichStream], marker);
+    }
+
+    void startActiveWrite(unsigned whichStream, unsigned marker)
+    {
+        runningStream = whichStream;
+        runningMarker = marker;
+        running = true;
+
+        constexpr bool useThreading = true;
+        if (useThreading)
+            go.signal();
+        else
+            run();
+    }
+
+    void run()
+    {
+        try
+        {
+            stream[runningStream].writeBlock(runningMarker);
+        }
+        catch (IException * e)
+        {
+            pendingException.setown(e);
+        }
+        done.signal();
+    }
+
+    virtual void threadmain() override
+    {
+        for (;;)
+        {
+            go.wait();
+            if (abort)
+                break;
+            run();
+        }
+    }
+
+protected:
+    CThreaded threaded;
+    CBlockedSerialOutputStream stream[2];
+    Semaphore go;
+    Semaphore done;
+    Owned<IException> pendingException;
+    bool running{false};
+    std::atomic<bool> abort{false};
+    unsigned active = 0;
+//Used by the thread that outputs the data
+    unsigned runningStream = 0;
+    unsigned runningMarker = 0;
+};
+
+IBufferedSerialOutputStream * createThreadedBufferedOutputStream(ISerialOutputStream * output, size32_t blockWriteSize)
+{
+    assertex(blockWriteSize);
+    return new CThreadedBlockedSerialOutputStream(output, blockWriteSize);
+}
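To summarise the threaded variant above: it double-buffers between two CBlockedSerialOutputStream instances. put() fills stream[active]; once a full block is ready, checkForPendingWrite() hands the filled buffer to the worker thread via the go/done semaphores and flips active, so filling one buffer overlaps the physical write of the other. Any exception raised on the worker thread is captured in pendingException and rethrown by ensureWriteComplete() on the caller's next write or flush.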
+
+//---------------------------------------------------------------------------
+
+class CCompressingSerialOutputStream final : public CInterfaceOf<ISerialOutputStream>
+{
+public:
+    CCompressingSerialOutputStream(IBufferedSerialOutputStream * _output, ICompressor * _compressor)
+    : output(_output), compressor(_compressor)
+    {
+    }
+
+    virtual void flush() override
+    {
+        output->flush();
+    }
+
+    virtual void put(size32_t len, const void * data) override
+    {
+        constexpr size32_t sizeHeader = 2 * sizeof(size32_t);
+        unsigned expectedSize = sizeHeader + len;   // MORE: Reduce this to minimize the size of the target buffer.
+        for (;;)
+        {
+            //Future: When general file support is added this will need to keep track of where it is in the output
+            //block and manage packing data into the boundaries cleanly.  (Probably implement with a new class)
+            size32_t available;
+            byte * target = output->reserve(expectedSize, available);
+            assertex(available >= sizeHeader);
+
+            //MORE: Support option to compress as much as possible when packing into fixed size target buffers
+            size32_t written = compressor->compressDirect(available-sizeHeader, target + sizeHeader, len, data, nullptr);
+            if (written != 0)
+            {
+                *(size32_t *)target = len;              // Technically illegal - should copy to an aligned object
+                *((size32_t *)target + 1) = written;    // Technically illegal - should copy to an aligned object
+                output->commit(sizeHeader + written);
+                return;
+            }
+
+            //Increase the buffer size and try again
+            expectedSize += len / 4;
+        }
+    }
+
+    virtual offset_t tell() const override { throwUnexpected(); }
+
+protected:
+    Linked<IBufferedSerialOutputStream> output;
+    Linked<ICompressor> compressor;
+};
+
+ISerialOutputStream * createCompressingOutputStream(IBufferedSerialOutputStream * output, ICompressor * compressor)
+{
+    assertex(compressor->supportsBlockCompression());
+    return new CCompressingSerialOutputStream(output, compressor);
+}
+
+//---------------------------------------------------------------------------
+
+class CFileSerialOutputStream final : public CInterfaceOf<ISerialOutputStream>
+{
+public:
+    CFileSerialOutputStream(IFileIO * _output)
+    : output(_output)
+    {
+    }
+
+    virtual void flush() override
+    {
+        output->flush();
+    }
+
+    virtual void put(size32_t len, const void * ptr) override
+    {
+        unsigned written = output->write(nextOffset, len, ptr);
+        nextOffset += len;
+        if (written != len)
+            throw makeStringExceptionV(-1, "Failed to write %u bytes at offset %llu", len-written, nextOffset);
+    }
+
+    virtual offset_t tell() const override
+    {
+        return nextOffset;
+    }
+
+protected:
+    Linked<IFileIO> output;
+    offset_t nextOffset = 0;
+};
+
+//Temporary class - long term goal is to have IFile create this directly and avoid an indirect call.
+ISerialOutputStream * createSerialOutputStream(IFileIO * output)
+{
+    return new CFileSerialOutputStream(output);
+}
+
+
+/*
+ * Future work:
+ *
+ * Add an intercept input/output
+ * Add support for partial compression into a fixed block size (including zero padding the rest of the block if too small)
+ */
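Before the header diff, a quick sketch of how these factories are meant to compose (assumed usage, mirroring createOutput() in the unit tests further down; the file name and block sizes are illustrative only):

#include "jfile.hpp"     // createIFile, IFOcreate
#include "jlzw.hpp"      // queryCompressHandler, COMPRESS_METHOD_LZ4
#include "jstream.hpp"   // the stream factories added by this patch

void writeCompressedExample()
{
    Owned<IFile> file = createIFile("example.dat");     // hypothetical output file
    Owned<IFileIO> io = file->open(IFOcreate);
    Owned<ISerialOutputStream> raw = createSerialOutputStream(io);
    Owned<IBufferedSerialOutputStream> blocks = createBufferedOutputStream(raw, 0x10000);
    Owned<ICompressor> compressor = queryCompressHandler(COMPRESS_METHOD_LZ4)->getCompressor(nullptr);
    Owned<ISerialOutputStream> zipped = createCompressingOutputStream(blocks, compressor);
    Owned<IBufferedSerialOutputStream> out = createBufferedOutputStream(zipped, 0x10000, false);

    out->put(5, "hello");   // rows accumulate in the front-end buffer...
    out->flush();           // ...and are pushed through the compressor to the file
}

Data flows front to back: the outer buffered stream batches rows into blocks, the compressing stream frames each block as (uncompressed size, compressed size, payload), and the inner buffered stream batches the compressed frames before they hit the file.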
diff --git a/system/jlib/jstream.hpp b/system/jlib/jstream.hpp
index 28563fce0df..03751d8e29a 100644
--- a/system/jlib/jstream.hpp
+++ b/system/jlib/jstream.hpp
@@ -43,4 +43,91 @@ extern jlib_decl IByteInputStream *createInputStream(StringBuffer &from);
 extern jlib_decl IByteInputStream *createInputStream(int handle);
 extern jlib_decl IByteOutputStream *createOutputStream(StringBuffer &to);
 extern jlib_decl IByteOutputStream *createOutputStream(int handle);
+
+
+static constexpr size32_t BufferTooSmall = (size32_t)-1;
+static constexpr offset_t UnknownOffset = (offset_t)-1;
+interface ISerialInputStream : extends IInterface
+{
+    virtual size32_t read(size32_t len, void * ptr) = 0;        // returns size read, result < len does NOT imply end of file
+    virtual void skip(size32_t sz) = 0;
+    virtual void get(size32_t len, void * ptr) = 0;             // exception if no data available
+    virtual bool eos() = 0;                                     // no more data
+    virtual void reset(offset_t _offset, offset_t _flen) = 0;   // input stream has changed - restart reading
+    virtual offset_t tell() const = 0;                          // used to implement beginNested
+};
+
+interface ISerialStream : extends ISerialInputStream
+{
+    virtual const void * peek(size32_t wanted, size32_t &got) = 0;  // try and ensure wanted bytes are available.
+                                                                    // if got<wanted then approaching eof
+                                                                    // if got>wanted then got is size available in buffer
+};
+using IBufferedSerialInputStream = ISerialStream;
+
+/* example of reading a nul terminated string using ISerialStream peek and skip
+{
+    for (;;) {
+        const char *s = peek(1,got);
+        if (!s)
+            break;  // eof before nul detected;
+        const char *p = s;
+        const char *e = p+got;
+        while (p!=e) {
+            if (!*p) {
+                out.append(p-s,s);
+                skip(p-s+1); // include nul
+                return;
+            }
+            p++;
+        }
+        out.append(got,s);
+        skip(got);
+    }
+}
+*/
+
+interface ISerialOutputStream : extends IInterface
+{
+    virtual void put(size32_t len, const void * ptr) = 0;   // throws an error if cannot write the full size.
+    virtual void flush() = 0;
+    virtual offset_t tell() const = 0;                      // used to implement beginNested
+};
+
+interface IBufferedSerialOutputStream : extends ISerialOutputStream
+{
+    virtual byte * reserve(size32_t wanted, size32_t & got) = 0;    // get a pointer to a contiguous block of memory to write to.
+    virtual void commit(size32_t written) = 0 ;                     // commit the data written to the block returned by reserve
+    virtual void suspend(size32_t wanted) = 0;                      // Reserve some bytes and prevent data being flushed to the next stage until endNested is called.  May nest.
+    virtual void resume(size32_t len, const void * ptr) = 0;        // update the data allocated by suspend and allow flushing.
+};
+
+interface ICompressor;
+interface IExpander;
+interface IFileIO;
+
+extern jlib_decl IBufferedSerialInputStream * createBufferedInputStream(ISerialInputStream * input, size32_t blockReadSize);
+extern jlib_decl ISerialInputStream * createDecompressingInputStream(IBufferedSerialInputStream * input, IExpander * decompressor);
+extern jlib_decl ISerialInputStream * createSerialInputStream(IFileIO * input);
+extern jlib_decl IBufferedSerialOutputStream * createBufferedOutputStream(ISerialOutputStream * output, size32_t blockWriteSize);
+extern jlib_decl IBufferedSerialOutputStream * createThreadedBufferedOutputStream(ISerialOutputStream * output, size32_t blockWriteSize);
+extern jlib_decl ISerialOutputStream * createCompressingOutputStream(IBufferedSerialOutputStream * output, ICompressor * compressor);
+extern jlib_decl ISerialOutputStream * createSerialOutputStream(IFileIO * output);
+
+
+inline IBufferedSerialOutputStream * createBufferedOutputStream(ISerialOutputStream * output, size32_t blockWriteSize, bool threaded)
+{
+    //Threaded version is currently slower unless data is hard to compress or a very large buffer size is being used.
+ if (threaded) + return createThreadedBufferedOutputStream(output, blockWriteSize); + else + return createBufferedOutputStream(output, blockWriteSize); +} + +inline IBufferedSerialInputStream * createBufferedInputStream(ISerialInputStream * input, size32_t blockReadSize, bool threaded) +{ + //If a threaded version is implemented it should use async io, rather than a thread to perform the look ahead + return createBufferedInputStream(input, blockReadSize); +} + #endif diff --git a/testing/unittests/CMakeLists.txt b/testing/unittests/CMakeLists.txt index ea5e6c9f59e..69a7b5e7a1c 100644 --- a/testing/unittests/CMakeLists.txt +++ b/testing/unittests/CMakeLists.txt @@ -37,6 +37,7 @@ set ( SRCS remotetests.cpp dalitests.cpp jlibtests.cpp + jstreamtests.cpp cryptotests.cpp hqltests.cpp esdltests.cpp diff --git a/testing/unittests/jstreamtests.cpp b/testing/unittests/jstreamtests.cpp new file mode 100644 index 00000000000..98303578ccc --- /dev/null +++ b/testing/unittests/jstreamtests.cpp @@ -0,0 +1,817 @@ +/*############################################################################## + + HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems®. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +############################################################################## */ + +/* + * Jlib regression tests + * + */ + +#ifdef _USE_CPPUNIT +#include +#include +#include +#include + +#include "jsem.hpp" +#include "jfile.hpp" +#include "jstream.hpp" +#include "jlzw.hpp" + +#include "unittests.hpp" + +#define CPPUNIT_ASSERT_EQUAL_STR(x, y) CPPUNIT_ASSERT_EQUAL(std::string(x ? x : ""),std::string(y ? y : "")) + +static const unsigned oneMinute = 60000; // msec + +class CDataProvider +{ +public: + virtual size32_t create(IBufferedSerialOutputStream * target, unsigned row) = 0; + virtual size32_t check(IBufferedSerialInputStream * source, unsigned row) = 0; + const char * queryName() const { return name.str(); }; +protected: + StringBuffer name; +}; + +//Highly compressible +class SequenceDataProvider : public CDataProvider +{ +public: + SequenceDataProvider(size32_t _len, bool _useRead = false, bool _useWrite = true) + : len(_len), useRead(_useRead), useWrite(_useWrite) + { + name.append("Seq_").append(len).append(useRead ? 'R' : 'P').append(useWrite ? 
'W' : 'C'); + } + + virtual size32_t create(IBufferedSerialOutputStream * target, unsigned row) + { + byte * next; + if (useWrite) + { + next = (byte *)alloca(len); + } + else + { + size32_t got; + next = (byte *)target->reserve(len, got); + } + for (size32_t i=0; i < len; i++) + next[i] = (byte)(i * row); + if (useWrite) + target->put(len, next); + else + target->commit(len); + return len; + } + + virtual size32_t check(IBufferedSerialInputStream * source, unsigned row) + { + byte * next; + if (useRead) + { + next = (byte *)alloca(len); + size32_t read = source->read(len, next); + assertex(read == len); + } + else + { + size32_t available; + next = (byte *)source->peek(len, available); + assertex(available >= len); + } + for (size32_t i=0; i < len; i++) + if (next[i] != (byte)(i * row)) + throw MakeStringException(0, "Mismatch at %u,%u", i, row); + if (!useRead) + source->skip(len); + return len; + } + +protected: + size32_t len; + bool useRead; + bool useWrite; +}; + +class SkipSequenceDataProvider : public SequenceDataProvider +{ +public: + SkipSequenceDataProvider(size32_t _len) : SequenceDataProvider(_len) + { + name.clear().append("Skip").append(len); + } + + virtual size32_t check(IBufferedSerialInputStream * source, unsigned row) + { + constexpr size32_t checkByte = 7; + size32_t available; + source->skip(checkByte); + const byte * next = (const byte *)source->peek(1, available); + assertex(available >= 1); + if (next[0] != (byte)(checkByte * row)) + throw MakeStringException(0, "Skip mismatch at %u", row); + source->skip(len-checkByte); + return len; + } + +}; + +class ReservedDataProvider : public SequenceDataProvider +{ +public: + ReservedDataProvider(size32_t _len) : SequenceDataProvider(_len) + { + name.clear().append("Res").append(len); + } + + virtual size32_t create(IBufferedSerialOutputStream * target, unsigned row) + { + size32_t got; + byte * next = (byte *)target->reserve(len+2, got); + for (size32_t i=0; i < len; i++) + next[i] = (byte)(i * row); + target->commit(len); + return len; + } +}; + +//Not very compressible +class Sequence2DataProvider : public CDataProvider +{ +public: + Sequence2DataProvider(size32_t _len) + : len(_len) + { + name.append("Seq2_").append(len); + } + + virtual size32_t create(IBufferedSerialOutputStream * target, unsigned row) + { + byte * next = (byte *)alloca(len); + for (size32_t i=0; i < len; i++) + next[i] = (byte)(i * row + (row >> 3)); + target->put(len, next); + return len; + } + + virtual size32_t check(IBufferedSerialInputStream * source, unsigned row) + { + byte * next = (byte *)alloca(len); + size32_t read = source->read(len, next); + assertex(read == len); + for (size32_t i=0; i < len; i++) + if (next[i] != (byte)(i * row + (row >> 3))) + throw MakeStringException(0, "Mismatch at %u,%u", i, row); + return len; + } + +protected: + size32_t len; +}; + +class RandomDataProvider : public CDataProvider +{ +public: + RandomDataProvider(size32_t _len) + : len(_len), generator{std::random_device{}()} + { + name.append("Rand").append(len); + } + + virtual size32_t create(IBufferedSerialOutputStream * target, unsigned row) + { + byte * next = (byte *) alloca(len); + for (size32_t i=0; i < len; i++) + next[i] = generator(); + target->put(len, next); + return len; + } + + virtual size32_t check(IBufferedSerialInputStream * source, unsigned row) + { + source->skip(len); + return len; + } + +protected: + size32_t len; + std::mt19937 generator; +}; + +//Output (int8 , string, dataset({unsigned3 }) +class VariableDataProvider : public 
CDataProvider +{ +public: + VariableDataProvider(bool _useCount) : useCount(_useCount) + { + name.append("Var_").append(useCount ? 'C' : 'S'); + } + + virtual size32_t create(IBufferedSerialOutputStream * target, unsigned row) + { + //Output (row, (string)row, (row % 7)items of (row, row*2, row*3)) + __uint64 id = row; + StringBuffer name; + name.append(row); + + target->put(8, &id); + size32_t len = name.length(); + target->put(4, &len); + target->put(len, name.str()); + size32_t childCount = (row % 7); + size32_t childSize = 3 * childCount; + if (useCount) + target->put(4, &childCount); + else + target->suspend(sizeof(size32_t)); + for (unsigned i=0; i < childCount; i++) + { + size32_t value = row * (i+1); + target->put(3, &value); + } + if (!useCount) + target->resume(sizeof(childSize), &childSize); + return 8 + 4 + len + 4 + childSize; + } + + virtual size32_t check(IBufferedSerialInputStream * source, unsigned row) + { + //Output (row, (string)row, (row % 7)items of (row, row*2, row*3)) + __uint64 id = row; + source->read(8, &id); + assertex(id == row); + + size32_t len; + source->read(4, &len); + StringBuffer name; + source->read(len, name.reserve(len)); + assertex(atoi(name) == row); + + size32_t size; + source->read(sizeof(size), &size); + if (useCount) + assertex(size == (row % 7)); + else + assertex(size == (row % 7) * 3); + for (unsigned i=0; i < (row % 7); i++) + { + size32_t value = 0; + source->read(3, &value); + size32_t expected = ((row * (i+1)) & 0xFFFFFF); + assertex(value == expected); + } + return 8 + 4 + len + 4 + (row % 7) * 3; + } + +protected: + bool useCount; +}; + + +class NullOuputStream : public CInterfaceOf +{ + virtual size32_t write(size32_t len, const void * ptr) { return len; } + virtual void put(size32_t len, const void * ptr) {} + virtual void flush() {} + virtual byte * reserve(size32_t wanted, size32_t & got) { return nullptr; } + virtual void commit(size32_t written) {} + virtual void suspend(size32_t wanted) {} + virtual void resume(size32_t len, const void * ptr) {} + virtual offset_t tell() const override { return 0; } +}; + +class JlibStreamStressTest : public CppUnit::TestFixture +{ +public: + CPPUNIT_TEST_SUITE(JlibStreamStressTest); + CPPUNIT_TEST(testGeneration); + CPPUNIT_TEST(testSimpleStream); // Write a file and then read the results + CPPUNIT_TEST(testIncSequentialStream); // write a file and read results after each flush + CPPUNIT_TEST(testEvenSequentialStream); // write a file and read results after each flush + CPPUNIT_TEST(testParallelStream); // write a file and read in parallel from a separate thread + CPPUNIT_TEST(testThreadedWriteStream); // write a file using a threaded writer + //MORE: + //Threaded writer + //Threaded reader + //Threaded reader and writer + //Threaded reader and writer all in parallel + //Directly create the stream around the handle to avoid virtuals + CPPUNIT_TEST_SUITE_END(); + + //The following options control which tests are run and the parameters for each test + static constexpr const char * filename = "testfile"; + static constexpr offset_t numTestRows = 10'000'000; + static constexpr offset_t numRowsPerBatch = 10'000; + static constexpr bool testCore = true; + static constexpr bool testCompressible = true; + static constexpr bool testRandom = true; + static constexpr bool testSkip = true; + static constexpr bool testHighCompression = true; + + static constexpr bool timeGeneration = false; + static constexpr bool testSimple = true; + static constexpr bool testIncSequential = false; + static constexpr bool 
testEvenSequential = true; + static constexpr bool testParallel = true; + static constexpr bool testThreadedWrite = true; + + __uint64 timeSeq = 0; + __uint64 timeSkip = 0; + __uint64 timeRand = 0; + + __uint64 testGeneration(CDataProvider & dataProvider, unsigned numRows) + { + Owned out = new NullOuputStream(); + + CCycleTimer timer; + + offset_t totalWritten = 0; + for (unsigned i=0; i < numRows; i++) + totalWritten += dataProvider.create(out, i); + out->flush(); + __uint64 elapsedNs = timer.elapsedNs(); + + DBGLOG("testGeneration(%s, %u) took %lluus", dataProvider.queryName(), numRows, elapsedNs/1000); + return elapsedNs; + } + void reportResult(const char * testname, ICompressHandler * compressHandler, CDataProvider & dataProvider, size32_t bufferSize, size32_t compressedBufferSize, unsigned numRows, offset_t totalWritten, __uint64 elapsedNs) + { + Owned file = createIFile(filename); + offset_t compressedSize = file->size(); + + const char * compressMethod = compressHandler ? compressHandler->queryType() : "none"; + double rate = (double)totalWritten * 1000 / elapsedNs; + DBGLOG("%s(%s, %s, %u, %u, %u) took %lluus %.2fMB/s %.2f%%", testname, compressMethod, dataProvider.queryName(), bufferSize, compressedBufferSize, numRows, elapsedNs/1000, rate, (double)compressedSize * 100 / totalWritten); + + } + void reportFailure(const char * testname, ICompressHandler * compressHandler, CDataProvider & dataProvider, size32_t bufferSize, size32_t compressedBufferSize, IException * ownedException) + { + const char * compressMethod = compressHandler ? compressHandler->queryType() : "none"; + StringBuffer msg; + msg.appendf("%s(%s, %s, %u, %u) failed: ", testname, compressMethod, dataProvider.queryName(), bufferSize, compressedBufferSize); + ownedException->errorMessage(msg); + ownedException->Release(); + CPPUNIT_FAIL(msg.str()); + } + void runSimpleStream(const char * testname, ICompressHandler * compressHandler, CDataProvider & dataProvider, size32_t bufferSize, size32_t compressedBufferSize, unsigned numRows, bool threadedRead, bool threadedWrite) + { + try + { + Owned out = createOutput(filename, bufferSize, compressHandler, compressedBufferSize, threadedWrite); + Owned in = createInput(filename, bufferSize ? 
bufferSize : 32, compressHandler, compressedBufferSize, threadedRead); + + CCycleTimer timer; + offset_t totalWritten = 0; + for (unsigned i=0; i < numRows; i++) + totalWritten += dataProvider.create(out, i); + out->flush(); + + offset_t totalRead = 0; + for (unsigned i=0; i < numRows; i++) + totalRead += dataProvider.check(in, i); + + byte end; + size32_t remaining = in->read(1, &end); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Data available after the end of stream", 0U, remaining); + CPPUNIT_ASSERT_EQUAL_MESSAGE("eos is not true at end of stream", true, in->eos()); + + CPPUNIT_ASSERT_EQUAL(totalWritten, totalRead); + + __uint64 elapsedNs = timer.elapsedNs(); + reportResult(testname, compressHandler, dataProvider, bufferSize, compressedBufferSize, numRows, totalWritten, elapsedNs); + } + catch (IException * e) + { + reportFailure(testname, compressHandler, dataProvider, bufferSize, compressedBufferSize, e); + } + } + void runSimpleStream(ICompressHandler * compressHandler, CDataProvider & dataProvider, size32_t bufferSize, size32_t compressedBufferSize, unsigned numRows) + { + runSimpleStream("testSimple", compressHandler, dataProvider, bufferSize, compressedBufferSize, numRows, false, false); + } + void runThreadedWriteStream(ICompressHandler * compressHandler, CDataProvider & dataProvider, size32_t bufferSize, size32_t compressedBufferSize, unsigned numRows) + { + if ((compressHandler ? compressedBufferSize : bufferSize) == 0) + return; + runSimpleStream("testThreadedWriteStream", compressHandler, dataProvider, bufferSize, compressedBufferSize, numRows, false, true); + } + + void runSequentialStream(const char * testname, ICompressHandler * compressHandler, CDataProvider & dataProvider, size32_t bufferSize, size32_t compressedBufferSize, unsigned numRows, bool evenSize) + { + try + { + Owned out = createOutput(filename, bufferSize, compressHandler, compressedBufferSize, false); + Owned in = createInput(filename, bufferSize ? bufferSize : 32, compressHandler, compressedBufferSize, false); + + CCycleTimer timer; + offset_t totalWritten = 0; + offset_t totalRead = 0; + + offset_t rowsRemaining = numRows; + offset_t rowsWritten = 0; + for (size32_t batch=1; rowsRemaining; batch++) + { + unsigned rowsThisTime = std::min(rowsRemaining, evenSize ? 
numRowsPerBatch : (offset_t)batch); + + for (unsigned i=0; i < rowsThisTime; i++) + { + totalWritten += dataProvider.create(out, rowsWritten+i); + CPPUNIT_ASSERT_EQUAL(totalWritten, out->tell()); + } + out->flush(); + + for (unsigned i=0; i < rowsThisTime; i++) + { + totalRead += dataProvider.check(in, rowsWritten+i); + CPPUNIT_ASSERT_EQUAL(totalRead, in->tell()); + } + + rowsRemaining -= rowsThisTime; + rowsWritten += rowsThisTime; + } + + byte end; + size32_t remaining = in->read(1, &end); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Data available after the end of stream", 0U, remaining); + CPPUNIT_ASSERT_EQUAL_MESSAGE("eos is not true at end of stream", true, in->eos()); + + CPPUNIT_ASSERT_EQUAL(totalWritten, totalRead); + + __uint64 elapsedNs = timer.elapsedNs(); + reportResult(testname, compressHandler, dataProvider, bufferSize, compressedBufferSize, numRows, totalWritten, elapsedNs); + } + catch (IException * e) + { + reportFailure(testname, compressHandler, dataProvider, bufferSize, compressedBufferSize, e); + } + } + + void runEvenSequentialStream(ICompressHandler * compressHandler, CDataProvider & dataProvider, size32_t bufferSize, size32_t compressedBufferSize, unsigned numRows) + { + runSequentialStream("testEvenSequential", compressHandler, dataProvider, bufferSize, compressedBufferSize, numRows, true); + } + void runIncSequentialStream(ICompressHandler * compressHandler, CDataProvider & dataProvider, size32_t bufferSize, size32_t compressedBufferSize, unsigned numRows) + { + runSequentialStream("testIncSequential", compressHandler, dataProvider, bufferSize, compressedBufferSize, numRows, false); + } + class ParallelWorker : public Thread + { + public: + ParallelWorker(Semaphore & _started, Semaphore & _ready, std::atomic & _available) + : started(_started), ready(_ready), available(_available) {} + + Semaphore go; + Semaphore & started; + Semaphore & ready; + std::atomic & available; + }; + + class ParallelWriter : public ParallelWorker + { + public: + ParallelWriter(Semaphore & _started, Semaphore & _ready, std::atomic & _available, CDataProvider & _dataProvider, IBufferedSerialOutputStream * _out, unsigned _numRows) + : ParallelWorker(_started, _ready, _available), dataProvider(_dataProvider), out(_out), numRows(_numRows) + { + } + virtual int run() + { + started.signal(); + go.wait(); + + unsigned batches = numRows / numRowsPerBatch; + offset_t totalWritten = 0; + for (unsigned batch=0; batch < batches; batch++) + { + for (unsigned i=0; i < numRowsPerBatch; i++) + totalWritten += dataProvider.create(out, i+batch*numRowsPerBatch); + out->flush(); + available.fetch_add(numRowsPerBatch); + ready.signal(); + } + totalSent = totalWritten; + return 0; + } + + public: + CDataProvider & dataProvider; + IBufferedSerialOutputStream * out; + offset_t totalSent; + unsigned numRows; + }; + + + class ParallelReader : public ParallelWorker + { + public: + ParallelReader(Semaphore & _started, Semaphore & _ready, std::atomic & _available, CDataProvider & _dataProvider, IBufferedSerialInputStream * _in, unsigned _numRows) + : ParallelWorker(_started, _ready, _available), dataProvider(_dataProvider), in(_in), numRows(_numRows) + { + } + virtual int run() + { + try + { + started.signal(); + go.wait(); + + offset_t readSoFar = 0; + unsigned batches = numRows / numRowsPerBatch; + for (unsigned batch=0; batch < batches; batch++) + { + ready.wait(); + offset_t nowAvailable = available.load(); + + while (readSoFar < nowAvailable) + dataProvider.check(in, readSoFar++); + } + } + catch (IException * _e) + { + 
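+                //capture the worker-side failure; runParallelStream() checks reader->e and reports it after join()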
e.setown(_e); + } + return 0; + } + + public: + CDataProvider & dataProvider; + IBufferedSerialInputStream * in; + Owned e; + unsigned numRows; + }; + + void runParallelStream(ICompressHandler * compressHandler, CDataProvider & dataProvider, size32_t bufferSize, size32_t compressedBufferSize, unsigned numRows, bool threadedRead, bool threadedWrite) + { + try + { + Semaphore ready; + Semaphore started; + std::atomic available{0}; + + Owned out = createOutput(filename, bufferSize, compressHandler, compressedBufferSize, threadedWrite); + Owned in = createInput(filename, bufferSize ? bufferSize : 32, compressHandler, compressedBufferSize, threadedRead); + Owned writer = new ParallelWriter(started, ready, available, dataProvider, out, numRows); + Owned reader = new ParallelReader(started, ready, available, dataProvider, in, numRows); + reader->start(true); + writer->start(true); + + started.wait(); + started.wait(); + + CCycleTimer timer; + reader->go.signal(); + writer->go.signal(); + + reader->join(); + writer->join(); + + if (reader->e) + reportFailure("testParallel", compressHandler, dataProvider, bufferSize, compressedBufferSize, LINK(reader->e)); + + byte end; + size32_t remaining = in->read(1, &end); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Data available after the end of stream", 0U, remaining); + CPPUNIT_ASSERT_EQUAL_MESSAGE("eos is not true at end of stream", true, in->eos()); + + __uint64 elapsedNs = timer.elapsedNs(); + reportResult("testParallel", compressHandler, dataProvider, bufferSize, compressedBufferSize, numRows, writer->totalSent, elapsedNs); + } + catch (IException * e) + { + reportFailure("testParallel", compressHandler, dataProvider, bufferSize, compressedBufferSize, e); + } + } + + void runParallelStream(ICompressHandler * compressHandler, CDataProvider & dataProvider, size32_t bufferSize, size32_t compressedBufferSize, unsigned numRows) + { + runParallelStream(compressHandler, dataProvider, bufferSize, compressedBufferSize, numRows, false, false); + } + + using TestFunction = void (JlibStreamStressTest::*)(ICompressHandler * compressHandler, CDataProvider & dataProvider, size32_t bufferSize, size32_t compressedBufferSize, unsigned numRows); + void applyTests(TestFunction testFunction) + { + SequenceDataProvider seqProvider(40, false, true); + SequenceDataProvider seqProviderRW(40, true, true); + SequenceDataProvider seqProviderRC(40, true, false); + SequenceDataProvider seqProviderPC(40, false, false); + Sequence2DataProvider seq2Provider(40); + VariableDataProvider varProvider(false); + SkipSequenceDataProvider skipProvider(17); + ReservedDataProvider resProvider(40); + RandomDataProvider randProvider(37); + ICompressHandler * lz4 = queryCompressHandler(COMPRESS_METHOD_LZ4); + ICompressHandler * lz4hc = queryCompressHandler(COMPRESS_METHOD_LZ4HC); + + if (testCore) + { + (this->*testFunction)(nullptr, seqProvider, 0x100000, 0x100000, numTestRows); + (this->*testFunction)(lz4, seqProvider, 0x100000, 0x100000, numTestRows); + (this->*testFunction)(lz4, varProvider, 0x100000, 0x100000, numTestRows); + (this->*testFunction)(lz4, resProvider, 0x100000, 0x100000, numTestRows); + } + + if (testCompressible) + { + (this->*testFunction)(nullptr, seqProvider, 7, 0, numTestRows/10); + (this->*testFunction)(nullptr, seqProvider, 64, 0, numTestRows/10); + (this->*testFunction)(nullptr, seqProvider, 0x10000, 0, numTestRows); + (this->*testFunction)(nullptr, seqProvider, 0x100000, 0, numTestRows); + + (this->*testFunction)(lz4, seqProvider, 7, 19, numTestRows/10); + 
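+            //continue the sweep up through larger buffers, including mismatched uncompressed/compressed sizes such as (0x40000, 0x100000)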
(this->*testFunction)(lz4, seqProvider, 64, 64, numTestRows/10); + (this->*testFunction)(lz4, seqProvider, 1024, 1024, numTestRows); + (this->*testFunction)(lz4, seqProvider, 0x10000, 0x10000, numTestRows); + (this->*testFunction)(lz4, seqProvider, 0x40000, 0x40000, numTestRows); + (this->*testFunction)(lz4, seqProvider, 0x100000, 0x100000, numTestRows); + (this->*testFunction)(lz4, seqProvider, 0x40000, 0x100000, numTestRows); + (this->*testFunction)(lz4, seq2Provider, 7, 19, numTestRows/10); + + (this->*testFunction)(lz4, seqProvider, 43, 97, numTestRows/10); + (this->*testFunction)(lz4, resProvider, 43, 97, numTestRows/10); + } + + if (testSkip) + { + //Test skipping functionality to ensure coverage + (this->*testFunction)(nullptr, skipProvider, 64, 64, numTestRows/10); + (this->*testFunction)(nullptr, skipProvider, 0x10000, 0x10000, numTestRows); + (this->*testFunction)(lz4, skipProvider, 64, 64, numTestRows/10); + (this->*testFunction)(lz4, skipProvider, 0x10000, 0x10000, numTestRows); + } + + if (testRandom) + { + (this->*testFunction)(lz4, randProvider, 64, 64, numTestRows/10); + (this->*testFunction)(lz4, randProvider, 7, 19, numTestRows/10); + (this->*testFunction)(lz4, randProvider, 1024, 1024, numTestRows); + (this->*testFunction)(lz4, randProvider, 0x10000, 0x10000, numTestRows); + (this->*testFunction)(lz4, randProvider, 0x40000, 0x40000, numTestRows); + (this->*testFunction)(lz4, randProvider, 0x100000, 0x100000, numTestRows); + } + + if (testHighCompression) + { + (this->*testFunction)(lz4hc, seq2Provider, 0x100000, 0x100000, numTestRows); + } + } + + void testGeneration() + { + if (timeGeneration) + { + SequenceDataProvider seqProvider(40); + SkipSequenceDataProvider skipProvider(17); + RandomDataProvider randProvider(37); + + timeSeq = testGeneration(seqProvider, numTestRows); + timeRand = testGeneration(randProvider, numTestRows); + timeSkip = testGeneration(skipProvider, numTestRows); + } + } + + void testSimpleStream() + { + if (testSimple) + { + DBGLOG("Simple tests: write then read"); + + SequenceDataProvider seqProviderPW(40, false, true); + SequenceDataProvider seqProviderRW(40, true, true); + SequenceDataProvider seqProviderRC(40, true, false); + SequenceDataProvider seqProviderPC(40, false, false); + VariableDataProvider varcProvider(true); + VariableDataProvider varsProvider(false); + Sequence2DataProvider seq2Provider(40); + ReservedDataProvider resProvider(40); + ICompressHandler * lz4 = queryCompressHandler(COMPRESS_METHOD_LZ4); + + runSimpleStream(nullptr, seqProviderPW, 0x100000, 0x100000, numTestRows); + runSimpleStream(nullptr, seqProviderRW, 0x100000, 0x100000, numTestRows); + runSimpleStream(nullptr, seqProviderRC, 0x100000, 0x100000, numTestRows); + runSimpleStream(nullptr, seqProviderPC, 0x100000, 0x100000, numTestRows); + runSimpleStream(nullptr, seq2Provider, 0x100000, 0x100000, numTestRows); + runSimpleStream(nullptr, varsProvider, 0x100000, 0x100000, numTestRows); + runSimpleStream(nullptr, varcProvider, 0x100000, 0x100000, numTestRows); + runSimpleStream(nullptr, resProvider, 0x100000, 0x100000, numTestRows); + + runSimpleStream(lz4, seqProviderPW, 0x100000, 0x100000, numTestRows); + runSimpleStream(lz4, seqProviderRW, 0x100000, 0x100000, numTestRows); + runSimpleStream(lz4, seqProviderRC, 0x100000, 0x100000, numTestRows); + runSimpleStream(lz4, seqProviderPC, 0x100000, 0x100000, numTestRows); + runSimpleStream(lz4, seq2Provider, 0x100000, 0x100000, numTestRows); + runSimpleStream(lz4, varsProvider, 0x100000, 0x100000, numTestRows); + 
runSimpleStream(lz4, varcProvider, 0x100000, 0x100000, numTestRows); + runSimpleStream(lz4, resProvider, 0x100000, 0x100000, numTestRows); + + runSimpleStream(nullptr, seqProviderPW, 7, 0, numTestRows/10); + runSimpleStream(nullptr, seqProviderRW, 7, 0, numTestRows/10); + runSimpleStream(nullptr, seqProviderRC, 7, 0, numTestRows/10); + runSimpleStream(nullptr, seqProviderPC, 7, 0, numTestRows/10); + runSimpleStream(nullptr, seq2Provider, 7, 0, numTestRows/10); + runSimpleStream(nullptr, resProvider, 7, 0, numTestRows/10); + + runSimpleStream(lz4, seqProviderPW, 43, 97, numTestRows/10); + runSimpleStream(lz4, seqProviderRW, 43, 97, numTestRows/10); + runSimpleStream(lz4, seqProviderRC, 43, 97, numTestRows/10); + runSimpleStream(lz4, seqProviderPC, 43, 97, numTestRows/10); + runSimpleStream(lz4, seq2Provider, 43, 97, numTestRows/10); + runSimpleStream(lz4, resProvider, 43, 97, numTestRows/10); + + applyTests(&JlibStreamStressTest::runSimpleStream); + } + } + + + void testIncSequentialStream() + { + if (testIncSequential) + { + DBGLOG("Sequential tests: write then read alternating, increasing sizes"); + applyTests(&JlibStreamStressTest::runIncSequentialStream); + } + } + + void testEvenSequentialStream() + { + if (testEvenSequential) + { + DBGLOG("Sequential tests: write then read alternating, even sizes"); + applyTests(&JlibStreamStressTest::runEvenSequentialStream); + } + } + + void testParallelStream() + { + if (testParallel) + { + DBGLOG("Parallel tests: write and read in parallel, even sizes"); + applyTests(&JlibStreamStressTest::runParallelStream); + } + } + + void testThreadedWriteStream() + { + if (testThreadedWrite) + { + DBGLOG("Threaded write tests: threaded write and read sequentially"); + applyTests(&JlibStreamStressTest::runThreadedWriteStream); + } + } + +protected: + IBufferedSerialInputStream * createInput(const char * filename, unsigned bufferSize, ICompressHandler * compressHandler, unsigned decompressedSize, bool threaded) + { + Owned file = createIFile(filename); + Owned io = file->open(IFOread); + Owned in = createSerialInputStream(io); + if (compressHandler) + { + const char *options = nullptr; + Owned decompressor = compressHandler->getExpander(options); + + Owned stream = createBufferedInputStream(in, bufferSize); + Owned decompressed = createDecompressingInputStream(stream, decompressor); + return createBufferedInputStream(decompressed, decompressedSize, threaded); + } + else + return createBufferedInputStream(in, bufferSize, threaded); + } + + IBufferedSerialOutputStream * createOutput(const char * filename, unsigned bufferSize, ICompressHandler * compressHandler, unsigned decompressedSize, bool threaded) + { + Owned file = createIFile(filename); + Owned io = file->open(IFOcreate); + Owned out = createSerialOutputStream(io); + if (compressHandler) + { + const char *options = nullptr; + Owned compressor = compressHandler->getCompressor(options); + + Owned stream = createBufferedOutputStream(out, bufferSize); + Owned compressed = createCompressingOutputStream(stream, compressor); + return createBufferedOutputStream(compressed, decompressedSize, threaded); + } + else + return createBufferedOutputStream(out, bufferSize, threaded); + } +}; + +CPPUNIT_TEST_SUITE_REGISTRATION( JlibStreamStressTest ); +CPPUNIT_TEST_SUITE_NAMED_REGISTRATION( JlibStreamStressTest, "JlibStreamStressTest" ); + + + +#endif // _USE_CPPUNIT From 8e8698445cd6924d536fbc7f5f49eb93c64c5259 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Thu, 13 Jun 2024 12:07:58 +0100 Subject: [PATCH 052/151] 
HPCC-32053 Avoid crash when secure roxie is configured with no cert Signed-off-by: Gavin Halliday --- common/thorhelper/thorsoapcall.cpp | 2 +- esp/services/ws_ecl/ws_ecl_service.cpp | 4 ++-- esp/smc/SMCLib/TpWrapper.cpp | 2 +- fs/dafsclient/rmtclient.cpp | 2 +- fs/dafsserver/dafsserver.cpp | 2 +- roxie/ccd/ccdmain.cpp | 2 +- roxie/roxiepipe/roxiepipe.cpp | 2 +- system/security/securesocket/securesocket.cpp | 9 ++++++--- system/security/securesocket/securesocket.hpp | 2 +- 9 files changed, 15 insertions(+), 12 deletions(-) diff --git a/common/thorhelper/thorsoapcall.cpp b/common/thorhelper/thorsoapcall.cpp index 6fe8f4c8bd6..51ca07f8485 100644 --- a/common/thorhelper/thorsoapcall.cpp +++ b/common/thorhelper/thorsoapcall.cpp @@ -1305,7 +1305,7 @@ class CWSCHelper : implements IWSCHelper, public CInterface { if (clientCert != NULL) { - Owned config = createSecureSocketConfig(clientCert->certificate, clientCert->privateKey, clientCert->passphrase); + Owned config = createSecureSocketConfig(clientCert->certificate, clientCert->privateKey, clientCert->passphrase, true); ownedSC.setown(createSecureSocketContextEx2(config, ClientSocket)); } else if (clientCertIssuer.length()) diff --git a/esp/services/ws_ecl/ws_ecl_service.cpp b/esp/services/ws_ecl/ws_ecl_service.cpp index 444f89c27e1..0c330e61cc8 100644 --- a/esp/services/ws_ecl/ws_ecl_service.cpp +++ b/esp/services/ws_ecl/ws_ecl_service.cpp @@ -271,7 +271,7 @@ void initBareMetalRoxieTargets(MapStringToMyClass &connMap, includeTargetInURL = pc->getPropBool("@includeTargetInURL", true); dnsInterval = (unsigned) pc->getPropInt("@dnsInterval", -1); if (pc->getPropBool("@tls", false)) - tlsConfig.setown(createSecureSocketConfig(nullptr, nullptr, nullptr)); + tlsConfig.setown(createSecureSocketConfig(nullptr, nullptr, nullptr, true)); } } StringBuffer list; @@ -299,7 +299,7 @@ void initBareMetalRoxieTargets(MapStringToMyClass &connMap, farmerPort = port; const char *protocol = farmer.queryProp("@protocol"); if (protocol && streq(protocol, "ssl")) - tlsConfig.setown(createSecureSocketConfig(farmer.queryProp("@certificateFileName"), farmer.queryProp("@privateKeyFileName"), nullptr)); + tlsConfig.setown(createSecureSocketConfig(farmer.queryProp("@certificateFileName"), farmer.queryProp("@privateKeyFileName"), nullptr, true)); break; //use the first one without port==0 } Owned servers = roxieCluster->getElements("RoxieServerProcess"); diff --git a/esp/smc/SMCLib/TpWrapper.cpp b/esp/smc/SMCLib/TpWrapper.cpp index 1ad7022709d..05ab0c82c34 100644 --- a/esp/smc/SMCLib/TpWrapper.cpp +++ b/esp/smc/SMCLib/TpWrapper.cpp @@ -2144,7 +2144,7 @@ extern TPWRAPPER_API void initBareMetalRoxieTargets(MapStringToMyClass staticConfig = createSecureSocketConfig(certFileName, keyFileName, passPhraseStr); + Owned staticConfig = createSecureSocketConfig(certFileName, keyFileName, passPhraseStr, false); tlsConfig.setown(createSyncedPropertyTree(staticConfig)); } else diff --git a/roxie/roxiepipe/roxiepipe.cpp b/roxie/roxiepipe/roxiepipe.cpp index 5599cbd2df4..7dd068d552d 100644 --- a/roxie/roxiepipe/roxiepipe.cpp +++ b/roxie/roxiepipe/roxiepipe.cpp @@ -677,7 +677,7 @@ int main(int argc, char *argv[]) { #ifdef _USE_OPENSSL if (useSSL) - smartSocketFactory = createSecureSmartSocketFactory(hosts.str(), createSecureSocketConfig(nullptr, nullptr, nullptr), retryMode); + smartSocketFactory = createSecureSmartSocketFactory(hosts.str(), createSecureSocketConfig(nullptr, nullptr, nullptr, true), retryMode); else #endif smartSocketFactory = createSmartSocketFactory(hosts.str(), 
retryMode); diff --git a/system/security/securesocket/securesocket.cpp b/system/security/securesocket/securesocket.cpp index 4c6e8b10a35..616203b62fb 100644 --- a/system/security/securesocket/securesocket.cpp +++ b/system/security/securesocket/securesocket.cpp @@ -2051,10 +2051,13 @@ SECURESOCKET_API ISecureSocketContext* createSecureSocketContextSecretSrv(const return createSecureSocketContextSynced(info, ServerSocket); } -IPropertyTree * createSecureSocketConfig(const char* certFileOrBuf, const char* privKeyFileOrBuf, const char* passphrase) +IPropertyTree * createSecureSocketConfig(const char* certFileOrBuf, const char* privKeyFileOrBuf, const char* passphrase, bool createIfAllNull) { - if (!certFileOrBuf && !privKeyFileOrBuf && !passphrase) - return nullptr; + if (!createIfAllNull) + { + if (!certFileOrBuf && !privKeyFileOrBuf && !passphrase) + return nullptr; + } Owned config = createPTree("ssl"); if (certFileOrBuf) diff --git a/system/security/securesocket/securesocket.hpp b/system/security/securesocket/securesocket.hpp index 1dac6af9c27..415040e2a3e 100644 --- a/system/security/securesocket/securesocket.hpp +++ b/system/security/securesocket/securesocket.hpp @@ -95,7 +95,7 @@ SECURESOCKET_API ISecureSocketContext* createSecureSocketContextSecretSrv(const SECURESOCKET_API ISecureSocketContext* createSecureSocketContextSSF(ISmartSocketFactory* ssf); //Helper function to aid migration to the functions above. This should eventually be removed. -SECURESOCKET_API IPropertyTree * createSecureSocketConfig(const char* certFileOrBuf, const char* privKeyFileOrBuf, const char* passphrase); +SECURESOCKET_API IPropertyTree * createSecureSocketConfig(const char* certFileOrBuf, const char* privKeyFileOrBuf, const char* passphrase, bool createIfAllNull); //Legacy factory methods - should be phased out. SECURESOCKET_API ISecureSocketContext* createSecureSocketContext(SecureSocketType); From 128bed90b093b1155b12a1fbb6c11c2897cd9359 Mon Sep 17 00:00:00 2001 From: Jim DeFabia Date: Thu, 13 Jun 2024 17:16:38 -0400 Subject: [PATCH 053/151] HPCC-32057 Update Containerized Doc for clarity Signed-off-by: Jim DeFabia --- .../ContainerizedMods/LocalDeployment.xml | 4 ++++ docs/EN_US/images/WhatYouNeed.jpg | Bin 61987 -> 24743 bytes docs/PT_BR/images/WhatYouNeed.jpg | Bin 61987 -> 24743 bytes 3 files changed, 4 insertions(+) diff --git a/docs/EN_US/ContainerizedHPCC/ContainerizedMods/LocalDeployment.xml b/docs/EN_US/ContainerizedHPCC/ContainerizedMods/LocalDeployment.xml index fe3f3ef7419..4952471fb55 100644 --- a/docs/EN_US/ContainerizedHPCC/ContainerizedMods/LocalDeployment.xml +++ b/docs/EN_US/ContainerizedHPCC/ContainerizedMods/LocalDeployment.xml @@ -13,6 +13,10 @@ All third-party tools should be 64-bit versions. + + Note: When you install Docker + Desktop, it installs Kubernetes and the kubectl command line interface. + You merely need to enable it in Docker Desktop settings. 
diff --git a/docs/EN_US/images/WhatYouNeed.jpg b/docs/EN_US/images/WhatYouNeed.jpg
index e54752c68e01a63e5ba2b56a84ed106b89b9dd3e..77b5f56c9f43e40b6f7007435d598aa729742520 100644
GIT binary patch
[binary JPEG data omitted - literal 24743 bytes; per the diffstat above, both docs/EN_US/images/WhatYouNeed.jpg and docs/PT_BR/images/WhatYouNeed.jpg are replaced with updated "What You Need" illustrations (61987 -> 24743 bytes each)]
z1@I_uvg4zPiBk>Ta*HP5Q8P&^b18q<`aFh^cNB;183#3mvjjcSw{T7x+Ie+T4K8ml!yR3p12y4NjO!+O)=+ZX=PtHIPqPFkZ~la zx!AsaQ1>fuf%>=7+(Mz4ciA=7cqc`Wo}n4l7C_5Q_+u+E9JqrT_B#0RP7S!vYJ74+ zuuRLc_zLjdo2=AvJb68S7B5e-KR1~y<2kEn#QlAbYOJ7+?K)6!kv^7#vwDh?2%h4h zGr@hAWrR<}t|^S&bmz_aUk7moT{<`FNCXNx1ezL}uSta8gI4&2RKM_Eob2ozi*7d% zxO<3tK4i5%RnB-cv0tNAMc_5_PIK1i_|pDZ)$_T}V5{}yW-~c#%L|xxZ?s75l~&+P&pz>J6qUH zdwIR_0sd0c$YF9W#k(Tr0Dmi}teH^7&9)L#CnRsJuhIR&x;1eyQN3Nan;3I(&%g(w zt?V-Pz6-!eQ8cr2u>ERXO)wEETz1?pyD0kFX9d%ohy&D`)wedFH(4Jmq-QU4sy(K%u-n(QB*R@Y^>CouNZ7M8UZ2%P7?WF#fl(lv>>UkI~5h2yrMu8uBI zBs96MTa)n8`-Y`NO7A*Xzkhh2Mg68tb}M}0Lr>zzo1G52g7C(6i^aiwlm2PdBe#~^ z`xxlQ*Myim&g&Bw&ZXntt?MgB-tGcWRLyh>XU^TDRiVWjUWt_Dd24mMTOK=zpf9T^zO)gRwZEcN-sax64__>sYJ9AF#_)3HU-<)F5wia9cJkM`iT zRr|ctSv~!Eqo`{2^v?ID$k}hnpyeoLGlP7-_6KRAl4mzTV3CaW9r@{9QK7-PL;O%d0JeMihwMWyuN&9zreY(3gyauwzxBAEJt9%D1 zF2p=$O<|1SfzUaj^>|}?@Pf(nhHjdu>dL{E^U7vh2-D%2p|ZzC#rf>oH6Q3>Om)f0 z=Z5T4?cK{}iOUFo_??C(ZpX^BUMVIj6qpOJcQ8 zUDqnWy(Q3s1@TgRHqi%*=Fsa4=-QP)yfSXcRxkgv-?(?pncQ|PE zL{}@)Gx7|s+PjrF>qHKZkPd&J@%wJg1o~dK1JBe)VxDw^*2KMTihfCnAuWk$3J@i_ zzEk>f*qTaKj$=Q&t0j55e3=~2H^%b z2X9z_nKR$faPKtN)LxImN!{eT!m&oJFyld@=~+cp9s(~dUE6_~;$4aGN40V_WJe8T z0_&mc^%OPk--uV@wDxFhzCYorC-YisIxX?MbUAfz1n_cn+WjBGj02Bb)*X;UIqpM> z>lUn6%zO2D9oXuJ?b2?UqVmfJ8JpG?$vEQAL^}Ybe*+~uN`5oxRb3)x)C=9$LRL>0nd+~ z066VL3*9z<<6mjC>HR*8HOgjEeAqH#*UtSV2u)(WSue{RDLj~{n=k=%2n5}7++$s9KnuKC$B07MHkF_Hdi6{v7BEr^$t{cO7n&&fpISxMi{ z8QPsRy%lp(q;K_$6`)lq2}XYZv&lI4x|{Ef?WWj|HhGu_?vnFdQF)HwTxGOQ>EPpr zXRPRjCKf$t1|GcvTPMkLKN$ZufU`p;&HSW!d)2eTBF&#{Z;uqqOgdHjSKr_)o;AOX zF=~W5Lh&D|LiBu`p*?|ZpK(d<03EX~Yo?p3GY}Md^g|d>NHQkCdit}Wl|vlkOffq4 zvkkimxf~YYA9qb2V6F68mVS?2;-F7nDc0THTK2M--Xhum7P+6Y*!b#){ND}dP;bcc z#^!~pNR2Z~EB@K|am=V7aZ>RmsnCwan<{Sf#2;$@E=#}fr5~T3-3#k(s%K%OvTc2T zG;l7nb|(6QH(|7>(lNFZd&9nbqR#iTYUIJs0Q~(} ze-F{OV%D)L+v_ixEJ1e7A!_=PJyy%+m}&j~hbezM&_+Y^vn*%X;FS__;3KQ2vtO!tfbTtZfkus^@qy8$y_JJrB;1NTc3b@!Xj65RiuZ+O5qc? zw{ci*JFc?=i=KI*53lw7-DrW&a;R|d;Q!76GN`g#g6gLW{8jW%QnMhE~kmm*F^ab#jiVplZ!BeFf+OiMaz8A;TTk5 zG`Nqcl*$6(Id5Uk#ru03ep>6x&E$|!6s3~)aGY50qXSf~C4d`4A1U!u)NbW@Thw}| zBsWi8?AP37)8eF^$0pK>wVGJ@am;M|?Orrw7Ng1U$&&F$g%)>-@#-H62$1YX1F@P92xIN$>v`3i%p z+ukz0tQM!!z3FI) z@m~w3pj1S<$s=isr(cyDn4n#@P_}NWqHl;QQu+&$LNBWygI3bAWZXqv=Xe_WpBJgQ zVSTJJE-RxvS@w(`(jozk_sNJN@A@;0qA}WAFd-zsuQ9U3JF{!(vsrwvO~QMg&yYh! 
znf97Sh*txe5cXD;#Lv1@lFsYq=b~JqLEewGXp9pEv2U`3+vD=!3HPl(CwGOTXGGJ- zLxiZGJB8x5@JS3vnQXrfj<;-K*q17XI$PMWQ{}t0GsqDkY6NxMqTth9d>s|rmqyab zBa?@??%eh~Z|wkegxM7Ht)RwZvL)r(Kt+F(t!MLO*9s|>s5BpRV!u|wqMXM+#oxvb z)*~IteDMBjRQ3o1ou|S3+-QkxgG`&lC9L$HA}xHfsIcl2@k?RT!O7Waa;^Hj7lbrj zbm(sonQ8YkspU4~YSpw*<@HG3q~5c1VkrBdxF!}EAfvbTqCg3WPQ>>U!#J5gz4<*H zMi0KxNJdvP%4c&g++>?$yfIVM@JAZAlB_Wuu(lrRalG*TZea2}dHuX`Wvrv2YV_th zM;3}EX*(@L4{J@jj#Ar zYh92p)xpqC-MS}}CsP|Y6!Z1Xf|?k+7Z)e%tZLQcZrlUex0i|Pk43p`DlmwWG3P-a9zS5rERa#C|;#@cdH9qumoVIupRnrw8MmFGRV zgKy~~bz&yEX$t)Md3jv9&ZA=VUD!D@vqA;1SwU@dsGfzVh+P9*;y~snZwOk6Vrp>##f0-c0%#dpPv$H@zdkkg!D4K%9~P;ipX}B z5qHynX2O!`OlL{P*}m=baSoYi+skYqlsF!KgO`9)Ezt$ZoGdWjqDM~qxr?n!+$u?k z_QH~CATff2!2`Vqz5KQsjBjtzakDbZk&PfC}VVzbRx^d1xH3?PZ1U%`9qoF1bf;0!brPz8XLIu@Q;?;Y(9F}IdGTt z)|Oe_)@#|+Sf^lyxJ2kcr%2w=xjvEe2zp|U@FoQ(b~&WIf)l}y9`Rlh>5vf9V>1CI z?7YdFvHRQ+%jij3ClzQpy!&{1wz)FSH$KXnL}fAf{?VYa&nU!MS z{Vwy2K2F6Ui>oY$nUm8?efG_psl|Ng4%fS_T6fnJ^Ai@9=gk!HE;d?7IGeSCLDL$4 z$kl#QX&xoAx7J+e-U^EEF!wX~%oE!r3AD1txFlX$qsU#Slgs9sNO!D4Om5y=o?_>q z2htb(A*KFJ?(+;%sbWSqC!5*S7vLM!pB0M!!T)jNyrh&8f8ilnYAKO(dE&}l%xv~9 zIwca>-(w{yPR|&VZq1P7sxIH9fj;@ZXepqfVqGa`uqZC71((5$GL;owLMPfa^vQC} zlWLZ}$jziS3L!GCaT3}W2Ax7>ieDqW$ z6f}`kReBT`&1&mEIE8(q4{@Ru@&ZUcyeT7I#m5Oyh`2?i{k)4GuOfAT|0rJfaH?KF zwH((fByU}URL(g_SCyW7DTg^wLmfUkFssR;DUG6I{2fhv`CwEQ2VKn5DoiS1U*)%x z;GOFVaAsY{zzBSb#%iL%u`Z4-HAEFFpC?j1=o2m~e~gHuEQY@qwIqD=x1+4rPg&r? z8xmmjeoe<Zf4D4 z?fs+soI{%RFL{djE1i1_LhTKpHj4>5WD_;Z!=3*v(rKrc;r@|Beg|W6ZuUB($;JlX z1yt3U>hb?9_SX*ZDt)PYhVwLZ237I(dsSxQ@|;9jIO*td6eQhZPqO~X{NGtKsdWRwBUDGIbaY#U_g#P<2;IKoVQX06@sG4WO0`Jp#&Hcb!Fy z5({>+$vkUl<%l+n6`-Ar=NifUqiJtdqE~Jw7L0eQXzjZ$8Oy4;ZMv=*DbR;TPi5pN ze5Va{`?}ztCSAek&^Kny(CFp3jNX1PPWnB;vohF|C1f3@fjE&qmgT6In(4au@}aAIf+g%5hB zP)--OV2+QHE~TC$jZC0Fe^Xu{fK8*s8GkZdFkivh4PBD1aY=l2L@4f~j|Xj(*_wEq zVoM&We%iWBnQkVl(t74+TBA9y7t-_v%;Pp+O_Z#{sGny1{-oXgs>ZzCR?}z9_4PDQ zLduFvmF8>z$023?u3m|_&D3bdr*YZLeF}pVZpI8zhjPR5-?-&IxeLin%w!|)Hb%jR z*X)(g-Tn0;dQLLDVKS-Y4WAuT`kP!}&bn+d7?M1YDk2QnXTbV4^iW z-OV;J-4HZ0Yt4t4_SzEZ6VH)bh@w6@D!*V-q>~g|q0=(edsaCt@HWa;yX8g^)fEt7 z+G68g71aygR~UrS*t}m4vOi?BcT&;KV-{R&CFL;prt8X z$L&+f&x}@dX~D3U$l_}bEKS7YA&I|PO^m(TQAv!M6C}lv%n1yY%XORbr3YK6(S)qQ z$MM$h66Bp1eIV?;tJ%oG5a4B!Sm}Y274b$1;6g~*JAv?@ebqQoM9pM1ZO{80eS+9* z&`ukpC*7W}N(~Wp=9e?>8IzrP`aaaDNEbDbG(s%d;BM_$WI5#E+cw)cmec<^@#w3eIJrm)miE5Z1sm{VrBK9lM8$Z6pEWQ&6 z87&ztvjBEyxyvFMl{jX`^Ni<>C#7MHhpI1cIms)8NDKi!YUH_ikoHFo4g|@wZ+FgWgNbTnwy|YNi3_6_NJCp(5o4`z})dX zrUf5A6K8Xi?qj+)dc^yM#1^gk9u9Q(ZG)(Sz zXYS%;ti+C!(v3tgA_n8D*sFj^D>2y3-)Iw+VeJ&p&V85drKSwYjG}i?hFWE^t_(L{ z2#%f>Ubv#oe9usZii`GAJ9pH?*9S?HQN>a5ez6&1kLFuG7mf#H$$cRhVtKGUYz9R< zetAr>X|4SH$#!I$o@nP8mQXp}EhNl#baW+M3&i){{u=n!Zp`H11w--27ZoPv#zcD&|d113&pW z0X?Z-ykTGNrv*G?9Ll50V*2u$!igAQwnD!z?x)yYeC|4s^od59W`5vzpRjmf6T`*c zppjrR&$>&&=yC#wa7v3T`vK?kS0 zap@n?{t1^ZvF#*Nd~6^A^=z8{RW-p)eYCqsD6f~WtgI?2nX~&|H@EA4eEK4o?|$aE zWiab5f&Us`)nsbqpJWp8NCp{bb?+~)9nVMYBjB@ohAV&fRKcM)()0f#&UjyXq12=g z6YyI1oV$C@@zR$Gq-E(aIA-pW;=id@f~7s~QJpF-_=6{RHt&}{PrvEsL=>%N(=BYx z&@0yv5OFNdg4WL~_nQ%QD5hOCe$GbpxfW8_2RwGP78M04@nVIRK={Z{_x1By?`^!! 
z`edLJ*cSeHY4wdeS9XiYk^BO)kSN)W$ge3MDyIWe>9sYFhfW4!SR@M>#AFj0aBvtq z62p_7GJAR4ir8q8ll3B9<$gu;&h?`f?8l&h{7vS8ZsAWVt>+=gOh)JIsv%q-KCvj6 z?oLciDfu^44@?`}Xqm%mCMt_$c!!-+S<@W-HvQ0?{7u)nUWrhz zW@lV}>>jtqZHs)fo?#`=_`EPqE%s%b?7t^9C@{Y4tp8Y@tfkA8@^J&nb65LRd7(O@ zGW4L^0~Jih^X0*UYPcP%V4m-<#+|WD_5kqYozqjUF{60kJKsD zo=1M-fEdiCX4JnSt?#?Jwaivew{e~V_0~|?mbZ1h7t)O_K9(>J0e=`~%4(v$8;gzt zWA+L-(V#q(>b_U+D_tsL_iEe@CH~osQ`Fw4uN$9oN|k4o64)SqZ#F*vtirmF65mXp za|Ljji<2T)|K-;eg=IgLGYe9lg&5v@o1vr zL!}Ya3sKKEMEe&O*QWiC>R`727lw;lOP!ZDKMMI(cw|{Rg$eAIICbCaP^&*QSnA`c zUufE_d;+KVWwO>pTH2oT$c^NHJP&(|$fdYd%X?t12*IivXUea(wOUPB2?fAt2LiTSycbJ+9V>~`uL#bNrcdC3>$vuvq-PVC3Iy$hf8S5qd_D+P!l3JCD>D}Txhx1*SY`vstmE6ph?w>NQ z0Asr-i{hhKliCGS6WtRR@fa18>Hd~vIkhKl0s2-Bk^wr7u03eo;P18Ye5sszp>KO$ zu&$n5VohG-{X2An$U0=2AGa{ zr7OSCstj<;FKxwiSL+JaGB7o2WqJ@E=~~rsRCaR&-R{$wpZs0Jcn*co4yA9Ck`zq0 zGTz$}74GU)Tv7%d#^h||8#@T6I%C*I@7DR;97=d;aw}&%VqW`zId`0=+0_GixHfgZ zYO8iE`c`?lW>pLKO>OE4FJZ&FikY1jt^Q|Mz(H_*gu6hi=1{WCxWf9u(rv|wLcvna zB)MU|fr!P{lP++a0Ey6;k*@F zxLAmu7%MzR1D{ziR9+W3cwT!Li%XjJH8F$D9(=2c%iE@JNqv(XUsh0fNVBVnvz**T z9|p-(mN@7kpP#e^&m8D}6+WMcIGs^!hu%wSa1fEuAbo9Je8RmRY<7C`#k)(LMdK~n zWe+88f~fkT1}A&LqoOiaAG@#I5I?P~wwnDB#M#%C3@A;QhAkTzMV%XKGLF>p0-c*} zxM3Ej@EuEid^_S*2M2u(&ufS{9~fYSD9<(Sb>oLL9kpnwUdr}ibEcpvAttarr(i>h zdSD)R7eXbETG8zz@*zey?G&Qq0>GO`=AdRK@j-U{vB^tJQkmW>m0x^IInFJO^LKHN(<`F;_e@J#0f$#^rz#l>dFzCYh^ci;s}qsvFzqV2xPna^j{ z%`%rqj2n9P#p_$c9O-AX6FRP5gf|$N7N+K_zAkWzEUFx5HCBF~UK{x+I&@MLKGN-8 zuy;l{whmW7zVn1W;*fCaSYcen(!8(zSSL!SNM*v~z5RJBnS9ze_YY^ZES#6dSm-;U zQEZ#?j2N9gG98#|RJ;gnFO(KK^uwx>8fvtL)!r=;YHAS9TIU;6U^RG)>L*MZn;U(` zJQ3A5OiGD&SbAS8Xs9u4n^b7~*fh1V|M>{FFt1d(UVXhyNfs_?g2NrdIRQJ$sqI;a z8>L;jKgo_V+}p|O$pTwN(wNu^L#`AKXL4nXM8P+c4I1w;siq~;x~861)w%_}eRIA( z-7%_bz|)5{=p2k$c2MBdz)t?O<|I6jz$2e${&V&1fJw7Q_~60_BV}U03LAQh( z+UC+vV)z5g3+&sA8@0WT2-X_Yy?oIov7Ck99kU#-Nn(>1(Ldj& zeb|b7DP93(q!lM?0(J6WYTk<^44}%Wt zla^D@Yhp+r_0SgXjEr!(0}lLgZ+)?NVm>NR1r%4+Orw9bpD-FEHtMVjX>AjlcduXF zwK#pKqGLigt7{T6DYUpepicz;h$!6s+sSRpab+`;Tw%QOZ_TaHS866VHEcsoN*2>Ip(dsx+ zQ_L+#54sc%w;WdDcqcZuC}T$@1;0Txf7ygr@;a<>lFK|OI78o#H*9!#ZgP;cxQb%I zA@5PA0+G{}wGgciaV`-ilXK!(;9}_X{xa!`2&ZLx6|qdT&~WdWufG&g?DxJndIbwa z=^Jvt^xmwUe%tl><%h->?1Wrw)hN_Hgd|O`N{qJ%<{!Ph{Z?=4Ge`IGbYj}QLhhoi zY^#ik%d(F15cf7g1XJc^cHKrQ*kv+5G2o#9ZsZ%28v0}*_f7KMq?y3|e6G(I8V}3z zg=Tv>E_MFYp@?3?!(u|alv+b*oe^c{rt%|4q)5+pyCLtki^8Ltp7_)Ag^l9x)r&U} zBzBs;a?f37B$l?#RfdP~yo*x~MF)N-Xj4_SFm3v35XaP@0RrDAA+?1#gs=%+0{ey~0ujtCdO5{Q9EUaf$3BGEY_FI3b85xGKikIpIf3RU~yg@vf z((55=G&bFyt3Mt)9#=c*60^X_W=O$K%Pz0q>f|DJF7m zEk5c&E#@4kC^}>-`BGnHPaN}B5MyJj7z)3mee7MWwOy*)n*B$P&YrQ@@0V9;NnTu( za}|B=w0r%I*vxXfNnsr0j{X=~T$!h$@+XbNEo{XHXD8~`gufE(kx;Qq{>0o&>)S~1 zrM~|x(M34ZUXxfbwuAk*F6C)f4ax{fdd|j|~Fw$fCfBcbM~SqO~IGvTAEv zy^lAPSII~&16B04i~O>xX`l*Y8DI>Y~{>r{SY{bbGi6) zHN0f>YZr=?yP$^?mGA#4x{dAA22bzZ2Or?f?;QHLY$|;_ z!6-bVYbfgz3-R!^%xYIFX4w}n2#{vqEFyTZgeQR6Dwc` z8h#7FzjccS{q=*#AKJ`ch>*MGl${Y0)3EOAjdjY^Y>!u?1Ukuz)0Vy|^~bfHSPr1< z;kI#E-2UcjA--PlIGstCH{YAH7pG9`r_fIFYj=uY{%iHtk+&xyVuY>4iF>RK)1~n}m+6COBsBz@?P>_RKR(nkCB(#NUSjFF0qzz?B zMy!N`i@$guhkLwsiq4!Z>)Gbk1TXYU!7;<+sW-(TN67znbB|&TH7jc zhoINPVc)l5zvPNsai9A5U2Kj5ySE(K`~FQ=6+a9)+T0cVyv~o{Z+IGsi(Ubp&W{8r zDZFQx4Xkrn8jKF4>`kQmUrbE#ZK zd17%E$lc&z?SKMFmjIyhCajymIO#a`&fldI6219-ERsSuTjQ74agkV1UH09d-UEO5 zw7UVBpR|?IJ};ED;*HnX)WX1m5jumq%@-|%JFK&EvWMQrZ^=Fiy03-w`HlB_lNMIr zXVD9(Ie9XrX;{59dZs(qq6L+#mn_hZvNhO$u$GC(*dRJ zeYL?vO|bPIe`zy743FaD;w=IGf^NNt0UG(OlPIoAMU(r32|5#+@Rh{mHJgT5yTN;k zCd?)D6?B5U?LNyfPXkKBv$Digt(2L$5h!f#z52>#bokl|OMpWAnHb5FG%@(6=$Hau zd0%1pC+xT89!+oETWmF^Z9MDNLNjNP&XcaWo1MkUKi4+KI*CTJhWVT;GEUV>LS-Px 
z2|>RNAbm~^{z&J05)Z#*xq9$5oeG>S-!nuOHE<8d)I#9``GY2UVrOhCANt#2IP2}Z ze2DK}9he>`dXdQ%5Vk#}<_ptiebs_`9CQVAum-lg*u@7|<&AF!U@p)%jD{krq&MV+^~|AsbJ6pcrfpj zH3R7TCf$~oIZTi?{xr}9A<1~wz|M^8#t#$-(~lETtQHuq0HP+C%gezF&a1oFR!#J% zq-_Fak5vd&aio<%rr!dfrvJGPuZja8l!HAHaI|_H8cZqPlO2D_U2MmU7&;WDz)Qkc zB!m^gR}%kC6K6~u3H82oi{JCmU1}`r>PBYlA>>y$f_qO7g9I-(Pn>B&LeJ&Cg;JIl zypUU9x%GiTE1PTbgHHz_!TqKJ*pXA8Q=5HY?!Lei>lhxO|K(-+qw3Spz#Tf!XqA2A zn_;rApwhd1-LIgM|L@@C{P@fnYq`5Z`1){(&LyMo9>;R~R%HbyN8QpYBZ7pEy|ur+ z8!1A#xTIRMfF_w2^RmceXZ{+0@^NuAuZoz@jDCy-rJGYaWDMs7oYXAOn1^G(T_Y7=9j!^5evJEEH-@~9}p_g|a zdY(iPl)gE`W4eCV5H*y8w5U%e4ok^%rC#JdJFhMN{c?Kho|g(ND)wH=bV2F`&ura@ zZlJB^zmY>9XvoLjl*B49+GsSS(3btdYD|D-6&`CL`(INEO{9rZ-C@R4^xW=+P6mBBOfp4no;C zgde3G=eYY&+|B7wnVsq3!@IfI83RkoQ#Cc!w8iX=y_(zuat4R#i(ees_x@Y3&_U{< zXYp7G(9V3;{pv|E9dg$&azha3SVqe2V9&4=9C4@FGDekXB!J%2=#bR+12s)T+D-qD zj4I~0@JZnrKa-)<@l9Uoktx3lgF*-sQ5U%6ET_F@r^#80oHJ7D%uC(2s|%O>6Q+ z7gb!5Kmqa*j{@j?`O$%k6)!|ef4n~&erhTI4* zmTk!O5CjyMk!3L#*kufb-k=u4KMl23Km1?By>(Qa-?BE?xHb|T8fe_zLm-X2Lm*g! zyEKhUXxxIk2DbzV?iSqLB{;z~XhJ@I=X`U{S?8XaHFxgJn*X}r)$i`wPt~qnYwvoh znB_P(&_$!69&R@iF-kB3tQj2z0id0fWTG@$DzGEH5{52~oEVQHash0_sy=quPEzz% zDqCS2Oa?W<(f6i}7ew+#?eryxoTSNVucJcQPc!YnFgi~zsAGvJ;~|7s0EKrYhOD=! z0d^ORKrbgig@RB)#HZ5K60~UPS9^-QeL_w}#V0`F9z4Rj9kcqKN4iE}!T z-^aRNSSwyQ{mmf6mw{Z1pl) zDT{dBxb#Z#Ad|f%XPH=3llzlAK}`{lS$}?YnvvMP-aMYGx|UVgrRhj`rsg^WVIeYe z^@|UqNkFm(`Lc;9ti2Yi<^C;n_c#ep$$&2oeDP+~@*Oy&VK z5{E3rt5xz&Pm1!op(Zc$ey=4-5DHT^aKkd7m7kQL&XAW-bXXaR>Y+CkCH3^lHxqzA zFvge7($BqUOD3&utPWGj!Um&JrOLC#43~)@wEot(gZ5a22Y?&Yy`*E*sK7A?7b#!; z)Frc0>h>OJ+Y)&9_=UlFpOySMISMufP1hM!8Jrp1@Y{XUl9rZ45+Zk559!I zCw%|R(1ZYl>$ew(%>Z>qiy2G(~mg}z@8q$si`tCc6b@YgF$fX+b*Lk zzP#ba4+MA5F-_F3j`Js#G)c+Zg&$3R zZ7&NVLdKt4FnJsnmuALrR2olm>@i6t(J0R(T0NSw2}em+X>0NVl8c97n-@($NPODT z=C>r(yHg+jiE1mx?qGV`)!cjW04F=DGQ6y=T&zX{qwoY`k=zW+-L+=6@Wn|1kuJ=@ za(9}ghzdreI+F@X&+}6L+kwsP^X;(5sw+zPR#3d;?WFw!g_9j|^B^-J3PfQ%9Vx<5 z`ke(uT~p~xLwAsMIGTt$4UuRFtarW^VVOIxR5>M07wqH++1Q78s*J(x?${)wRY_X$ZK1icEj+5F+_1($^ACIG-tG-}ks=LqLggly71! zNlL<({;J65Q)tvg%MJAK4BQvxsSFJZees)yWoo+h(j&zEHy#N;F?<)zNqXy4Q2$Y^ z)-i1%PXIZYj{n>wB&9`fiQFW)x*iVLFa&L~Mg7FoDHyxr*hz_RA9F?JJ*SidfFj!o zS~;4w-A8Lez61)kXXqo~INLD2@k*3Ym1sPldbYOUgT;nbE}wd~EknIO09$)gKej21 zsn=(nKN{5wl>PuT7Pk-@I!3TK$w9i)ENK&fOfe4o>a;?86-RSQ)x>&+L-A^BR*9Kf zYHlMH8;WdDwKbQuvm9prbiqZj@LcoPU|wM}PnrNXylKTd7wzHa<_lQA{FnOf)P76- zN}C1|5r!$NvE9uKI_LulM0=B{(+Dvp8$*C%+=# zb7kE$u4j^AZSKFRB2*4k{lEEB-#~j4{{YC<7=;4q7lp;+rGM8wS%Fwyht14~Y z2gw{Drw+((b?g(#4r13Ea_A+aV+fV{f%FDY=Q2s~wQ|)ty(qpSMsRq)IhU*WkSY9w zva=0+lkD0{4V6)5{>qv3cc`@0&8_y1H(Qe@tO`YZpeXGf;ii*4F^pEuAf>58!nlH5 z1mt9lY$(k7mS?($#xBaJl#o9kaXLNF$f)%-$}_|R_`BKLec=@&CUMoJ19+8I%=FBC zbG0DQ0h8Ux&F*x0rRyJnJwp<-z*(oH7m3DaGs6+CkBb*EqtkG>!GK zwBOr_cs|+>kAeq8^tXpOzLj8nt&Lf_ifLB=h%8)!P(oEFanuxON&H)E!q)6m^ z&(_P1MTI+7aEA1?7Ea6C*x2l1`ZnPv48I>Bw3$k0K%I=VZW7FrU6xOd5x|Jkp}6d8 zGfMhWpDdrK`X%y3Bq8UVvXcD@MOb_KL9~i{{_e=&mrv98gZfOhy#}LOJlkLcW4<`p=c4VV@MJ}nb~F?Nf<~^=|zjz@>L#Qqh#fD zvm@k7SjQ_fogI(8hjcN>jzTAI_ek}**fl? 
zHqxvwKGj6b$Z}5O$T0fmkoE^qCrL%`3~sJqwHq_O5f*Dp7;{;HF_uip*|sP!i6MWC z3qo-xsB9#XiavNe zLTXS7)Q<|ZrLTZ#g;$-Of;RMt`irnsc*k2)@s^#J*4QXqmWnvZ>ZqkT?dpI_G`*7J zr^3OBf|5$cO0LP|@x9D45VjQ0n$DZR15p-%<_b@R6WwBsf1dO$9ek`BpABzu)>KdeU1#PMH0^HTFvOOoj>LxBO~Jlhy2K0w}lK4GinZH z^`|}a=U*W&B)SWu^wN*?(lHsahIEkww-Xr#a5fu~8bU=t`aq;C47L&N9cAaw?i2KT zBBLSWirj%ns^bxI+N=RuS}_!_>$@C+FNF(TY)xn|k2qzhGonQigj;>%qEbZ0k4HvA zQxaSc$O8dDguVX2=_ls?mx6Vr|B6*h5Io=Vf@J5@yKsM|7Sn$!T$l|eCGCK z)|&da;NcIz(?9VbZh#<*{M%M0px4ROoTnG=PKfoi7=z^*wd;~gHc9Zbh;YL3VG;%k zOFid|P8846+O>-V|uVlOXr?>KW?r6ocHuCKIdgm9k ztV0&KDle0qXrTEUE6ef~(-=0adUjJxT|kfDAd4AIs%{J-HxKOC-#nM~MPF(tn8v^d z!QQmVLu^~m?wH-evC%!?1b1GNJ8xejrMn%aZb3tAdct$h+W_MX)aZHP#Zl;JcTz=( z4`rn%l0?COO{3}VA@Cne{`nM_dZ)|UEGm}v?p$9HzauR@POT^b*_m3;;};X2^udG$ z*^iHPWyohzPXoMObypzHFB>oAEe8l1o~!Q3|Ehy?|XDl^sL_e z2XJG+2!NZj3HIuqil)Rux04yE41I{5L@r=|Soaf1dwr?H4EX^yiAe#Ia#+isf#ijL z*QFVNyokg^rNAliUw6OtqhA9aTo-=;Rv-wznzer-v3>jITK!zBocY!kfVMyv{0CsJ zxb-7jDqID&{8?dgHqU0IFjl6SmXq4U0jaIe>HfZcnt? zKlV|$Qxokc?u54%N=tzAn4lkl;L=n81!1a)Xb(s0jhW0j5VeA?5M4&y!=$D7y>xc>_e$N`XSQa|WK3edbNrz^eKlzzA%r*?BW!gP zki3Ej5J3bNYa=z=|1$#lrw&M-&i??Irj>M1eS^GqD4Z*8fFcBSPM|b%jk*>&u5f~T zZVYw#u3mc^-DZy+#Il{9Lj0UT;%Eb+Zn?)>j{jhiK0*q4mCfEc5F*iinvu|z9>Jy& zq-MzkyeqD-A6plzvDI$P$fszp2NHob$!NGSOC}t79N6j#D>_;YO&cUCK3Vq$BHk$1 zZv0vn%wEu*O7>kJ(~J$C&2CHwmDS8<8K?y^N!9GVAxJ>_DN3@Ozw#kOy7K+zdGF9F z87QhOXj+oFEgg6lXTb9f!S82nf&1<|G?%5P-5iP4zoOFih5MM(cYkf-#sde=%}})V zuraouVtr~;ZX~$JT^YO!c9^<{B@C**Y1MX=PD{O$bJKnV2Od)@tQ{czvESSYl->}V z!3-Qoh$?NOPE&l0dNgg@1_vul^i5jKZk%5A2`iSFvm2XKnpO^lx6IdO;B;Q0W^3(u z>#gbK`bO9LCQsjx3h=idE6M=H2g3bqh;i-_#|-ylf| z!9;ACF4|Pepg7oZU&u(I_>}WL>Of(sG|?_oS40lGr{6E9=$iW8Bq`ZM8B!mP69yl* zCjNk&tocmA;$Ko6V}&1a_2Yt?|0Ndv7fT@dh?M8@bJtcIN0WEAyA>}~rVur{l<>M) z`xBxcNWB9FstOLpBPk?tU%rQ4Uf=4XTn(^2Dw$hEUB5EmxmGIqU<* zIE$3l5AnZRt@nL0bEA`TGTb5;z!{AzETtsKu-C2>&aQ5DO=!6vH%<63VPd>8rKWQvhX%kxNtnYg|Q1OkTu!?V-xxEv~M4`o;^WOwNJUli4!9E zLjN0uR4p}+Ar{anYUvu$FjyG)PLH`f<4#t%+M!CK;fs%CGx@%NJW&;GC56kPk1_3t zERX?W`AskVSW`izPPm13;=%u9b5nr$CRr16c7{Rh*je6*-A&RSr;M`HTDFumFZY@J z@Cm-G=B#h#>okADEA|lZBQ%B0AX}Jv$d2752seccg-GTMS{aV{){&VWc!vD2yXIO9L4T#_N2O{npQU)KVJ0Cj zSMbwCk(N=XRCwMqdEagkH8?rn+l>DleA|n zqdLAX^O4kvrb?`#fUdMIuh=SLDBeh);z|p*)Ze>{Sh@m8nuhy%J|{GtyOF&xDLTexJJga; zeDLzTaG*a=iAG-Ucg|I#mp=XIS=(5~d#&}zwF=Ih<#=x9vO7H-#$06X`^4dE3K*F3 z*fNV+A4pe1*QNK(si(y8WO$DDk3M?eKWtNVd}tb|`c(P<^a1rD)2JUbHYL|$%D!fC zg`b2)@_;h~E1?ikf<-&GlJXnZU9f70omt!vRxwYVSQlpm^Gk*~BGdrg7ZR6eq?zG* zX^TcI5hv4z<=@az$ldd4D74YR=0e3bm;$+!a@QIfh__!Cv%C74;^6%i5^2>$Fc?vwLcDZf z{1fBi82;0Ccb)3nqJkB=f{X#iFp=t}_5lT&rAd%)_tmUE?oB4vDK$t8V(z!dN!Wh zOtQWkE-TL~&=sqos=5e$GQ9WR;<4$!8!L$ZWW$Dk(bOi4K?ikG(~?AnK`LJlbh59> z4_riHaqt!N2E^ED)%vWY0}z)_b*iAhlIh--jef0jF~>cJkW5XSNMI}L* z(OiDfO9#(ou%lQ#R0ohM&iM}t-K1XDSf7097#5T%I~)?gXMb2G#|#q*2&o_a2o=pF zUKZN(i9sIX!1_)RX1AhUtKEuzFFG>>B+~}aT}G`)y&lq*|lySOrcTBQ+JDh z_R%+s;#2L?AHcrhZCe3>@03{&=m{SiPqYrl(jF{qbAt4vc~=cb+%@5~;_DO|_7f#} zSkO*oLT+4ujs$eg&_A%l`aX1z_`+t*dC8KdqAXChI^+w`NugJu2w=8a;wWyinVs%L z-h3aQz-=(dkRUiAs_f`fV1qy&jK~c1@_ZCFt1!Ak{xB@u9gQ@IZA)93`wikhpQj=~ z+5Kz}ory65{l_IWHx#Ww^!6Us6%?zR6tg)OyL7=Q%-8>$YU)RM+ThqHK%D`$>NUFV3xN+ln?w`x4jGiupi*Jhq5LXw# ztRFf)C2Jvo($-|xz~X&JH~!yO0}ua;5s>& z6m_F7@RC=^)y~&fE^k(I;T&fsx+xWUgw=xgdIJ*AZ^FFg?6i<-0vvEqu6(wT5@ z5D3L}0Oo`S>cB;Y@y*`s)1>G2(Tkj%n_d3H??Ss#Rz-BgSf0-hPO3lQ94VbIPK3D? 
z-Pv39v`qgJ6yv0rPu;B6bzZOg1dQwqjh#u{tUmbrH__0ag?IKe9>90^&u^)Fy8Ja< zzprQoSC5%Ec>jq3V_z7B)9^Ym4%5HiuI^Uz0CXZI8ng^U(h{A?2QD(+Lbg3uF5A zFVp@@{;zC*!{F~SlbxSF|Li}!|F4sz^KWE*a_t1hfL6SYS2*gw@VuzvfGyEaI8)D5 zkkNS^g!SLsV>_Zt`1az-)8jcz214j5pKIAMRiEIK7!C)QVI3bvX%E?j7o0rj%~9tI z3`hR~gx{Du1#rhEh6$%4=?;6Vd{$elC9B&LG?iM>WBVprA|wxtdCDp1gkv?ixg7jA zRq=moT7S2N*9}KCH=pcJG|Xp8YNtEOH2jYzOm&H2$`JXWBmm!1J%P<0W)Ni^Z`Nd_ zC4rB`vY^?#Xy0C6miw9JEiLs8#}F^Sa2v_kdtZd|4HC_h(ma(A#mP5mO+JM*Q#SNm zbKo}>XfG|^;niiZC?UiNFl&q2rqsne-%?Se4(`nA&q|f?u@ncms2F&F&UY%$_7p}E{HcW^?Er1*YX!;3%yH8FF!zv_M9s;h z302c5MRG)_EuDQVZ(Ur(SFXK|D)cP~FbTE9HRR&iE=%~PhzKp>R zeHlogK5Z*rn}rC_(PrcP2wFU6UU zl!2*5i$Ebn%tfr57U5;Wfw?JxKfaJbOk$CZFV#VD*8n1?;y}}q z5NuWW%L8{=<hvb)i;O`&XXCFd0!qi! zVv#$nM5@1%ybLU4)wUTDuP0|%t+p|_e8He`WPfjhGKqt=p2>!}KyqJgFr5|+Djnc+ z!vyFdl(VdBM=o$QrL2tucpa4Uqgfz$A9qVtib{D)96Z=4yUT}auwK?Daq^hI03K4xe_Ih^x>_Oje0)@MA0IBpU! zm!~0Hn4de6ENRMvF;(8zgaVJSyXdg0esNCIS}*UL#O=y7yVctBU9Q`j;F zQ--s-(-H!rfjh@vPsd$2+{&ae3y-?Vrjjr$X>q%h(K8g5~nv%(Plj|{;hsLX>nyMPlk8;WYKhQX~X zGSht8jGcP-X^e6$>Nzzdlv46YEGa%rUIZ7#BB-0GxqOc#P{0kQQ7SU2g6MIXK_cj?ybO^@>hkV>tj2*(^WaqT>9P zjFv z^r3vyEMGxcGj##AA2=D>n_)Yy7J=dLIhN0kZW^(NOfwX8$178s84!KW_W+kl2n+4j zfPQ^$N;b1R$8@-1q_!i!OY*pnZ0myvja@Vf^;ZY};z}LK;E%?Vbk@nEGKhuD%rDJp zqc^5(c_^^tY%~>V6SVKOx`*!6CI@ht{s2lG2{FAsgiD`E-U42+8UN&Y1@vF$MmgUd ze#Np)>Nfl3B?6LIYiYApd{-QzL0y1yC*`6n!Kgm~13_-{G?MEl&Se(i(luEnNXjNo z)Al)^eZ}QEF3@f;NqM$H&}B;jvp_N#naLmDkeiZowWL3&AC+RtB_W-f^BtTV&aB(> z)UdVn=^^nU`kT-X*Q=)RZ=Qoxnmm-9HaC^h`IecFk0dzpyLkCjCKIZ@QqJTMPwlbS zR5wDvsK(Uu?|D%s)|&4uol-B(tm1?Xri&CkO4GIcIhC%Rf0((5O%N?Lz68zj@-IUB zGFQUZ8U*on)d_u=o>@c~~+>JZgAOM;~8 znBt=6^(&1p1U?&YC`L=e)y>hs5<_*j0vAE^HxsI`dmfJyr#D9cbB{`+n_OnCcxQ|W zTx=J@38%tQgyo~S-fJ*Qo`WLlemsNxn5tgfkZT|K$SlWDGo1&&Zb+o7Qdr;%Y;bU$ z9%ot}b24{|74yU>sqaf=vlnbOnLaS1OCw$d$TqGz(IZ4s8R4VhqCs#)(U~f){5h(? 
zUBWc4ltFgbBCRCxhnLOEb} z2@9+-?g@shzj`eZt9Js>UKx8fw?4SL@%VR+SDqa(&QZU_EC8|&lC1;^tQ-%3mBWs8 zN=ada4-02eXZ0hKWO6rje7RWm%q3A zMyAT%lTH#a<94uwbTn44KJNW6LD69z=3Pa%+T1dt4l`=?-gop2Gxy%3o3mlv3Oj67?JFImZHwqeWrstJ8Du3j~4mxI@02f zYqBpiar(yn4`2p2{w$7UZ$5$CT^o_4t)5Ul2w(ngT}qFfLKCBH8~d`~%pKDU0q8lm?#f=Mnxr6StPY{whjtnYcOQ z&fvpnB!uz!bZQGV467U9VZSeT>^))(g|T&f*jDk4MQLd=Q!sN%9Gt*1 z*>Pe-{d5twQEGnO_v>{eI8Rk{mkzFcULN&+lu7Q5n=s`J*k67?ND%!M)DPN=rmqbN zwSCWd;u_>tDsI(kRQ(|yL|MRjJ*+o97&eim{*tGtFN@pOmz})Go>8e$pm^t+L)4_4 z#LjKX+%ejsZo^R4pb|7W&EU(3!bt>-9*4ngQM2V{aHhYoLx17^F#RUyO^f>UIyM9a zx-7Y|=k$G8M97c$#G%pp0F;yRY}=c{@mG>`Fsc~x07&F?DG_98rxMs>8Cyek-6xXT z*AT+vz)-uAcNxkIKGV-D0ZVp>WEqmvV*Y+U1YI< zty}^Vu?`qRG#lo?41E<`Kk+f&7Kyv8E*9qj84Z|zB|HD=aUdnAJ8c=2^q$jfa-$>e z(&{eyj<2Trx2E7-+pkEQ2!sZXaCH$K6F(YTC!3y-clEa2RZWPJg<&@l;|)uz9kQN zy=Dr-;Sq^(W(=6i)t1iNVQD16^=nZaXwJ)s;N;+-&Xk$u+g%Iap%FkCgYB+7{mp4S z+wtqRPqR)gPoeB5J>i^Qd_UZMh|i5gJWFAEX_k^}EQKF(?@%ftW>t8t$g!f(D`TOPML$r@Ah)02^4P*EKn++)s<8(oxewrilQ)Y?#d6KszDOcy^3 zDK;bMM#t$x!I!`cY7Hs+?06Ll~pkZ6Nmb*;+x zWtv9oEwrkOs(wC%G^&)rux#b#=u8n2SkukjO>ZtY*F-?`k(h7@MkDxh zohXR~1w~=30+?D}I6<wOVwY`b zeIdcHk{W}xm%_I`l56C!?A-QT`00}j@1iUM)_+l=uzzdgLAd;3dv`&HKO=o~k#X-k z3`I~3cC84h;ARc?XZc@P7QXMBkagI`;?Fnu&BYT5ma!DbDxcIIDC@ccE`mtWQEZIv z@AHd-qraQT-iS`l4wo^+i90%RHY2GYAFG1=L{9<3%~@#ExrYmxUvZ2h(g;C+DI~ zFNl;56^_W?TM$)MMb9hMnr1GCI#7;un@@(pNnQdAi|rsJ&8O{~>sqU0y-%0^8to$w zq??DYuTT0Y0wi^vdsPzz5_of`vo><{X?hdU`y1m5^H30hQOTGI=sUfW?kYUuNr9zd5Wd)#LWk)&Nv(|RjUdkb|Gk<0ZTr0C`eE92LVVJD zWxx}-P7)`TRM zqWTsolAPR}m)>IY`(cez-`IM>yT>P8K3WQcf#pKK@zTjk+q*22 zDy%VY6DbR6gI?}<5CMYMA;qJJ-xi_^@gRK-VFIJR(YArPw)!*effw4C>X9Bdp4vY?8hKK67W;Vk2&6IF~tlj!d`DG2iKPsPW}Z6|I*2>N9C4HkBuEWZ-i>;h6?v z#&q+UMC#Q0J;?soSIj`h+kY3z01vuG@(&)pn>n)l}m!Jbj4E9{M+V2XJrQfAiE>29@S&PdFQu zyeI0(Go@rerx3yPI5*s&Acw=9SmL@71rKmm@jYVuoKN8gR?3f$=W7mjdO#w8vCK+L z*R-_uFPqiIe?wex^vON2T5 z9SakfA9wMP+?zxJrBAMh3#I=&&+3aaSL6B9_{9`tJ<4Wl#K@we@8; zd0{zz$7S;Glu9O&v%IA=PY7Yo1qR1{{+ja#Fl9;?rERjU3;wP{IHvLnD~16XKW13A zQGxkIh&nuZFhCkwCe4>fSn)~GHHtqDLE4TK!FDjT4D`@0gaO`Ur6tHzs^Oc!r9DV{eLki4kPQi%%zO1$A;U6 zBqHsuo4e~af@&7&eB_nd_kGa!WAaav4kOb{Ls4u4qVgZwS(EHk4HEhGmcG3^!Mx>l z`2&z`K>(8zzLY`KX5_eDy&hR#06RctGYRN()u@2ARFgB#Kc%6tz#q2Sv*iGyfho4| zZ4$sB7hbScekmcN`IMHW=awuPdg?Mx9N@V~Z&5P|P)QSA64i$q8`5DpI$vAZUk@TY z)P|}PwV;>p6<|YI<$w9E4g9;JLcg?UH#u2m2r?QY_YqLW65X4HLPb-B-BnvsJa?>F zsX>0h)z^iD=7aYRs{%cm8+#r87Xu^h)eh)>+e%qmq?cqT(mYik(F@G=eK0ydhew4G zz)k-ED158)S{}tzTVaxvLQ>|gDCf*)% ztgemoTP|~@Oc$!2lS=l4qY?t0vBtl~saX+hdeAhMM_NRJd$}zvldek@!^S_C`UrE1 zkL4z3{&Kk7w^Y(8R$9~d5%ZP~{gINbUuaiH;=>{s^x!;e^LpDURcffzyfK;+mweB9 z#`VhyNI>?-N6UGjI)nl%oVc?2ufM5#Jdq4o&`53H3A3JoRdUYS;5G0zIN>Bb7lF&d zZp7=x))4Ehk4L0?&lvrl6FSX{xleh@*#7T;4*b318p3mhfC*1r(D#D!d4KTh1d?)Nt#l!oalUb=dn_N3h9arN3KVM!=A+~ixOPz3t2AuWLk=i0l33b41uTnn;PRZPCz@Q0a`NRAZDo03#DA$j z^08Y&IgFoa6q-^WE5EWg!^T1&I#HnS1+IQ&HKzQsaSv?KIAClG{G75Hs4>V6e<|Zd zq=WcEjA(^o6#RvLCo8t?m6@^(fo(%qQM}{%Y*D3TbdEc5<(J7~bNXF8^WWjjaqx&o z>`i>Fwi2YIHi|E`0dEZ|8V?NBVA-X@>2TTMh~nuW7js(W*0lLZlCRl;1u;GF zungK)SPKfBt<2Lrl10qNmG&Jagw;uxzUjKAxk4Yc%YWgs?kRgcu8RrUCN!hSIZ-5U z3kE+dbFTBeKn(o-SyjxUhwwS4Cxr%uH*pg-AeYLvGT2iHqi^ftu;G_-lm9BDL9~m` z^gd&VYY0c;Sw5CdN#k{boZk6L7zd=XgzKaDxb#Wj|X>9k!g^%Vq&b*>u9 zFUbT}`Qx3p+RWa9)AL3$tHw?~8#dHl;hu=?^?!c{dgFg-vz8Jx6( zEC0Lb>u*u6i}8->#Shu3cl+;b>|d*n{DM{0HBgsWJc%qU8{p17SH=Pzop6L5EcwT{ z-K6wfA9oJCd{i=daDTU*?f3us2>K)X(bdEJZk7waScDPD>B%xp^5dNuF(fhLfstv7 z8rp;+y~RRduh7HvoU}_E)o-x+UD4-CQxq%mH`)cXDHR6gRGQ*_81Tt>d+Qm$XuI5% z)oAxQBELX{QxPmbIyZ7bTXjuo7!6wn5*AqW!c>?x6$IgY?1{`}78=pk<})s4b;wGq zS}q!Z9l9|ND1G=B;FF5R&u9^_Aw-%YLrg~TYCaf~7X8Mn9~Tb{hes?84X@I!Gh#%3 
z+@)SL%Pz)@i2RhcbU4TkQI~g|dI1Mv(rV!0ct5JVe{eVW{{piAzS?lzGJ0-1e>Q9R ze{=L7z3QnjsvKCd$P}+V>VFaJxan1mRpK!&cLThi-sq@U2&mxyhL*5#s#F0I=+;x4Y;b5 z7{9)@o_v$O0&D48j=7|LyTP8dYSxslz0|5I$Q4M&q!?FL%!rHwW0ydX6w|3ebm7#b z9V@<(@{q<9!Z>Q+)keX2AozmLU+P;Uo&o7S$ z|I-6Dli9a|MXO$ls{!$P_|%D2FDh-+uuH=ObnF9RPqx9-SNJ^Jy+~zTmfw^HXh=MI zL@3Oc(`7Nf%o!wgtPs%m7w)!D=^?fJV&jk0qK1n=ve3^R+gk|Mzb5u;EX|-yn!T~q zVqWq*WOc)YYMm0{N!)-g@7Z)+K8f! z=|kvdO2yjYh-FA&jE;28%kSPKRuo=Y@b6WMr0>0E+KN0Yrm2b0~pmt#rS$IZTCsBbYq{%Div*%uPZyD1;ie;TJ!W8t~>17k+T zRA_rp;V=SCoO1BZ@r(GD1=Z3+09dKQyE~j$6@P?dRU-FY0y}ivAma}}ckD&rWLXn& zv38k!i*9{S-<>?mTlZKDa4~D2rftIrZ?uDk^OvnCvl6|@04EN_x^0r1IE|rT^b8K; zXV$b1Rrdw19dY&BZ8ICM6{ib}mZM7U%aoQHv_YM)H(3c8AGaA7Rhn$bX!x_18Gj1Q z(IsWZ9es(H4+yJ3UpQjFwx2ic@}BwW!O0in8~iP~tgPs>d=I2E&D4rr+k;e|3CtX? zl@;Mm-Nyjwza>t;OG3L7`r?bx6$lZU7vY@ZW@ey?L^?x1can>J<(9+VSn@UK6aV-o zK3WwX?x(K~G`1#Ut!_wi6xVfv)z^7&h*RbiJ{CiBHmyknYy)(LVW)%?(3*kEt(Hlj-O`>a>ZuH&3%uq@mM2t)wVYTdBI#9W2<*4cW zp=@8-tg@mL;YK2oOrgAHrpEbAYFL%nw9C?_hn91Yc5~E)EznT#DyS4;ZAiY^esmwC z+pa#9;2KtB^_H$#bAUI20fio%IUe8Rrg|L8qTH-3p?nwz@xBv}_33HsS-*RIe&@4( z+a&eNpMT9puKhWSyS{cH#`6y#gZPXJSrKVqb?5aurJhTuyjMtU(`7d8 zVXmnkve}MjicU1*Z9?uU)_5iZFugq#B9YhJbLVX+?vU}3&zU6f4Zi-UZTZ^zXI_SY z%D0UQ-x#Apt6qeiVg4c?AaBAjB=BPonE3pYYKZ{YS1;18h4|0(Z? z+d{EJa$)fYC@Ah&h?Mb4i0$*+hN+sWb{F9U`MQDeDeK>aUHR{2X5gnAUI-UQ6N35+ zW?ajf-rY5vawL=?+@P}i?O5OM>?Hs^u2+CC@tkBDm&&H3s93hRW+Ym|7_@k*ePnER z6vMTC{GAQD!SHLgKLFzTMWV~$xG#ob2`EtcK*|WrqJhBb1TfrL)e{E^{#?6fIAb*d zRfI@~(CHqZU%6ZoSw-iC8YK~p4jmz&7;h&Zq3e>8`A4&Xq5Q#nyth-lIMQ@QT?0hQ zSR$OFD9K2yCfv8AUffp)N25|Kb}%=rz0Z4jeuLKK8tVNo4>8Nk60%UVvZS}@&V!}p zK2as9-BnbI>-Y1_tK9{~zbcA>A&j~uK2hGV!m-1cL22ZNwf>TxA)xPgyOB=gO$OECICSH7A;u)ITaTNF2!WW ze5uY2_2|T9zy}}!LIv^g@To+O0){fi2CusOJUZ?!4z9QTeZC*vr+)bZ=(zoU&}l5f z_x71Nbmnt*{XG|V%eeN^3F7C`=^`Er!4cZ=qj~NuE2%!zGRz3rfpC#b;o(TPh5G}4 zA;sZ6Q;l31#Z~7ulf-E4`mO=uXSBl4Ee%C(A8>drYjJ5-K}KgiLq5T6VZRzLa5lC- z$%#|tbf#xaOvHvQ*qwVLD~e}1osl9xuTW<)uDAD(0Un*`&uPn~t8$f6jCMlQzt8-h zGJpOWnfL$xmvmKo#i2EOGb`@l$>QksS^MpS z(>xbx=1m8W7#&c9XEmbGu`w9CZcd#s^7pk~L2M4CUKpxpTrLcg72U%{^Wo*ys`-hn za})Hb<+csp@`TL#UfiW==kO9E4wFh81%L}dlXVucGm;sGg9|=wG0Oc+#uQH|?L5C7 zS1-6l{zgd)voLbn&BJ^%OaV5R;FEs7Qz5Deis>Coq>+XI0@VT3I59O#at0uFX9Nw4 zDlZJT7%emala8{B8@3kUaeLv4@9iIe-&yC(2RItz5jXlRI?SUZ3=Bcw{tLkqgq;r? zu3!9ijE_MfbcIkO8o)w34Gf419)Z>+Hbp7-NWG9 zG04K);bH4XM8VIsiHtLVB-m3IJ86S!j)O=MkvrMN5-Uyh6x$1yIsrrc5M@>Ak4#8# zH~?_%!jUF$V#O|$f$f;K=otq6xQh!}uZ9+jn+$~@{GOjW#B9qA&9;e!x zJgD&^wP}~pd+yOUq&y}%Mlt*4_y_zk(o^Wtz2_I ziaRKE#^0X0ldC}Me+d-}E59dkZfFrQGF;@-;gvkn48>yCa@}Mgu_E3TVdPjGZbNF) zX;G?VzSP8%>+%@>bn={F*OUHkX$hJyWq}+?m9~$uXJ;j(isrqvn<8ti=%dhQ3rYdEdI_-dp&@Fjo`Q;UcMlkiIl{yYzOQ{GA|E)m^AV4(r#pwB=W6zwftl z^976x$K(XFdMHd&Wh|j*GuA_&lQY@D?^XDiiC#^`F}du%Xf;_(H`1z^t4MQP116o{ zxCaFokFm*&E>2BhS(V5O1k{kvmezo7D!l@-5C8{FW!uASkBZf zQ=cy%b}m$4<{xsLyp8g(E&owUi~LM`ei*hZEaN2#o_Px+p11ZC^sy?i_o_wT z0lfpJ)VDNTlPc}AlYiOKYa3d;M~f0tF0X^cYV-9XR|Mk@svfVW_|Fn|zTLdKDbr0` zh{c{Z)o2d&{{J@$I;Z0zw;fKRB?Qa z9~+#q4&_udX29KL*g&j3VWI!fUaNn0>JkuM?H+ljP;2tF*d?{=8!OHOt!yKbRqeHzQ)977j0I%5@ORZtDshydB^46D? 
z2INAY-XPwSuxTWyQ2+~T;e?OuK}mDZJhDxrVTV;1bv)NoDEHh+ST&3@Q0+u0wtu{P zjndG$g^?8B`Yj%ce$jH<3IAD-PNPy+9@8CPNg%XwdfCV!1q}5m zZZzm+Bu58*H$o@Dv1h3)?Y`s_u_Yy&)nAoCMAqcTzs7+lYwAt-lH^ND|BVh!5o)UD zR?8(zjHRVV^NF_Q-VQeLTgp4iP@88LRgw?r@rd_%yH8z46*{~}{}>~NcPKL=J|)2z zfeqGRU*R#zK*ZLV4O%VObo)V$NK+3Cgbm(A@RWJqssJ5{0R()3UXtD`)1bdKb*?Z` zYK`ojBG0xw=Jrc%@}80G>hR`w$pf;cqJcPP-5C9RJq*|*`VLG0NL^J;Eu>)`7%mj> zNNCh{Nnk3oPgz3RVE+dj%vjm5k%_;~J&SMa{r+>iRbMVnutyICL)nXJRYsZS(t0sD z%g`drjWknC|Gp;l<9Yjl0)a(g?=IZjmq7B**TO^^`S-6mTsM$_|*7M$kaXdw5x#yhM4rD{x7x@x)mike16>Z z^iD!S6PpK0@^M!b4O%-;YwGP7TE{`y;ug7m@b}-xB9kjA!LBBv4@ulZT8a@X`?#1k zcuB9y4l8=1EvbqX$dyOFRT7UupkC_{m9+h=Y6}k`z zOl_#rEb_A|noOJSNgv+ke8zqCW>#JMA9J@MAy;Zr#&2!~r{yO?d7&4A<=IvZTvfSF z054e*Rq!1VV-%*eZmTFyEpL|QeLdq2->&p*Mo`=lVZ(+9-@%w?MN?iqm7_{3hF<%& zD_1#Hly+~5 zs0Jujf8%_ykdoJ|dktfCA)~BOT)s%m3K9#o>(s;eu8C$X>@u6ck7&q|s>fu^#$7?N z+z92=keBP6B*ZO6F5LX7VJ(23b5+!2@V&#d%`za_n`6W04WGS)xX}7`}Uk=nc+9Owd%GlrQ$I1~L-rtbfiT^Fd_^O#IfWtc@ z`a6*LD8({GXI2Pj!KPSog(yt{1E(OCwfq`t0UeSGkwL%z{)chx z^(apjf>he~9q;;&lyti>lac)D+0Q3Rp(qzko37NDs91MqW`16Vz3?{<;MU75V-EQ>&+o^VP$v%~kqpzx^i%i(ZQS7Mp&Lgib0|#l^ry%jQ z!h5i4%>3W=2fEE63xOpo)!^2@Tz=;|5pLASc!o}`a6l$YhVd_gi)xG#JNh#bD~VPv zMW9sJTbgfcs@4d_UI%Zn)WtDMnW((=GqRg2u1YqU5!Lgb#MRkWUm@#d-@f#6fS#ok z)xC}C@di7z8l|%Mr^k&7hD)7Z*?v58z79*Cy$~2jD38NPr$jWQ+`apS9(9_dKJ6z} zHj!3y`XTI$G~j_&d2zi?@_}IlFZV(M9(d_jTxCN!h3j)^0c%XgS%=3k_C6?Sw;hq3 zC^=o}(QDOaTq4reR~)?>YKm}e8Tg-&9#VI$d)7s4A?bT&cE3rrZYzV^!=- zXB&i~pi%5maMkMkdP6RVCUG=P6h3?qKye9W3r>1coccwwlL~MDNHKY*dA|P4YrCn` zVKmCq{jkDySW9=0#-1;uK3RP&yV?Ma%$U%sxi<~Tf*W}T{r(I!HQlYm8uPnrCwWjb zmF|FO&8ZPE(`6_!Oa~%hGP6TGrXMW3u;`@TiOtVgf|Ag2bXj zU^kLEnIMVZ?j>`Qr4|FSMnHl{SZ_9M?iiS<|AC-c#lkIAfL$9PGiZl~!vDzhGY zlFf$q!jriHSD}S)|4`+b%X=AAy$`&iq@>f}8wVo$rVMZnSHKx+rf0WFg9xd>Ltf4g zF1p`xOv{Q>3^O%8s66OHTa~RQM}FFPmn8htch*VQyXxcm&vVq3`jC24?~yW3AL=CV zo7taNW}>>9#2ft3`)K@L_AGQ1_sYBAhgPA~jy0FbmR`((&PwmL?y>GmR^M~ft_w_V zcRm6!%!rN34@)F--%4AulnN>l>*O?ntaa^q5Or?OK2*s)I+O}c7>AA2$!GKy< zMvAiTgIiHFt(cA}>@Oh|v_O=|fs} zTgWMRr%0xui(70WfR#*vY5sgn@pD@m^#}dzxc4|<`=ADU4qH-gV(?_eT1sYEUSdJT z2Cze>H5$cPaP!LY{OY5Ji4P@buGj45KS1o(p(OP7vLxt99GGequU<5_yKlZ_6ql$~ z0Y?=GpEtr2WzUu>2KI!^OWsMWwei`9?O3G9cd$JCEh-WVwQXHF2OKFfS}_!{WjZW0 zQF$F@AS$IH_C}Gd-Z6KHf5j(HX1#A3e8$x2=%as){)%lqClqqcduG325!d6-Knd%P z)s)XR^hzaf4Y(v)^Vv~C(5p{oBRk+%+$)Mrf=|yxV#6+U2yz79n6~LOLNr#~$cK5N zp~kIz`3l|pwq@wl2EcZLAe+p@1DE^aw+0>Dw-pYM*fsI+4%Un<^{BBX zd{)0R0Td84iz9E(CJ$L6!v9&+eeT)dfsp*{*qg<7q{_aL$n7n>P208tRSPj5<_p5t zg-Ib&+eJjp3Y9RGp)+AHq|X#2>`0e+DgzAl;U!yk#l@3(W}9cnT6fk8tQsoKHccva z!CA!EPtt$UAv9Bl^@l zWC$<3aP_Mm$QZeHsfs#P00u0rQD;r{8Vy3^I3$%VLsOYNFaoZCnZk_9DkN zyLe)fao_07Pg?{AGVCD-5wEQ6fV8QjcdwU}x4oy{szW(Y{k%mX32V;W_{;P>38^OPX{XF>$#to4+k1^I=hWn#wU(`C zAi-u1G4^RLM~TAERHMtAvyLEdb|HqK9tvfpygI>mLa0yLQc9Tl1jf8fIF$K~^|Pm= zNURrg+@tFylswj4G9~BE)wpT5QlWGcAbUZeu=mszOGN{Z6_}tiN}fF$&HNY?r@9jZ zcY%|{VY9H@zkT^A+}$v{*fPsRy}!~r6~LQGsJA*zqQZN-(e3LEcnVxvOIWPOvjo)daa#g$?n>8WZPqb+O&X+)6P+iyhuBP;Qf zUkE}LVEp#9z@0V&fzkmR$)O*vwHd1f(=zodTjn;tSq4NFEb%rOYJPU|RmLh#@9ngF zi2ebPe2*(T1rT-q0nSQ|e3}q^l~*q~)A^3+x_9BG2NLcrPg`Va_7ZRDc)wtZYiVoYs4O1!lA2cRYv z{Ys{wp`ju7Eye9Gkg_>EBxJE+)4JLxs{+upVU2ZmP23Vb6`v!&2G+&4 zrq&r?mZM)TeW$=SSMB|Sq@3MdJ)aFZYckZuS-H5|;ljYfSOqU{roMbiVZB9u;485A zZ&#Sq9#V>vnj|jV|K69BV2pO9tb~pvqTMoxA|Ao=i%!a|%J?D0J*iK#z<0HNLvXHx zH}!R;g}&k&7rAqq1Ur1eRWpavz7e{eNKL+&fKSh_k0NzDDe|swroB% zXq!^}jSj~*1xlTwgfw!S-ceW=@Ia7bCy`S@IeEil%7s9Nmd$F7o$*w%v$NCd_7=7y zI*Z~=BBl;?=h~XErg*Jgar6)*jMjLjI~2>Yp#S{{Bez== zeT3C{dD6V=|J!$O&i%wa@FJZmDQ}VAhDA04>8R2ZEV?b2ov%DM7SjzbK2C(@=g=70{ztTTPQM;7 
zl5}JMbPNdl0LkBy@4EQ`V~Aw3ARWF)5dC*T(h_8a=Ldz;eg2l4gI9b>q&k8x;_bpg z1TOydc_ab?9(x3`D9sWYGhf872r@~9AMfnO+}7P9$&+vN!_^UKTu%6DW!OH)WLFut zV+WU|)Z^VPsh8;5?`ux5cZjLlMrIMW%TVX>h=o>D7DzsF5fUwUpdh2}1$vUQoMv1FJb3=35t^EnvMO$`UNDNwKdm2b^pL zw7Hq$Z_pRhMIss_c!0io+!`I&U1x6ea2aNo(@yJ^8h_9>D)d_w9%eW4!?mXFHaBoZ zZs*L%DzSb}Dy5UAU+B3;ggV^~`dA;dzt_Xip2!+xoB)J=o`{HlmU(#fhZy)?IkD7P)+OvptEGth$&ng*Gy0v{f;p+zM4 zr*&#zNEc>|_GpP+G%DR^V7nY{HXIxu)?aBdWA@JJL&faM3;_r_gG_9cC0Nv<2jf!P zXpxG@m3aJ57YImX0EKkCLu7?WgqP=sA1nTF?*ib>Fm;8$vYVMrzip=*SDYBF3cHiL zIt&IT;qv6=U=GWYucM0NT!Ins8xp0Uraa{IedNtiVKd4dYxFrBYI{l&At+!#5$1+V zmXCWLt-uVHi!vG#8G7Lut>RW)P60&X4bI6<^Ttgmi zj~kTS2oiH96#WgqwCPi8%O+IPt3ctO0^^_m15BvqbxH1Yp7^nuv{ezb1kYaaHm&l) zy$MQu9x&9efc2?y7|ws()9h%moGH`476qu)9+Ca6Jl~ufO&d{mYkH+wtGJyzm23|K z;<>Mc^~dr(WH^^E@?9K0hfaxd;>-_qS{2<75d{JNT>;Gj}8&2J_%n zgHNHjLMqNJ{_hZvpJN#knlvNFh-5OWBa6iUR1#b+M}NvTY9spx$bL>S{OCoeOA2=G z!Y*A0?dWau+6lS30(X1`ccHG0e6tR%hQ#XcC{`(3lbUQDkW+IcH6VdfDgz){hI)xqvPlYE9d8tR5Uz>TJ+;dlTV*<4 z^*W83nT3(2B>eLM1`n<#`Gjl7s{_+tsB)CSGvOS*6x>PjU0_iqGXzv2K7wxigO{)S zG%(tJY5E_(?n?+5s+E+07QOh)nXCnV`qB<{*+LV!5nScIxOuu533_?&5hc+h&RN$k z``Uka*kwK#x!VsTv%~n!MaUQ-rM8$ob!01Szf;=ARz+*yxEf562Wp5w_M3C8>4W)Z zM$9_>)=Z0sb-EDJzPTEJHmEu>+89$R@8ttzD5n(9mHGfWP9YgrwI12l6#P0%X$KCk zwD#zIE_$8EKqN)J+J<@52-zC_p0*SB8|(8RT=q@xuyr&3#-SrG`5tQvQHgVdb=#0U zq{A=D#!)=4?^tf(#4}z1;@2JtrA*e&WZwq~h|duz)8P{*4tK$D-kH2hm!4e*zCleZ zthb#3Q`}(Y&)kW=&wGSoOpGeomtX&>482{WX=UX0q-5|fAeY;)hz3Sbgp)B`uV|*q z3&@%*A2fma-iAY#4!uSX4;jrJZ8MrRuSk!{k@IA9=592bQ!3vqq3}tzZ^kw@;pV2? z+qByf!ocKVx3^*zL$-)!`{894#^5OKLh;mS&rm_fW|!t3GcK!-z<(_Y8=gR;pZMip zqjr=!-2THtz2L##-Mtx4>XS{c1G93)w=Co`{r-R`qElxQJ$!hM@C3DCT9%p>N=*5n zmy~qmL?6T1c&3NRX}=C-etkZ#ozKw^&QBdVkzvr$(OMVtTtvuCQ_ZIevoZEU#SdJr zLdDTuWyRA?3dAy2TGa~=deYVn{xCk$c7j(s@FJ8fZqZJ!)r(-aPHh65iHcx}^DZ(% zwy&MCjniy{X;|1fijb@G_39RbfyvE=LYs}o^>r!DsrInkEM_;+x_6jgDjGVb+8o=7 zO@yJ@eI4kZ%q(X9%(GJUh$3F~XMsVVc3*aWH)Y}C6C&wG0DL6#fq1@&C=>TAyE0uo$r(b{sB9Nj!Q27Ua z$}5lw!3pBWqVweX6BH2EwYM<<0@L62VBCp!g;<)}t{2gCUwi{r&QFaf0|ORUJS z6U`fEaAwKJv5iL~gvBBjY0;$P(S-03*Xu@RB5R>|Bp@vCK7siV09fg7_o1J0dls}H zVD<@Zu6Pc$tV8XRFgO_|Dnr;mzz`MM?xO^4p=mhm>n4-F;0j#=dOBpH7FjkJfq)7N zj7O^yAFrgNs|sOPjMdSjtI@@+h}F^9)rHc~A&skZK$bY_5-{tsfyDZXpN?MD+>y(8 z2gB(!5l0@|)ksu>KJU}DQ1xCLiBW!4RQBvw5?f*9^+8sPH0@$z@CM^?pl9LhY(KIk zAOHx&dTt1=8UH?gqI*Z?H-21ITJrp?SkI)Cd01w{kx$GT868a)k3<>;g)-1k`F>$z zgF)Cq^7Th}`s|8|SQ!qNcM@7m{{ZoOlu!hkSTH@Iu}{`ecNHYdO9w#UQ=6pp8ejPh z;pB^qZy5$f`EgFzk{>eEOpASlf7=dUJ z{$u;KkGXlddws0@;5H}(#e(YZ*>uF^^SS_#dc1AeL``0z7Ag{vI^zVc#oF7_hES!p zk0r{XVs|XIBO&)ut&%Aq79f!qvDDv6SLwGY;TcIgZ{A5LRBm1qA=4BO5{=6-be(U+ z!vaWy10EGxUc4Ucw*=CBik6(IvLe{CT|_uX1WZG>L!nmg^!FZv4}kiffN$xP{7&Hu zJ}U%>!!bafwRvOiCxXSRJ64_k9Ws^Co#u5-Q#+UqW#f>vkbPmfm~~xNGD3>9Ps1n* z5ink~=AA$PQ%Bb+_LXh8>iTsn-9`HMiLEV5hRg2X3!KcjxGZP-#R1*wM;-O+ zS8LvJj9JKa2wS~InC}T2mVActO-^IrVnCW+!*4zwGVqyYZlRpcnS1EDFQ-6B1S~nr zZDAXfG0_W3BjmVD^P7{3!#iL9l)~{O|2JiMqz}6>=*=cAty#zVo+j+UVxyF$mFk#n zB0f1}y)!6~m^z@vDA(zEmVQf#du8o1B{1Y;@NHYjC-@vY7?UmDS3AD zY}LQKs8H75w=xHbtQF#7&u9`15p+@ZK{f!h9BNGNUO0D}L^{-2`mMS6OOPL?fD<3~ zO|TmivAP^nS`Vg+JtZOD6=509qZMl(wV1A}-413rtoE>n5}!4%--a|>%?JN*gmFwS3$%%{lJ4nwgy}^d zkn2n6^FwLwAFVHcIe1FH!**@wC^&g<7wVVoymglSp}6k*PDT>H)dqT}G7PN82A^F* zf~}K}agO_)GkvsN&igxEWM9&W+0n!=RL7M8$6B{hPx+auei5g!&Ys9-IMEs_$AW*3 z2JfdZ7(+`zN!%S48@@-?pz8aRQk5JnFZ8?&c=g z(#ZbU#0Z!zRQ>ciD@oQbs3NA%@v^R$F!-N#7XD*eaWgH17i z%1#H6h3Y4jnw4ak*bfjecyM%?=#PrGpYBhNR$QbN3(3By6|w1(LNE42@%L2H6iP6e 
zktoxGFA+pvb#$~3pDri<0lJY!!o>H&(7qyE^Q&Bum0u@VJr2quOI<&{`A0Eo}e=MaM_rNB6nCXW9(tngtdsY`*VuYf?r0n|4vyO#%BX6?lajLLNB8DtCe+$hqV&%^WWcf841qVjE|dFO7CB5PthkHsj$mZ)(9SZBQiC~+1N6?);futK_r_ND^PH~rT{Z>s$?{zZ z{??%STB3cv6TbCZHs6iN=Mm6&j>bipNziG;E5 zX4#6nzWGG>CLg)+@1y-=FM^^z1TRZ;;taKhd{rHA4Idp{X>c5L&k(v}?WWha||5KRLfKn&QlS zd=yz*%+rcTM?1$3ed%sM_&b03weD=#tH+4mC7WYJJu}BtC{4473k#XaSH?I!CpRbE zxDWjS%M>=nQCEjo$*wY90ktyB`(>|N3_%(>t%b#gRK zE-Gv4B0MKpJx3u*xf~kstcpnJX9To~vqrK7-B+2$56_|QFyfxKAgh0t0(U5i>N;pD z79g1ANBc?q8Ezg7NE@N$JSZe7JeHDa{b0OlCRgO9!$wL(eU3(uZs5;5hVF7v$pX$~ zMJ$S(SQ@4I!KG{5o2+%@CeuRZ$Yx8?T9UDn&ep?HZd0j0PqjT)`qviDm!k_rCKm=x z)!-6&Fl$8r%1y@x_niJ6w(RWeD1I!*KSPx(Xs}%B83OB0FnOKc9czCV2v=}z2-PjC zLzm{?$|qK5ki(||9&HQrV!l-#T+RL2FzYX^R!>lw?4IsGo8|I2O@63BXx`e&2Gh5L ze+^fT1o zRHVxui(%3+zk#-+5i6|*6jFb#Ri`Kn5mrl0Y_LPkdH(5+Jim8Kn(oY%% zZ}tn&RfOx~>^4xUR0X4%3yN#QJ< z|J2cyT1t1MIRdLhQKsm@4Q5N%>Ug-O8wwc>x;F%Y<+OL8kU z$&H?HnIg}wbi{}`)QLtgmr`2WjA$tYdzNYJFn!PG3@Qy7FPDuD+STsIn&}yVndF;W zN{5u`Dw+^|hNRJ(8yxG~0Ff?39~POGVVYs{u7%@rcj9L%F#=me zS%#vh+PwJ7S3yxbS@GM~t|eETfmPEByJLsOPviMoj#lTKGoO_Oy^Lyp zxr`K&XJM0MFlT4pk28x5DSkUjeL!NlV4)qq3u;2Gbe?P#e!lm?c#&%OdQyJ2tbwvX z`eT9HgW|+C-)@R~xQ!`ujb}-do<9X3aw6(z<+Wr{&op^&q88 zUQaZ{cKwczs!loU-E4K}I!$t> zT!hufc>!MJ{1!ZTkV}H+P*_FUjz)kZt!t18Cn5mO(PLFdg4`^O# zyR5Y03n^uob<2Nv!8e0W;>?SN>#ZOE&c7tcxvPP}-Gb$uB)YiPWsXSvJF-)+Bn$QAmgLeD#lugyz9 z=OIDJo*{CHH^+5)YM7}g+EPe>>L}T-wCOtX63}i zuyDrEjDq=`$rafKCEfW`v9(6b(bUyORMHX2v~8^A{Jg`EbL?yS;DMzzyk3(-Rjx6z z00$EXRn?EHG}`w}E0-1c5;RP0k2w`qvqkoOre{OHza*a$TX~`RaOLpR!8~|!!u7~u zy&;D>Wvq9cA*y4W*=GVfm1$A#el#)&ydbwEXwK*14q3!YjAILiq5ZNI)uEzGid#~e4gW0JF8nd_?aZ2>S&@8Fi0g)WZVY9vQrz(;-AY~ zbs^pyfXW3<$p~0$Ff;rEB(kgiP{vQr?NA&1%3P!|X31%EXIn;PjIfl=tHWcntJ6xUhY!B`;`YJuFk0lp>gx~#|pP134 z0EY#9v&Zs%4F0Ga?5gEgNTAE;uq`q2-DXDH*dR0U`+|-?S1p2Pep|~|9bE0L}nxwPCZVK8vEO- z?v#(ac6$_}p}cOgB&H+ge+pS!Pt9Hfl?Hb#{4LGr1=aggvh1H7I(%~Y6`E2R1}W2l z##{^80&SB*zorlAO1qrBC@ZtyhMSD|*d|A8Xj-yJK4G%hN}U#^)#&)38m>+|OzYfK zk>9WP$33!Lun4)Pc^{qisJhe(Kq&is&W4hSh(Uu&Xwx$7@f*J6S~aGO4PC?bm!QpO zrKq^E$yT0=F3gWgB@uI~op~nT-sGL%>fi5wY?ZM~aqZM5q&Gyy$1~LuQOZeLgiDr) zEP{%w^4}AOLKK-SNx`-6w~A7jQ%5K72DqlF!63Pq!-=D*n=_Eoyd5$2$(ng zYAWm_AI5d{_yLj{_Gs>KqG;&v$i6RIE#^oKnQA@Pr)(R$xsz4KVVYQ}kLHIE==1X9 z@n|nQZ9~mQ3avBVcLHZMGHYi!Uh*TZ#Hy5)b$dX~CblLVb?%%@W`rjCQR>MeX{%me zAjp|wwd)7CR)I#ex^}?MmZQeXA-66=+U9KF6?l}r5-IEAmx93Za-9*JW>eI<7+c4J;3#J$sTf-)fvEHdN49i#idcm zHk5JVk#sPamgcQY;CShS(Vxw&pK*`;{{eoc{{#GNf9eXaulz*+>ATwB*2c=?Z#Go3 zlIY(J7U4IoX$}>00QS*gk?_-A_@a8mt-2-OnjVUMnXAf(SVEws_D`9 z7ZUl+tVGeVOt!i~mPDXlwW!J)wgfT8g`I665W3Y#pRM!WK8Idm2Hk-SY+DO4r(o~IpR3fDZ+It zxjr9aY$%I5BsHdsvoiWqRxtrCaqvB&)ZRy7;3LD7fJ{-&ECo!UgbG{7J8&p9%}oqy>EW;`co$Ebl%wbdsdA9-Jgh;zG6QcM2LUc?Tp>um~y#@YeXww)#EnaVr> zO#=2QgFfDg7jJu5Ct6=7=Lc* z^_t||V!jyPRWe7$eN>}p(_qaDEwQGgter4O`a)2U-)(%%+tT#GUUIZ#B9W9didl{c zUr}kiC+~kvF4GN>2^ zK>GUqEgk*Fg!k+IpT}ZQ{O7|Vq;K&l#a!Xs4mJ`>t(O+2@W>z-m zac-r04vXH{CD|>yA479=(Gj9HNQoQCI;DjlW2`#L_9xjkm~~oR|5_&pojtH*s09m6 zRmxL{m1!<0mSF}kSsY;jsBACtA;awjFS4(>iBuS=m+3QR)%@y1v&(W0zZOw!CBT9l zm#Z$YbRo>LSVZ)|pyy)fY~v?YPhZMgw3~R)M;GU(9|P0RfoHusu|I3D6TgjnWr10P z2s4Q!Mj|AG;%U;a5zaT?Wcm@1>ec@qzW6p_7-lF2nbQe4-L8ZS{}BO%vY2S~u;a9m z{QUltF@tav2C?#A)CY*47@gruAv8O$nlA5vt1e%XwcUl{EIrN}j$Jx5igiOKF>I@_ z_h^K`K(2@0as#=uqdWC{1H8k(AJQHef%+~TDe>IS6F+OmjsCI|weEcV73vAA{{X9D z$_5V))^B1?fM8G4ESyX=RaUId3Ok^1clX~4p?Uw0OTN?7o_~8~`F{Z05Y;V;i_PCV zV!;_58cD@6E1aOm0cooH&gs+Kc0?8Z`{D45TSM%+p$yR^MtTsziknTlw0f&`VfrEC z_7b%ndRG(XVWoh5CU6MG5s=8B>$2HNsCQ}7+-Slav^W-I@T|vN_m%5vH8#PvjYz*^ z0g-1QAUY4u&Xc)J$#UObgV~s+5cObl={Oy%)scV5H_f_Spe5U+f5?G3Ve{)`nkJdt 
zF##6zIM*qw*nE~&Lg&f6K>+$wtj%})sQ2GkS$^2^(%sk4-2VgJKd4jX4K7#W(tvE4 zWZ7i_$cl<8{Lqh2dxZ)KZ&IiAnGoQjzYLxM7lmlYE*G z-PJkHqe!SES6DQu9U6ByrhFb(SyBQK#wlLr6vgOUlT&Q7*6!-+QULQM1!3`eOmtu*Z=QP$do-!Nc|_-xs;_->VC~C zZO&SK@t3L>p^JNcN>e@jkh(|pb4Re+1VVx$&IN)(ReIV7H2kakQMH*@4%Ud9C`5P7 zp@%$74Qe?o3-nYsIG}nlVg3hbG_w7wmfgqdQ6RuM$l!v@f_Q(CGv_5*D0Fl-*Y_;T zDF$Vd+iP%_$h>lM?m*Ach2v}v4k6m1Y2n-XnIu(mbRKe1?Qf>QGa{-)j8#NX~$ck3iU{{B$NMTW%|k@W340uubFZezAp?d`UxwE+$z*hJnt(877;x zVzINk?S_4GpHK=nUiji=0NkkL_zye$rPXj+c?@yVZNNEU`of_>>r`-ZPw|0WSTzRY z2+wSk7(u=!I7~rz%=lZ{w5^O&8OuZS)YRAsRiy1^9pKdlhqbWwuHEow*lP$Q@j&Z7 zE~91HG%!<(8>N87Zwb%Xow0m{M*_`uyz`w~J65qf7jJy$KLB9D)nZ`X`_?O`n?ulJ zNVL^4se~o2AU&iXG;(*ep;A|tSk@hhE~Rsg!=@aJ<)2b!F0Zfmx)tI60I<1ozF4fA zF=5W}W(M9E&SxFz%L$NGv&%{lo@_V#uX6Df_Vp*z%L+iM(w*(VusN7^G%(WpBLvh%I~gy#TB@KU3akLD&KSk#Fk!gs&2}F{L;vAJ;wVxr$o7Dxj2n#yzZ6c_u z`L)uMB;yg8@osQ&UIU21+CQUXK9YZ0!tE}F4&pxg*;F`UHkH|KLq_$9fXw{r-a6{2 zv}|QwC#5(l?<1dFXpg?x-{xC|L#&L4m6EDF2x4O;qNWP?rnB_WXtXnT5iPD{75?Y< zuYSICe@DZcb;6IW&SSumqrcsr7Rt!Wi*M%0Xd5z-B<%=^vM_39(uzPF+91TM0%;y+ zm`WWMsSzhOWm8p4Bs-+n7Py1ndKs{w_E~AoD+?uF6bonZ_KEoekwN>`+-N?5_V(Q8@Y!@H>s_K{|dxH12>GLtO`396&! zP;x`mz))29)}?NnzO%RB0Ow=@Sb? zf~*)eFL-d>#YpNQP53JJEvY&!!QG_NJu!6z68I-KTCxn(+5X;@oOoktIyot~ZsYcl zL(|8%K{ETQ%sWv_RG1>XB5_%F4~lOmwDcI3Vf;c z{)t_~MfNbsH>*a;#7{e-Hb!>dt{6o_y!Fv8YZ;R;rL;Aq=7_0z2PLI-6AF07~YDm}h&g&r$ljhL&&ed~~a(nDguekrvRw5 zK4x5>R@C4l+c2tC=C)@zJg^kJcWk`{LnrDnNN~1lY`CHh!OZB^NO+&WE!*?bEV&r{ zE=E<-iKzj*!d5sMfS4TqS?UakAIF3_q=<9GviNmni}HpTCw~2;v%+VK>_LR0(nd+6 z&?rQcg#m(b(7xhoo16x>7;+ehrCLmZ+CDM{+LcV;VH&^({1tO!!W`P2^J<>4%<>5s z{U_Nn+w&4ptmO8nCejuvy~jl3bY*5|=WdjZ7b$@@?7RA`P9T?Vro582Yhi9+0H+&%%4i$IEchyG_R-k^9 zR%8scKl2$1K^3r)|A3FPF%wGaoD@bNc$vX>c)?3*T)EBfU{kGf@H~qoWIs?kpPiv& zRCSpmgxWsOW!Fcb?=rx;wvwKv#mx5Ed@#N1MQ>8`25!;ZSElFQt-{e=Dc!mZr*YUc zOM2iAb7FRc7JlJ2lq}@88FSK?kk^d(%r%m-Os9gWg-2_~^2re+D9G^fulwi&7eDk? 
zTueZE`j9nf)mn`d$5mv5^X-GTWc!qhjkPh5WDpT5|uW(Rwa8UNPpg@%tI(tK20<{2I0vi3_0tLO|a)tH1fM-W5|9zKp z8Q7NXuP_B_zGz1rAEKn@`Zg{{<58A9t$O{U(dr`@h(}}f%#|vXLP7Ffbo#PfZ zY4+Yl`*~OZDCRA{Bu-&#sxo53P!tx9ibtyGb==<22OWZI_nI9k;mMHK7--$i??X)m z>DMB0j|CE;L@!EJdQd`7-DqoTzM0V!c*7|H{*o<+pT?$d_16`-S4By?_+=~{D~@mY z3Gqaw_XzV+SF1@ZyqeagDeuMrgJFt*#aM+Em4l3!7!SY?oeY-xMDcAWdg#fN?%;u& zsabHwKmF|Cnb<^J)+9Y8rQMAwc@zfqDla24xS=O929gEyE5026uUpwxQl-9A={*(f zF@QN5yUN`CQ4nhzYeX{#d!|BF68hxvv7$mFsU7P&c25U4)vOBpOk^rnEIv39J6v364ub8yRfsuR!ladN`@VaW$sVNw4 ze)f;JPpYk-W8gV5Xyo)ABS7EsQ8HP;%4#h5s~^!hFC-w6w$lN0Vfj9;#;ki7^N;}X zAi`=?+-OW(5JkUF6I5EAfXz$+HZ_TBF1m)zf84Sxrza2c?+H<8t z_JR<4aL|STr3%eX7cz~<*GFSJH1Gsk>_9?JZ!y}4-QKA-Zn39SV@ThG{N`rjtS9OO z+&gu#MDO?TrI6sC$5(UY|7f7G{{iH+FUvb-#9XOt z#>s)pyQyhkCPc`@le+552kF}CAVcn3KrJKl*Z35d#8|z+c!1>3Y4i=BUGbQi#jHtC zB4NVUtulhiHX>C9P_2kpV%hJ(4ZC{piiqH22|0MoKfo04xU^-ehSd4xPLtBeTE;ZC z&GsUX53v}doTjQ_<%Clqlbz%iF*!)7DIt5hJM(r z^~ZIx&*G=-b1k^&z~2PaOPX}vhayuchy|j^;p15dsib1>1Dd_iL$m+aNV=ghmbtpx zaWNY5x`-&AY>Stj=8RaN%>71j$DzG1P}S&C=yzQ+jS5^`J!%fji@{~Z!NKnPkSx1- z08a{5pCwMKz9kmodY8_#-q>%)FjL)l4=JZhSqND5KQwOlDo3r8aM+H=(-DQd&`5;> z`35Y&Gt66z?%B7W(yGWCAXG|+qb>dWzvg6<$W9}WByPb;l<9k_h)DC2{*B>ZxOULQ z(%`~VE*57ZRg3>$0LlY3{3c3tTvTbA&w*iaTQ?+)e3yG)mva0?#$1OIicXuZil!)- z%J=^O`2PT7^6mBS8O?7*$Fu^cpK=XKiHM%gPe7p`eHjBcMsrSxoUOvb11R@M zuqtiTQ`Qwl%7hY|*#{AP&M)m!_aAE}$ZB?7wK=QNQJBnF#}~{%Mpn;j2DQL#8#Y!l z$|SqUg2$?=W)9;Iq-*-=ZoSzxmr1gJwGVe>TINc_8oKFhzRj|6LU9YK*6V!XWKza; z7geTB-5FNR5OX}?;aH&QGk>Vx3{j=_$;oy}{{XkPV_GlN70*aZB)8P&*A-(>q}b`G zSIqtvjArIdx)LWvy6Y>-NV!!)iA(L9%evvXYGYjYTK2F}qp{YSwT{JejCPqDmE0;; z7IJk-%H*C8D$UWT z&P%_`OS{Wckmt0%k4L!J{My^}hW9bQAb%Ki%jTw^p8MRtw|=XoJ2~#*w{LGf-QH0+ z?c>LueH|Om^E}@U{{T0;%=_Kl{{Rco_j(W!67m8a#pAdHLx2+V$aesbhu`ZHFMG&! zszseUV}fDSEQn)A(+)CXNG$3;M;RxO;bmn4?;3)K^@i}LO!6#4$#O#?0oznU;ha(? 
zUka7XD8#js4>`+8W5k;hLin9k{N-G3?kdfl99ErC0~wR3u`GOnOw6SK!6A-wmMt;d z$=*`da%04G!IFoOaErH@VJ&)IuTmj}pdZAb!{Y~0^I@|LsoSJ;9t6dlgzZ#`50<)x=uCX=g#Q4C3TE`k zsnL$r*0eQtVcUZ+zBWGsc~ZoaGx~WwX8;46L3103R0c zq^w@BEq*RlB$L)Ssm0MJboA^l@(>uV(1T6+f%>xEbM}H)!l{NWCRyJRqn%U8H)7Y+ zGc9P4%jspd4B{HvMVPv&9_ar7A~AB@6&OP*mgtOLdk#6o%SW_7} z2MG`gtHl1yr2bvTh>@1c%_71PBhn9RdUo?w;V#H15_&Ai<%r;K71JaEAnU*T&u5{dH#M z%$d38%sKbnb=Q6Gt@pEnsm#a2Wsq+#|SOF2HXW96SQzqsK_dPf$?d00_U{gZ=*X?jLSq0pQ^f z5a1CJ9z8-tgbm{by9YqTeuVR!RpjwgrT0iL>~Pt9qtcNnMN5C+DGwh~u^Za^Jwd@I zAS5EDen~@1N6*2@#m&RZC-z2MLQ+avMnzRkT|-k#+sN3&)Xdz%(!tTm*~Qh(-Ty;C z;Kxrv!I0>f*tq!535gk*S=nE6a`W=b$}1|Xs%vWNT3XxMJ370%dqzgb#wRAHrlCvA zE30ek8=G6(N5?0pXXh7}SJ&7uk>L>$5fG7niVO$u0(&7~BR+c0`WQz<3F*Dv(-&;M z$he|W>7_rOP_io@;u+cxqu^6H;|8p|?{g;IPUFP3Ke)tZ+Mg~m6t0KU` zq{oIsz=nH(0?-iPU>^{$0m6Vvhs-(Gh_-5NP+b@w z9|NQ%wj}t>FTBkWD|ppae!00b^LFcvnbyS%b`jsw%AH0pREvPUjU_fJvg(K5h74=NuWty>cTgn} z85pws^leA*6hn~&4v~t1JnHAg%qhZl3yJbQ6C>veD$+ye^fK8B^AfYAVNjQeNih4T zmun(418{DR2+DOzL7wvc9<<)a3H*Zm)5FXSDfC(mvNgUp0I^2be+z2hDe<3oGLBCKrzuTrNJQv0N}Y=e|Hr z*edHHgbUft6ZBD~rpeLCBd%29Y`&;3bF){4HRM-h)#N&l_jk@{?@%Vl8|Yk`cT*=|z3I`qz& zRbcwZOr6|x$83|sR(S3C&YljGtGV8$tL3<{arMUO#y2|!W6}aQyib>+YxzUQ`I@)S zY}{?{n9bj;lNk%Md-4CzHvUigoWXx|@TBYXIJ2@KJzvPkoCxE0ldM>KyFt544FMb( zYbc%UanLA#HDc;f#KyCumqBhz9t*oSd1767q!kL!b%pFM{E{PbhK41wfMQSyt~qMs zs#BD)?2l*3PJpepLk1~c0dW+}1Q`!$K z8~3=6&-PX&f5)rE^k1-r#{Wm$Fmy}F5jcF@2lfPidH~q-Efzu`qZxPd*XYNM?)RU( ziVn`fVV?W`H{y{i;Z#OMw~u$zvT4XYX;|*~752x5Wux&I?gVa61rt471GWxxpD|7x z&tveM5Z??CA=Kqk?I4LeF*~F)1Ejek^Mn?)cYdN7UWHmc0LpGB zBFwMm%23&j>{@QWm;syzIXm)lhr>*~!9eRe%F;3Gid=pVVH5Afa*`I)HI7D#8}xKp z&ZxjF!VVIu#gB6X%eTx6Y1F(kIvQ)|v5n2_vGL`Vdl$}Z>3w-4=cM`E-XrHt*H|sq zpC(jVJE$kZbK9$}FhIv?E`5t7^MlJm=THikeeY5i$!W*N+R9z({+ok>nHdlLA|M(6 zZFklUeo@gj9~z_2HnF?cm-etUU}$6~W!Ms_^}fvQY_POU3bE5_TCIxgT$9En^cuk~ zPdgjA@?f;0D5t58ImuFgtb{3Ue#gMy97GSjkXLN*S(ci>VeJxx>wMkjWr!WmVY~kT zXoz{akY9gAEU%=L`W)O_Gmxn5=&m!?8^HYaFgI92kB_k8*6q`foDC1XkOrb#p6%3+ znPh&7gqh&s8b&dQ?nP3W=dj)6@ty-uo|l$*ldQ^|eNC@?W17Qy-L=uWs1FK&7VriB zU%X)?9^4C699&hb*-0Oue07q05+n(BihRZ&{kB&oPn+!4Sbf+(g`JYfHRr~@Sb5fF6R^CCwwcZklcSo5 zQ<#93c8hC7N3c{ljs`&I_q?vj5mw$z;G79pPiybCe1;nFysd&e{+3>baiIT7swBO& zPB-lRJxSw!v!Fssn}@#BS%`xc$YyAjA$RRx>duhQqxtfB6udJ(`T&rW)-AoXbRRqp znq&!1%M#`#PdbupsY_YUds&1?vb!AkV#FrY$kfF%yEWOYEcyJbgaJuEneK*2*sIpU zY!U=A8tqWbqfdphUMPpxIbO}~QpNmO){{^J#0;&xBZRNjFeO6D?b;feq^`{!iEtm$ zcAVO?iEEUoQ&Mj(ptXQz8ax16IP)zrzOFmiN6?`Jgh(uDtO3y5(9q!ODq?tr%4Jfn zG??N_@~=L3cs>BsiqwkD`dzB0oweQW`Z^~nJmwf$FP!ICs3+9Tm1)pc`acuutlfmp zzCtu3?Ap^F%|nSzr#bA1P2&og3olUNi`kG5T{@pron)xj9T_?26tFe*BHm)`vjI~~ zYiP0>vnpXxBG9aLlQd7y@)@hU0_nePtgl|&h24Sm13$;j(dF8o6dM3R=XZQ*+;I2ilwN_oD?Kan z6`DQ!r*O>SFk9u$ksRbvOO4=ici?5p*(*Ftd9@cS7W~;ISM1#PxD(aMSLX?5Cnkd% zYwFEVTqdB*eAEMAHMF_B;9!4Y`~h(M0^E`~(Ro{4zEN^uvaxehY<@qS{uMF`syMyy zzu~LC@P_SNyFwlSC_o)j*kBKU;&QVGfVk3Kx)@M)WEeKu$GbEGkIt$Cn_EPYLo;mz zc#l_th-D9eqkJ%wL%8vRMvW?{g6092UI-=E!VJpj5< zZ@8-F*P6h$pVAJ$a;9DFnLGf%)DM6a4zT&~9!s6@ZDeyf&?RDl<(F}Q!Y9z)`+M03 zKnF}enNR~q@YS>X{-Swd&*m$v2S5O8;R9e0$Z~VV@&F*pypf5p@cOA5{~Iot24RbP z4pX3!)&n4Q!rSQWd+?SL_qU}5vYYV)mW#2&q1 z_D|&rmjl5r2QX2=E%6iJjmr4D+xQ1Sn9^OQB{EXOPE9al+@twl5$F(hfy5dQ`|;^O(*U*R$$KX z@s=*k?A}8QywDB4?5BWofSX~q5xpNMx*P0-S??)q9c996zsdRQ91nn>f>KP6H`o3& zjlrb~@!;(_%t@LdFh@xgt}ut`|L_-$!F*1b;-}VScmA&R-20Kty9Yq)0{}wtQ(?a@ z1+&3laO@EksZ{_%K4F{+LVFyh*M> zF}eHMRkX9r{pM{BB^jo64^aSZt)8rwbWz>jSgpyKNv|1&Go9dt+?98r+a+t!(`e2F zp|8@?4}f8{{62Fg0ye!0gBv@Tu~KiFe=33h?@VF}V-jT;9D&;EdoYvz5wtL~4^(y@ z>|So&OuJrt?*zk)3ikNjrj9cjFyHZo%}4(B-1q@7DGk0HhdB?w?7jiaxy;n>4}Jmm 
zpY3k(05Bi^S1t^L!M}517})+RW`_|a)2~p3(F?us{H^dWkpI=kVJ!AX2hRVy0|(rt z{O&v1XMbXrzkw0+CoBA;kN;$ryMv!Dj`fhn|XYcxLtq%Yg zZ8#kLQ(pPki-Dmr7j`!J)pEZ}`u0y82t(t)lT_wcNq@4}|CkuqFwXyjoUp+0o16r{ zkw#Evt|#UA{%sfpiuq!mM~6*{txFeD^Xcfs?)!meCxwi_r1E%G)u@dy^E#w=yJy1g z!Z2QM)aJ^AE$EWFn7kVQ=C1?^}~qo4cJU$X6_gIrMaamvbpz|z6KZl zWN63C8;Os<27-^U;MVXb6ir0~`^BTm+o>A6)wEg`*sgIHQzExl2m+&sN{WgR$)FYleD2*<2TvjGd`@Y$T@ zU~8O%rF+N~*F<;i0%Ec)K#jDV9V}j}P9bE@YLa%YjR7<91H?AlD5jl{yQcB?cst}&zvY(ch?+aEAQ*$2MTYSi#v%{gg?1FvG}UL1-2YDtQp@c zDi)fydd8+xcVLLPy-;WTykQI~DG*%KGO4V69JzW5lmoL@!ghI4nVkNXJ!V{i8@qA4 z&+I45pH`}OKi0S1i&aO-mlE{lB=28{mxbO3&=u(BIUK2omX(L!uN6NFm#r#wgv^U4 zqQ)xkk+somga-T|#H^>Zc92t!y;7@cj_(r8s63-{o#|w^n8%K8gVKLq6_Wm>Cn^Rw z`IO}OW|h>qSRp+J+2a<1o4r3rG*5bzTA5?Hmws8H6-DiDsi+SP3}<;Md{#g&Ot1Ku z`kTz$=7mZ7aq-!eTj0?PEl)0JmH&A?5zcluVmw!q`eiW*f96w$&KGajEO!(x9nD?Kmy_|`@ze9oWjT8_rK+~EH z2@&qoy009KtJ%iE4#SlfNi@s)X=WB?Osm`(``7rfAe;e-x@jyS$36JfEF!ecU2n&$ z7;Kf@IjH(*N!t4x*%vw`!Ng=W<;U8~CpYRuG!^(&$8SdpB{6&nHgMnx@k|KmXFj!4 z%WKc~%q)o9$Q60X$BQ0OM|?V68$aDxuv+S3Ml*Tqh5>op(V>iNb;!3GZb_=Emms5| zAsKheb{UlCT2#z@CQ&KJi!GV>ba%_fbtLu%3nDF5CnP_O)`M&Pt{^#hKARJjx_KeT zfwsHRolH2SLCA7s)`{=iy<;cH8NnlJHnZms^eP*H>`B;7&fg>Ba-M~QK zUpd;wM&T04=5Vr|AeFwNsiyh(0%{rWYdL(GB zX)S^jShw)8Xl;yRlhLHCtlA?aur5(GD%dFyH_W*pcFowCUV5gQr^iojx}H1Zn_SIy zc*&cwNv&^HV`{|I^?r11HW!gHzUQgQ#dfK8UIkpWI(lLF`RggtBCj6-Z~=;NuhDB$ z`OTpgnpAcYdehfcHWqm0U4K@19cqzl-gKSTG*MO zqR28@n?@3>*8*7H$Hfx5UzV(`(~0sxLwnci5+74;RgRbrJ_ra36@0xiT&H^`exl-T zD6K}q5S5BadZeQg90T!iBT}_y5KB9}2;&Un)`%88a+x|ge>|4nScw+-8EaWB${?2R z#KE^`CD~ttR$_MS0gwny=#UfAa^}-}x~v{lEW6rvG82t3_h!;o+B$khGUWn`!{^Bh z#V0iW75E)+ZG85@TaAsvIOA^y4B8$5IFO50?YNw!gUXiksvV}r)v7R390Weey2mLp zGQ98XQ+PYwM>zJVvOz8eqtpXim3Q7qVi~vMBl}isaTW>jZz}?IZW_}zyBRt{ho2C9 zOru#3=HQp^5c&RUtKw9r+L%-5j+?ak>8YKW3z%(ghB=8;MFH)65dC|g^Aoi0rz@WJ zPmjOZ==y$VnC{s!KNzSc%Pw6=PXhWOb2DMck!T$~Hr59jmZs)56lfb=+V^Vxz20BSB3 z!f_-9or)bW5!(1+!x7r_L7(h0{?UAJ@q+mnC#oPbL;Ym6e_z;3S6}2cuH1y6`Qc|* zitMixb1+oTCc1TFxvj6!4s+V2iaq%)3U-2Q<6T%*>BY^j8d4I@LkP#B*#Z=aVgfLb zUnXyPO!y5W(wOhmHgdPiJw_$0#CN;RD-rr`geASv?u_@jm4jjj&qrBEahmGU08rS3 zp`xhrPRa9JW731FooGqgWH;KJ67ysth*Kxh9^pkdxJn zS1d5E$=Z;{(_E>;@;Jk(O=P__!Av8 z3pKv0gsw60(u<2vs{x7qqcLiy4**JWjt4-=B@*khO%R>eQjx66;RSTbL&qs@{#ZvM z{0UqN#1JS0_ta(s=W+oH{_6{Em&EtM3M`(b9HL?sT$}zr z`U2R20OM1#0sD}^@X7mzUgh-}o1L!BZS;T4K9>?GbX0|RD9>)(N`KLI&1msrP*gsj zq{nMXDSRcyz~B(54(I!0^_|}nM0>j=U_FlRu=F{bC*EQT|95qY={n8Gju9ebH%$(R z)N8#yH8oc?H8@oO(%bc~Dh@6t9rW~FJNf9Cyq5usGY9x!v;MDo;zkbHdLcFQnxXl6 z5>Ho=kkI|a;E`{Emi9_@mRp%9QTCN}q@A~PD{k2aGus-24h7qjSaY)8FHrKBYWq+k zHy%^3vaK4EbL}c=>@kkY&S`5#nB{!FS@>|`8Yn2^<$Odn=A^losRep(zEq9>K|tIH zBI$<87Z~pVkPrp!NUJKXDXA+pT{n8RMpf*ed5cHjcnXX+nB_pHfRoM+UXXK=z+p$!6BFf02GzdIc6E|A=CAm3&JR?auh0u4csz`;wMiSu`(;F{zB9zOGUeY-yq4$nprY)2bMNPTv|WmN?+0gwkA6Jv{i8+g^(46)xeQv5bCWU-uSD(B!kWIwv1eA^L8I^Wy78`Z6`KV;!O_c|3e+<_F^!5b&LSn2qSKkQN|6wr<$6O5 zbvkQ;G&*n{Dm>sj5+eC9ZB4t)g;&d}OZ_xh?};bfA`6bI^1ed3rE}VJJokxyKj>W4 z*p3~2liDhlT215GB9>GRU5Bh%zPf0racYFZPTv#^+$*fhNbMmPz7->xP}< z_O$BfRx0D~4cNqG`_`JLZKE;o3$#zGMc0$7rVdW0_6@6oSl%+(76hT!ao8FgV=|GY z#ELEg`sS|D)Y&Rer$)H94YGa@IdFJu6)usL4cp{&8Rn}h!%;ufRd#!%ZY|d`<(ws? 
From: Charan-Sharan
Date: Thu, 6 Jun 2024 17:45:09 +0530
Subject: [PATCH 054/151] HPCC-32002 Create documentation for bundleTest-thor
 GitHub Action

Initial check-in of draft documentation

Signed-off-by: Charan-Sharan
---
 .github/workflows/bundleTest-thor.md | 358 +++++++++++++++++++++++++++
 1 file changed, 358 insertions(+)
 create mode 100644 .github/workflows/bundleTest-thor.md

diff --git a/.github/workflows/bundleTest-thor.md b/.github/workflows/bundleTest-thor.md
new file mode 100644
index 00000000000..8d82b12660e
--- /dev/null
+++ b/.github/workflows/bundleTest-thor.md
@@ -0,0 +1,358 @@
# Bundle Testing on Thor
## Why do we need Bundle testing?
Bundle testing on Thor is the regression testing of the machine learning bundles. This process is crucial for assessing the performance of Thor. It is a highly sensitive test: it flags even minor changes or issues within Thor, ensuring that potential problems are caught early.

## How and when is it triggered?
The [bundleTest-thor.yml](https://github.com/hpcc-systems/HPCC-Platform/blob/master/.github/workflows/bundleTest-thor.yml) workflow is triggered by a workflow call, which occurs within [build-vcpkg.yml](https://github.com/hpcc-systems/HPCC-Platform/blob/master/.github/workflows/build-vcpkg.yml).
```yaml
on:
  workflow_call:
```
The [build-vcpkg.yml](https://github.com/hpcc-systems/HPCC-Platform/blob/master/.github/workflows/build-vcpkg.yml) makes a workflow call to [bundleTest-thor.yml](https://github.com/hpcc-systems/HPCC-Platform/blob/master/.github/workflows/bundleTest-thor.yml) through `uses: ./.github/workflows/bundleTest-thor.yml` when a pull request is made.

```yaml
test-bundles-on-thor-ubuntu-22_04:
  if: ${{ contains('pull_request,push', github.event_name) }}
  needs: build-docker-ubuntu-22_04
  uses: ./.github/workflows/bundleTest-thor.yml
  with:
    os: ubuntu-22.04
    asset-name: 'docker-ubuntu-22_04'
    generate-zap: ""
  secrets: inherit
```
The HPCC Platform needs to be built on the latest commit, and the installation artifact needs to be available, before the workflow call to [bundleTest-thor.yml](https://github.com/hpcc-systems/HPCC-Platform/blob/master/.github/workflows/bundleTest-thor.yml) is made. To achieve this we add `needs: build-docker-ubuntu-22_04` to the workflow call step.

## Passing inputs to the workflow

We can pass inputs to [bundleTest-thor.yml](https://github.com/hpcc-systems/HPCC-Platform/blob/master/.github/workflows/bundleTest-thor.yml) (the called workflow) from [build-vcpkg.yml](https://github.com/hpcc-systems/HPCC-Platform/blob/master/.github/workflows/build-vcpkg.yml) (the caller workflow) using `inputs:`
```yaml
on:
  workflow_call:
    inputs:
      os:
        type: string
        description: 'Operating System'
        required: false
        default: 'ubuntu-22.04'
      asset-name:
        type: string
        description: 'Asset Name'
        required: false
        default: 'build-docker-package'
      dependencies:
        type: string
        description: 'Dependencies'
        required: false
        default: 'bison flex build-essential binutils-dev curl lsb-release libcppunit-dev python3-dev default-jdk r-base-dev r-cran-rcpp r-cran-rinside r-cran-inline pkg-config libtool autotools-dev automake git cmake xmlstarlet'
      get-stat:
        type: boolean
        description: 'Run Query stat'
        required: false
        default: false
      generate-zap:
        type: string
        description: 'Generate ZAP files'
        required: false
        default: ''
      test-core-file-generation:
        type: boolean
        description: 'Test core file generation'
        required: false
        default: false
```
- **os**: Specifies the operating system of the runner machine via `runs-on: ${{ inputs.os }}`; the default is Ubuntu-22.04.
- **asset-name**: Specifies the name of the built HPCC Platform artifact.
- **dependencies**: Lists the dependencies required to install and start the platform.
- **get-stat**: A boolean input that decides whether the QueryStat2.py step is executed. The default value is false.
- **generate-zap**: Indicates whether ZAP reports are needed for specific test cases; the default is an empty string. You can pass either the name of a test file with its .ecl extension, or a name ending in `*`; the latter form matches on the workunit name, e.g. the workunit "KMeansValidateOBT-240531-133100" can be referred to as "KMeansValidateOBT*".
  Example:
  Caller Workflow: [build-vcpkg.yml](https://github.com/hpcc-systems/HPCC-Platform/blob/master/.github/workflows/build-vcpkg.yml)
  ```yaml
  test-bundles-on-thor-ubuntu-22_04:
    if: ${{ contains('pull_request,push', github.event_name) }}
    needs: build-docker-ubuntu-22_04
    uses: ./.github/workflows/bundleTest-thor.yml
    with:
      os: ubuntu-22.04
      asset-name: 'docker-ubuntu-22_04'
      generate-zap: "'KMeansValidateOBT.ecl SVTest*'"
    secrets: inherit
  ```
  **NOTE:** The double quotes are required when the `*` notation is used in a test name; they ensure the string is handled properly during the workflow run.
- **test-core-file-generation**: A boolean input that determines whether the core file generation test step is executed; it verifies that the core handler works as expected when a core file is generated during the tests.
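Both accepted `generate-zap` forms converge on a wildcard match against the workunit name: the "Generate ZAP files" step shown later rewrites a `.ecl` file name into the `*` form before resolving the workunit. A quick trace of that rewrite, with hypothetical values:

```bash
# Sketch of how a generate-zap entry is resolved (values are hypothetical)
test="KMeansValidateOBT.ecl"
test=${test/.ecl/*}                        # -> "KMeansValidateOBT*"
# Pick the newest workunit whose name matches the pattern
wuid=$(ecl getwuid -n "$test" --limit 1)   # e.g. W20240531-133100
echo "$wuid"
```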
"KMeansValidateOBT-240531-133100" can be refered as "KMeansValidateOBT*" +Example: + Caller Workflow: [build-vcpkg.yml](https://github.com/hpcc-systems/HPCC-Platform/blob/master/.github/workflows/build-vcpkg.yml) + ```yaml + test-bundles-on-thor-ubuntu-22_04: + if: ${{ contains('pull_request,push', github.event_name) }} + needs: build-docker-ubuntu-22_04 + uses: ./.github/workflows/bundleTest-thor.yml + with: + os: ubuntu-22.04 + asset-name: 'docker-ubuntu-22_04' + generate-zap: "'KMeansValidateOBT.ecl SVTest*'" + secrets: inherit + ``` + **NOTE:** The double quotes are required if * notation is used to specify the name of the test. It makes sure that string function properly during the workflow run. +- **test-core-file-generation** :A boolean input that determines whether the core file generation test step should be executed. It helps us to know whether the core handler is working fine as expected when a core file is generated during the tests. + +## Environmental Variables: +```yaml +env: + ML_SUPPRESS_WARNING_FILES: "RegressionTestModified.ecl ClassificationTestModified.ecl" + ML_EXCLUDE_FILES: "--ef ClassicTestModified.ecl,SVCTest.ecl,ClassificationTestModified.ecl" + BUNDLES_TO_TEST: "ML_Core PBblas GLM GNN DBSCAN LearningTrees TextVectors KMeans SupportVectorMachines LinearRegression LogisticRegression" + uploadArtifact: false +``` +- `ML_SUPPRESS_WARNING_FILES:`Specifies the files that require a warning suppression parameter injection into the ECL code before they are executed. +- `ML_EXCLUDE_FILES:` The files specified here are excluded during the run. +- `BUNDLES_TO_TEST:` Lists the bundles to test. +- `uploadArtifact:` Determines whether the logs Artifact should be uploaded or not. +## Steps Involved: +The steps in the workflow run on the specified operating system, with Ubuntu-22.04 as the default. +- **Download Package** +This step enables us to download the ready-to-install HPCC Platform's artifact built on the latest commit. + ```yaml + - name: Download Package + uses: actions/download-artifact@v3 + with: + name: ${{ inputs.asset-name }} + path: ${{ inputs.asset-name }} + ``` +- **Install Dependencies** +Install the dependencies that are necessary for the platform to install and start sucessfully. + ```yaml + - name: Install Dependencies + shell: "bash" + run: | + sudo apt-get update + sudo apt-get install -y \ + git \ + wget \ + net-tools \ + tzdata \ + unzip \ + xvfb \ + libxi6 \ + default-jdk \ + gdb \ + ${{ inputs.dependencies }} + ``` +- **Install Package** +Install the HPCC Platform from the downloaded artifact, set permissions and configure the Thor engine to use 2 slaves. + ```yaml + - name: Install Package + shell: "bash" + run: | + sudo apt-get install -y -f ./${{ inputs.asset-name }}/*.deb + sudo chown -R $USER:$USER /opt/HPCCSystems + sudo xmlstarlet ed -L -u 'Environment/Software/ThorCluster/@slavesPerNode' -v 2 -u 'Environment/Software/ThorCluster/@channelsPerSlave' -v 1 /etc/HPCCSystems/environment.xml + ``` +- **Install ML Dependencies** +Install the necessary Machine learning library dependecies. + ```yaml + - name: Install ML Dependencies + shell: "bash" + run: | + sudo apt install libsvm-dev libsvm-tools + sudo pip install tensorflow numpy keras + ``` +- **Start HPCC-Platform** +Setup the core generation. `ulimit -c 100` sets the maximum size of core files that can be generated by a process when it crashes. `echo 'core_%e.%p' | sudo tee /proc/sys/kernel/core_pattern` sets the pattern for the filenames of core dumps generated by the system. 
- **Install ML Dependencies**
Install the necessary machine learning library dependencies.
  ```yaml
  - name: Install ML Dependencies
    shell: "bash"
    run: |
      sudo apt install libsvm-dev libsvm-tools
      sudo pip install tensorflow numpy keras
  ```
- **Start HPCC-Platform**
Set up core file generation: `ulimit -c 100` sets the maximum size of core files that a crashing process can generate, and `echo 'core_%e.%p' | sudo tee /proc/sys/kernel/core_pattern` sets the filename pattern for core dumps, where `%e` and `%p` are replaced by the executable name and the process ID (PID) of the crashing process (for example, a crashing `eclagent` process with PID 31415 would dump to `core_eclagent.31415`). Then set `LANG`, update the system locale settings, and start the HPCC Platform.
  ```yaml
  - name: Start HPCC-Platform
    shell: "bash"
    run: |
      ulimit -c 100
      echo 'core_%e.%p' | sudo tee /proc/sys/kernel/core_pattern
      export LANG="en_US.UTF-8"
      sudo update-locale
      sudo /etc/init.d/hpcc-init start
  ```
- **Core generation test**
This optional step checks that the core handler works as expected when a core file is generated during the tests: it force-generates a core dump by running an ECL script (crash.ecl) on hthor that raises a signal to abort the process. The step runs only when it is enabled through the workflow-call inputs.
  ```yaml
  - name: Core generation test
    if: ${{ inputs.test-core-file-generation }}
    shell: "bash"
    run: |
      echo """
      boolean seg() := beginc++ #option action
      #include <csignal>
      #body
      raise(SIGABRT);
      return false;
      endc++;
      output(seg()); """ > crash.ecl

      ecl run -t hthor crash.ecl
    continue-on-error: true
  ```
- **Get test from GitHub**
Install the ECL bundles specified in `BUNDLES_TO_TEST` from GitHub. Each install is retried up to five times with a one-minute delay; if all attempts fail, the failure is recorded in `Failed_bundle_install.summary` and `uploadArtifact` is set to true.

  ```yaml
  - name: Get test from Github
    shell: "bash"
    run: |
      IFS=' ' read -a BUNDLES_TO_TEST <<< $BUNDLES_TO_TEST
      BUNDLES_COUNT=${#BUNDLES_TO_TEST[@]}
      for ((i=0; i<$BUNDLES_COUNT; i++))
      do
        BUNDLE_NAME=${BUNDLES_TO_TEST[i]}
        BUNDLE_REPO="https://github.com/hpcc-systems/${BUNDLES_TO_TEST[i]}.git"
        INSTALL_CMD="ecl bundle install --update --force ${BUNDLE_REPO}"
        echo "Bundle Name : ${BUNDLE_NAME}"
        echo "Bundle Repo : ${BUNDLE_REPO}"
        tryCountMax=5
        tryCount=$tryCountMax
        tryDelay=1m

        while true
        do
          cRes=$( ${INSTALL_CMD} 2>&1 )
          retCode=$?
          if [[ $retCode -ne 0 ]]
          then
            tryCount=$(( $tryCount-1 ))

            if [[ $tryCount -ne 0 ]]
            then
              sleep ${tryDelay}
              continue
            else
              echo "Install $BUNDLE_NAME bundle was failed after ${tryCountMax} attempts. Result is: '${cRes}'" >> /home/runner/HPCCSystems-regression/log/Failed_bundle_install.summary
              echo "uploadArtifact=true" >> $GITHUB_ENV
              break;
            fi
          else
            echo "Install $BUNDLE_NAME bundle was success."
            BUNDLE_VERSION=$( echo "${cRes}" | egrep "^$BUNDLE_NAME" | awk '{ print $2 }' )
            echo "Version: $BUNDLE_VERSION"
            break
          fi
        done
      done
  ```
- **Run Tests**
The bundle testing process is initiated, and the logs are processed through the `ProcessLog()` function to identify any failures. `ProcessLog()` takes the bundle name and the target cluster as input, checks the log file, and greps out the number of failed test cases. If that number is non-zero, `Failed_test.summary` is updated and `uploadArtifact` is set to true. More information about regression testing can be found [here](https://github.com/hpcc-systems/HPCC-Platform/blob/master/testing/regress/README.rst).
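  To make the grep concrete: the regression suite writes a summary block at the end of each engine log, and `ProcessLog()` in the step below pulls the failure count out of it with `sed`. A minimal sketch, using a hypothetical log tail:
  ```bash
  # Hypothetical tail of a thor.<timestamp>.log produced by ecl-test
  printf '    Passing: 42\n    Failure: 2\n' > thor.example.log

  # Same sed expression the workflow uses: print only the number on the
  # "Failure:" summary line
  failed=$(sed -n 's/^[[:space:]]*Failure:[[:space:]]*\([0-9]*\)[[:space:]]*$/\1/p' thor.example.log)
  echo "$failed"   # -> 2, i.e. non-zero, so Failed_test.summary would be updated
  ```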
+ ```yaml + - name: Run Tests + id: run + shell: "bash" + working-directory: /home/runner/.HPCCSystems/bundles/_versions/ + run: | + ProcessLog() + { + BUNDLE=$1 + TARGET=$2 + logfilename=$( ls -clr /home/runner/HPCCSystems-regression/log/thor.*.log | head -1 | awk '{ print $9 }' ) + failed=$(cat ${logfilename} | sed -n "s/^[[:space:]]*Failure:[[:space:]]*\([0-9]*\)[[:space:]]*$/\1/p") + + if [[ "$failed" -ne 0 ]] + then + echo "Bundle : ${BUNDLE}" >> /home/runner/HPCCSystems-regression/log/Failed_test.summary + cat ${logfilename} >> /home/runner/HPCCSystems-regression/log/Failed_test.summary + echo "uploadArtifact=true" >> $GITHUB_ENV + fi + # Rename result log file to name of the bundle + logname=$(basename $logfilename) + bundlelogfilename=${logname//$TARGET/$BUNDLE} + printf "%s, %s\n" "$logname" "$bundlelogfilename" + mv -v $logfilename /home/runner/HPCCSystems-regression/log/ml-$bundlelogfilename + } + IFS=' ' read -a BUNDLES_TO_TEST <<< $BUNDLES_TO_TEST + while read bundle + do + bundleRunPath=${bundle%/ecl} # remove '/ecl' from the end of the $bundle + bundlePath=${bundleRunPath%/OBTTests}; # remove '/OBTTests' from the end of the $bundleRunPath if exists + bundleName=${bundlePath%/test} # remove '/test' from the end of the $bundlePath if exists + bundleName=$(basename $bundleName ) # remove path from $bundleName + + if [[ "$bundle" =~ "LearningTrees" ]] + then + # add a warning supression parameter in the file + for file in $ML_SUPPRESS_WARNING_FILES + do + if [[ $( egrep -c '#ONWARNING\(30004' $bundle/$file ) -eq 0 ]] + then + pushd $bundle + cp -fv $file $file-back + # Insert a comment and the "#ONWARNING" after the Copyright header + sed -i '/## \*\//a \\n// Patched by the bundleTest on '"$( date '+%Y.%m.%d %H:%M:%S')"' \n#ONWARNING(30004, ignore); // Do not report execute time skew warning' $file + popd + fi + done + fi + if [[ ! "${BUNDLES_TO_TEST[*]}" =~ "$bundleName" ]] + then + continue + fi + pushd $bundleRunPath + /opt/HPCCSystems/testing/regress/ecl-test run -t thor --config /opt/HPCCSystems/testing/regress/ecl-test.json --timeout 3600 -fthorConnectTimeout=3600 --pq 1 $ML_EXCLUDE_FILES + retCode=$( echo $? ) + if [ ${retCode} -eq 0 ] + then + ProcessLog "$bundleName" "thor" + fi + popd + done< <(find . -iname 'ecl' -type d | sort ) + ``` +- **Generate ZAP files** +ZAP report files are generated for specified files if mentioned in the input. If none are mentioned, this step is skipped. + + ```yaml + - name: Generate ZAP files + if: ${{ ! inputs.generate-zap == '' }} + run: | + IFS=' ' read -a ML_GENERATE_ZAP_FOR <<< ${{ inputs.generate-zap }} + if [ ${#ML_GENERATE_ZAP_FOR[@]} -ne 0 ] + then + for test in ${ML_GENERATE_ZAP_FOR[*]} + do + test=${test/.ecl/*} + wuid=$(ecl getwuid -n $test --limit 1) + if [[ -n $wuid ]] + then + ecl zapgen $wuid --path /home/runner/HPCCSystems-regression/zap --inc-thor-slave-logs + echo "testName : ${test} wuid : ${wuid}" >> zap.summary + cp zap.summary /home/runner/HPCCSystems-regression/zap + echo "uploadArtifact=true" >> $GITHUB_ENV + fi + done + fi + ``` +- **Check for Core files** +If core files are generated, create a stack trace using the gdb command. The generated .trace files are stored in the logs path. 
+ ```yaml + - name: Check for Core files + run: | + NUM_OF_ML_CORES=( $(sudo find /var/lib/HPCCSystems/ -iname 'core*' -mtime -1 -type f -exec printf "%s\n" '{}' \; ) ) + + if [ ${#NUM_OF_ML_CORES[@]} -ne 0 ] + then + for core in ${NUM_OF_ML_CORES[@]} + do + base=$( dirname $core ) + lastSubdir=${base##*/} + comp=${lastSubdir##my} + sudo gdb --batch --quiet -ex "set interactive-mode off" -ex "echo \n Backtrace for all threads\n==========================" -ex "thread apply all bt" -ex "echo \n Registers:\n==========================\n" -ex "info reg" -ex "echo \n Disas:\n==========================\n" -ex "disas" -ex "quit" "/opt/HPCCSystems/bin/${comp}" $core | sudo tee "$core.trace" 2>&1 + cp "$core.trace" /home/runner/HPCCSystems-regression/log/ + done + echo "uploadArtifact=true" >> $GITHUB_ENV + fi + ``` +- **Get test stat** +This step generates test statistics, allowing comparison and analysis of test performance on the specified cluster. + ```yaml + - name: Get test stat + if: ${{ inputs.get-stat }} + run: | + ./QueryStat2.py -p /home/runner/HPCCSystems-regression/log/ -d '' -a --timestamp --compileTimeDetails 1 --graphTimings --allGraphItems --addHeader + NUM_OF_STAT_FILES=$( find /home/runner/HPCCSystems-regression/log/ -type f -iname "*.csv" -o -iname "*.cfg" | wc -l ) + if [[ $NUM_OF_STAT_FILES -ne 0 ]] + then + echo "uploadArtifact=true" >> $GITHUB_ENV + fi + ``` +- **ml-thor-test-logs-artifact** +If any logs, ZAP reports, or .trace files are generated, they are uploaded as artifacts for further analysis. + ```yaml + - name: ml-thor-test-logs-artifact + if: ${{ failure() || cancelled() || env.uploadArtifact == 'true' }} + uses: actions/upload-artifact@v3 + with: + name: ${{ inputs.asset-name }}-bundle-test-logs + path: | + /home/runner/HPCCSystems-regression/log/* + /home/runner/HPCCSystems-regression/zap/* + if-no-files-found: ignore + ``` \ No newline at end of file From d83da9fba06c7f628ccc89311296da25ed88d6b6 Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Tue, 21 May 2024 13:31:16 +0100 Subject: [PATCH 055/151] HPCC-31902 Adjust Thor auditing info to contain pod/container meta info Signed-off-by: Jake Smith --- thorlcr/master/thdemonserver.cpp | 8 ----- thorlcr/master/thgraphmanager.cpp | 59 +++++++++++++++++++++---------- thorlcr/master/thgraphmanager.hpp | 3 ++ thorlcr/master/thmastermain.cpp | 14 ++++---- thorlcr/thorutil/thormisc.cpp | 2 +- 5 files changed, 51 insertions(+), 35 deletions(-) diff --git a/thorlcr/master/thdemonserver.cpp b/thorlcr/master/thdemonserver.cpp index 5f7d7f8b989..be154603883 100644 --- a/thorlcr/master/thdemonserver.cpp +++ b/thorlcr/master/thdemonserver.cpp @@ -273,14 +273,6 @@ class DeMonServer : public CSimpleInterface, implements IDeMonServer unsigned startTime = msTick(); graphStarts.append(startTime); reportGraph(graph, false, true, startTime, getTimeStampNowValue()); - const char *graphname = graph->queryJob().queryGraphName(); - if (memcmp(graphname,"graph",5)==0) - graphname+=5; - LOG(MCauditInfo,",Progress,Thor,StartSubgraph,%s,%s,%s,%u,%s,%s", - queryServerStatus().queryProperties()->queryProp("@thorname"), - graph->queryJob().queryWuid(), - graphname, - (unsigned)graph->queryGraphId(), queryServerStatus().queryProperties()->queryProp("@nodeGroup"), queryServerStatus().queryProperties()->queryProp("@queue")); } void endGraph(CGraphBase *graph, bool success) { diff --git a/thorlcr/master/thgraphmanager.cpp b/thorlcr/master/thgraphmanager.cpp index 4f8a9f1315a..32154ef040c 100644 --- a/thorlcr/master/thgraphmanager.cpp +++ 
b/thorlcr/master/thgraphmanager.cpp @@ -423,10 +423,7 @@ void CJobManager::fatal(IException *e) { IERRLOG("Unknown exception in CJobManager::fatal"); } - LOG(MCauditInfo,",Progress,Thor,Terminate,%s,%s,%s,exception", - queryServerStatus().queryProperties()->queryProp("@thorname"), - queryServerStatus().queryProperties()->queryProp("@nodeGroup"), - queryServerStatus().queryProperties()->queryProp("@queue")); + auditThorSystemEvent("Terminate", {"exception"}); queryLogMsgManager()->flushQueue(10*1000); @@ -890,13 +887,8 @@ bool CJobManager::doit(IConstWorkUnit *workunit, const char *graphName, const So JobNameScope activeJobName(wuid); LOG(MCdebugInfo, "Processing wuid=%s, graph=%s from agent: %s", wuid.str(), graphName, agentep.getEndpointHostText(s).str()); - LOG(MCauditInfo,",Progress,Thor,Start,%s,%s,%s,%s,%s,%s", - queryServerStatus().queryProperties()->queryProp("@thorname"), - wuid.str(), - graphName, - user.str(), - queryServerStatus().queryProperties()->queryProp("@nodeGroup"), - queryServerStatus().queryProperties()->queryProp("@queue")); + auditThorJobEvent("Start", wuid, graphName, user); + Owned e; bool allDone = false; try @@ -904,13 +896,7 @@ bool CJobManager::doit(IConstWorkUnit *workunit, const char *graphName, const So allDone = executeGraph(*workunit, graphName, agentep); } catch (IException *_e) { e.setown(_e); } - LOG(MCauditInfo,",Progress,Thor,Stop,%s,%s,%s,%s,%s,%s", - queryServerStatus().queryProperties()->queryProp("@thorname"), - wuid.str(), - graphName, - user.str(), - queryServerStatus().queryProperties()->queryProp("@nodeGroup"), - queryServerStatus().queryProperties()->queryProp("@queue")); + auditThorJobEvent("Stop", wuid, graphName, user); if (e.get()) throw e.getClear(); return allDone; @@ -1285,7 +1271,6 @@ void closeThorServerStatus() } } - /* * Waits on recv for another wuid/graph to run. 
* Return values: @@ -1358,6 +1343,42 @@ void publishPodNames(IWorkUnit *workunit, const char *graphName) } } +static void auditThorSystemEventBuilder(std::string &msg, const char *eventName, std::initializer_list args) +{ + msg += std::string(",Progress,Thor,") + eventName + "," + getComponentConfigSP()->queryProp("@name"); + for (auto arg : args) + msg += "," + std::string(arg); + if (isContainerized()) + msg += std::string(",") + k8s::queryMyPodName() + "," + k8s::queryMyContainerName(); + else + { + const char *nodeGroup = queryServerStatus().queryProperties()->queryProp("@nodeGroup"); + const char *queueName = queryServerStatus().queryProperties()->queryProp("@queue"); + msg += std::string(",") + nodeGroup + "," + queueName; + } +} + +void auditThorSystemEvent(const char *eventName) +{ + std::string msg; + auditThorSystemEventBuilder(msg, eventName, {}); + LOG(MCauditInfo, "%s", msg.c_str()); +} + +void auditThorSystemEvent(const char *eventName, std::initializer_list args) +{ + std::string msg; + auditThorSystemEventBuilder(msg, eventName, args); + LOG(MCauditInfo, "%s", msg.c_str()); +} + +void auditThorJobEvent(const char *eventName, const char *wuid, const char *graphName, const char *user) +{ + std::string msg; + auditThorSystemEventBuilder(msg, eventName, { wuid, graphName, nullText(user) }); + LOG(MCauditInfo, "%s", msg.c_str()); +} + void thorMain(ILogMsgHandler *logHandler, const char *wuid, const char *graphName) { aborting = 0; diff --git a/thorlcr/master/thgraphmanager.hpp b/thorlcr/master/thgraphmanager.hpp index 98c394e654b..d705259b17f 100644 --- a/thorlcr/master/thgraphmanager.hpp +++ b/thorlcr/master/thgraphmanager.hpp @@ -34,6 +34,9 @@ int queryExitCode(); void addConnectedWorkerPod(const char *podName, const char *containerName); void publishPodNames(IWorkUnit *workunit, const char *graphName); void relayWuidException(IConstWorkUnit *wu, const IException *exception); +void auditThorSystemEvent(const char *eventName); +void auditThorSystemEvent(const char *eventName, std::initializer_list args); +void auditThorJobEvent(const char *eventName, const char *wuid, const char *graphName, const char *user); #endif diff --git a/thorlcr/master/thmastermain.cpp b/thorlcr/master/thmastermain.cpp index b08a8b65ce0..f889e0bd7b2 100644 --- a/thorlcr/master/thmastermain.cpp +++ b/thorlcr/master/thmastermain.cpp @@ -596,10 +596,7 @@ bool ControlHandler(ahType type) if (auditStartLogged) { auditStartLogged = false; - LOG(MCauditInfo,",Progress,Thor,Terminate,%s,%s,%s,ctrlc", - queryServerStatus().queryProperties()->queryProp("@thorname"), - queryServerStatus().queryProperties()->queryProp("@nodeGroup"), - queryServerStatus().queryProperties()->queryProp("@queue")); + auditThorSystemEvent("Terminate", {"ctrlc"}); } queryLogMsgManager()->flushQueue(10*1000); _exit(TEC_CtrlC); @@ -968,7 +965,10 @@ int main( int argc, const char *argv[] ) getClusterThorQueueName(queueNames, thorName); #else if (!thorName) + { thorName = "thor"; + globals->setProp("@name", thorName); + } SCMStringBuffer queueNames; getThorQueueNames(queueNames, thorName); #endif @@ -992,6 +992,7 @@ int main( int argc, const char *argv[] ) masterSlaveMpTag = allocateClusterMPTag(); kjServiceMpTag = allocateClusterMPTag(); + auditThorSystemEvent("Initializing"); unsigned numWorkers = 0; if (isContainerized()) { @@ -1000,7 +1001,6 @@ int main( int argc, const char *argv[] ) StringBuffer thorEpStr; LOG(MCdebugProgress, "ThorMaster version %d.%d, Started on %s", 
THOR_VERSION_MAJOR,THOR_VERSION_MINOR,thorEp.getEndpointHostText(thorEpStr).str()); - LOG(MCdebugProgress, "Thor name = %s, queue = %s, nodeGroup = %s",thorname,queueName.str(),nodeGroup.str()); unsigned numWorkersPerPod = 1; if (!globals->hasProp("@numWorkers")) @@ -1126,7 +1126,7 @@ int main( int argc, const char *argv[] ) PROGLOG("Persistent Thor group created with group name: %s", uniqueGrpName.str()); } #endif - LOG(MCauditInfo, ",Progress,Thor,Startup,%s,%s,%s,%s",nodeGroup.str(),thorname,queueName.str(),logUrl.str()); + auditThorSystemEvent("Startup"); auditStartLogged = true; writeSentinelFile(sentinelFile); @@ -1140,7 +1140,7 @@ int main( int argc, const char *argv[] ) // NB: workunit/graphName only set in one-shot mode (if isCloud()) thorMain(logHandler, workunit, graphName); - LOG(MCauditInfo, ",Progress,Thor,Terminate,%s,%s,%s",thorname,nodeGroup.str(),queueName.str()); + auditThorSystemEvent("Terminate"); LOG(MCdebugProgress, "ThorMaster terminated OK"); } catch (IException *e) diff --git a/thorlcr/thorutil/thormisc.cpp b/thorlcr/thorutil/thormisc.cpp index a97fc94c5f8..fd4d721f2c1 100644 --- a/thorlcr/thorutil/thormisc.cpp +++ b/thorlcr/thorutil/thormisc.cpp @@ -1692,4 +1692,4 @@ void saveWuidToFile(const char *wuid) if (!wuidFileIO) throw makeStringException(0, "Failed to create file 'wuid' to store current workunit for post mortem script"); wuidFileIO->write(0, strlen(wuid), wuid); -} \ No newline at end of file +} From 5c1c92cafde08b2d6c3852f393096e879e9b70c5 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Thu, 13 Jun 2024 16:06:20 +0100 Subject: [PATCH 056/151] HPCC-32055 Avoid pathological performance of the scanning allocator Signed-off-by: Gavin Halliday --- roxie/roxiemem/roxiemem.cpp | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/roxie/roxiemem/roxiemem.cpp b/roxie/roxiemem/roxiemem.cpp index 7643b56a99c..b4b9e642478 100644 --- a/roxie/roxiemem/roxiemem.cpp +++ b/roxie/roxiemem/roxiemem.cpp @@ -3356,6 +3356,7 @@ class CHeap : public CInterface inline void updateDistanceScanned(unsigned __int64 distance) { + //Distance is the number of bytes, not the number of entries that have been skipped. stats.totalDistanceScanned += distance; } @@ -3549,6 +3550,14 @@ class CChunkedHeap : public CHeap : CHeap(_rowManager, _logctx, _allocatorCache, _flags), chunkSize(_chunkSize) { chunksPerPage = FixedSizeHeaplet::dataAreaSize() / chunkSize; + if (flags & RHFscanning) + { + //Avoid pathological scans - limit the number of scans for an allocation to ~100. At most this will waste 1% of memory + //although actual amount is likely to be much lower + const unsigned maxScanLength = 100; + //if 101..200 entries, then there should be at least 2 free slots to avoid an expected scan > 100 + minScanFreeCount = ((chunksPerPage-1) / maxScanLength) + 1; + } } void * doAllocate(unsigned allocatorId, unsigned maxSpillCost); @@ -3562,6 +3571,7 @@ class CChunkedHeap : public CHeap const void * newCompactRow(const void * ptr, NewHeapCompactState & state); inline unsigned maxChunksPerPage() const { return chunksPerPage; } + inline unsigned minScanFree() const { return minScanFreeCount; } //No longer any external references to a unique heap. Mark so it can be cleaned up early. 
void noteOrphaned() @@ -3591,6 +3601,7 @@ class CChunkedHeap : public CHeap unsigned chunksPerPage; unsigned curCompactTarget = 0; unsigned __int64 totalAllocsLastScanCheck = 0; + unsigned minScanFreeCount = 0; }; class CFixedChunkedHeap : public CChunkedHeap @@ -3712,7 +3723,8 @@ char * ChunkedHeaplet::allocateSingle(unsigned allocated, bool incCounter, unsig CChunkedHeap * chunkHeap = static_cast(heap); unsigned maxAllocs = chunkHeap->maxChunksPerPage(); - if (numAllocs == maxAllocs) + unsigned minFree = chunkHeap->minScanFree(); // Scanning when there are only a few spare slots becomes pathological, so give up early. + if (numAllocs + minFree > maxAllocs) { if (!(heapFlags & RHFdelayrelease)) return nullptr; @@ -6965,6 +6977,9 @@ extern void setDataAlignmentSize(unsigned size) } // namespace roxiemem +//Worth knowning if the size of this object increases and reduces the memory available for rows. +static_assert(sizeof(roxiemem::ChunkedHeaplet) <= 128); + //============================================================================================================ #ifdef _USE_CPPUNIT #include "unittests.hpp" From 06492c595953487bf1fe0ccad21c91a7cdd792b8 Mon Sep 17 00:00:00 2001 From: Shamser Ahmed Date: Tue, 23 Apr 2024 14:51:33 +0100 Subject: [PATCH 057/151] HPCC-31648 New StSizePeakEphemeralDisk and StSizePeakTempDisk for sort Signed-off-by: Shamser Ahmed HPCC-31648 Changes following review Signed-off-by: Shamser Ahmed --- .../hashdistrib/thhashdistribslave.cpp | 13 ++++----- .../lookupjoin/thlookupjoinslave.cpp | 8 ++---- thorlcr/graph/thgraph.hpp | 6 +++- thorlcr/msort/tsorts.cpp | 28 +++++++++---------- thorlcr/thorutil/thmem.cpp | 26 ++++++++--------- thorlcr/thorutil/thormisc.cpp | 2 +- thorlcr/thorutil/thormisc.hpp | 9 ++++-- 7 files changed, 47 insertions(+), 45 deletions(-) diff --git a/thorlcr/activities/hashdistrib/thhashdistribslave.cpp b/thorlcr/activities/hashdistrib/thhashdistribslave.cpp index 6fcc98f6903..0b814f94b63 100644 --- a/thorlcr/activities/hashdistrib/thhashdistribslave.cpp +++ b/thorlcr/activities/hashdistrib/thhashdistribslave.cpp @@ -2703,13 +2703,12 @@ class CSpill : implements IRowWriter, public CSimpleInterface IRowWriter *writer; StringAttr desc; unsigned bucketN, rwFlags; - Linked tempFileSizeTracker; public: IMPLEMENT_IINTERFACE_USING(CSimpleInterface); - CSpill(CActivityBase &_owner, IThorRowInterfaces *_rowIf, const char *_desc, unsigned _bucketN, CFileSizeTracker * _tempFileSizeTracker) - : owner(_owner), rowIf(_rowIf), desc(_desc), bucketN(_bucketN), tempFileSizeTracker(_tempFileSizeTracker) + CSpill(CActivityBase &_owner, IThorRowInterfaces *_rowIf, const char *_desc, unsigned _bucketN) + : owner(_owner), rowIf(_rowIf), desc(_desc), bucketN(_bucketN) { count = 0; writer = NULL; @@ -2726,8 +2725,7 @@ class CSpill : implements IRowWriter, public CSimpleInterface StringBuffer tempname, prefix("hashdedup_bucket"); prefix.append(bucketN).append('_').append(desc); GetTempFilePath(tempname, prefix.str()); - OwnedIFile iFile = createIFile(tempname.str()); - spillFile.setown(new CFileOwner(iFile, tempFileSizeTracker)); + spillFile.setown(owner.createOwnedTempFile(tempname.str())); if (owner.getOptBool(THOROPT_COMPRESS_SPILLS, true)) { rwFlags |= rw_compress; @@ -2735,7 +2733,7 @@ class CSpill : implements IRowWriter, public CSimpleInterface owner.getOpt(THOROPT_COMPRESS_SPILL_TYPE, compType); setCompFlag(compType, rwFlags); } - spillFileIO.setown(iFile->open(IFOcreate)); + spillFileIO.setown(spillFile->queryIFile().open(IFOcreate)); writer = 
createRowWriter(spillFileIO, rowIf, rwFlags); } IRowStream *getReader(rowcount_t *_count=NULL) // NB: also detaches ownership of 'fileOwner' @@ -3425,8 +3423,7 @@ void CHashTableRowTable::rehash(const void **newRows) CBucket::CBucket(HashDedupSlaveActivityBase &_owner, IThorRowInterfaces *_rowIf, IThorRowInterfaces *_keyIf, bool _extractKey, unsigned _bucketN, CHashTableRowTable *_htRows) : owner(_owner), keyIf(_keyIf), extractKey(_extractKey), bucketN(_bucketN), htRows(_htRows), - rowSpill(owner, _rowIf, "rows", _bucketN, _owner.queryTempFileSizeTracker()), keySpill(owner, _keyIf, "keys", _bucketN, _owner.queryTempFileSizeTracker()) - + rowSpill(owner, _rowIf, "rows", _bucketN), keySpill(owner, _keyIf, "keys", _bucketN) { spilt = false; /* Although, using a unique allocator per bucket would mean on a spill event, the pages could be freed, diff --git a/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp b/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp index 9465afc6a76..3db86c42edc 100644 --- a/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp +++ b/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp @@ -1878,8 +1878,7 @@ class CLookupJoinActivityBase : public CInMemJoinBase temp = createIFile(tempName.str()); - file.setown(new CFileOwner(temp)); + file.setown(container.queryActivity()->createOwnedTempFile(tempName.str())); VStringBuffer spillPrefixStr("clearAllNonLocalRows(%d)", SPILL_PRIORITY_SPILLABLE_STREAM); // 3rd param. is skipNulls = true, the row arrays may have had the non-local rows delete already. rows.save(file->queryIFile(), spillCompInfo, true, spillPrefixStr.str()); // saves committed rows @@ -2944,9 +2943,8 @@ class CLookupJoinActivityBase : public CInMemJoinBasecreateOwnedTempFile(tempFilename.str())); + overflowWriteStream.setown(createRowWriter(&(overflowWriteFile->queryIFile()), queryRowInterfaces(rightITDL), rwFlags)); overflowWriteCount += rhsInRowsTemp.ordinality(); ForEachItemIn(r, rhsInRowsTemp) diff --git a/thorlcr/graph/thgraph.hpp b/thorlcr/graph/thgraph.hpp index 1624baa5717..b0adfbade47 100644 --- a/thorlcr/graph/thgraph.hpp +++ b/thorlcr/graph/thgraph.hpp @@ -1186,7 +1186,11 @@ class graph_decl CActivityBase : implements CInterfaceOf, im { return tempFileSizeTracker ? 
tempFileSizeTracker->queryPeakSize() : 0; } - + CFileOwner * createOwnedTempFile(const char *fileName) + { + Owned iFile = createIFile(fileName); + return new CFileOwner(iFile, queryTempFileSizeTracker()); + } // IExceptionHandler bool fireException(IException *e); __declspec(noreturn) void processAndThrowOwnedException(IException * e) __attribute__((noreturn)); diff --git a/thorlcr/msort/tsorts.cpp b/thorlcr/msort/tsorts.cpp index f2603ea7c45..55afb16704a 100644 --- a/thorlcr/msort/tsorts.cpp +++ b/thorlcr/msort/tsorts.cpp @@ -69,7 +69,7 @@ class CWriteIntercept : public CSimpleInterface CActivityBase &activity; CriticalSection crit; IThorRowInterfaces *rowIf; - Owned dataFile, idxFile; + Owned dataFile, idxFile; Owned dataFileIO, idxFileIO; Owned dataFileStream; Linked idxFileStream; @@ -100,12 +100,12 @@ class CWriteIntercept : public CSimpleInterface // right create idx StringBuffer tempname; GetTempFilePath(tempname.clear(),"srtidx"); - idxFile.setown(createIFile(tempname.str())); - idxFileIO.setown(idxFile->open(IFOcreaterw)); + idxFile.setown(activity.createOwnedTempFile(tempname.str())); + idxFileIO.setown(idxFile->queryIFile().open(IFOcreaterw)); if (!idxFileIO.get()) { StringBuffer err; - err.append("Cannot create ").append(idxFile->queryFilename()); + err.append("Cannot create ").append(idxFile->queryIFile().queryFilename()); LOG(MCerror, "%s", err.str()); throw MakeActivityException(&activity, -1, "%s", err.str()); } @@ -141,7 +141,7 @@ class CWriteIntercept : public CSimpleInterface if (!idxFileIO.get()) { assertex(idxFile); - idxFileIO.setown(idxFile->open(IFOread)); + idxFileIO.setown(idxFile->queryIFile().open(IFOread)); } size32_t rd = idxFileIO->read((offset_t)pos*(offset_t)sizeof(offset_t),sizeof(*ofs)*n,ofs); if (closeIO) @@ -161,12 +161,12 @@ class CWriteIntercept : public CSimpleInterface { if (parent->compressedOverflowFile) { - Owned iFileIO = createCompressedFileReader(parent->dataFile); + Owned iFileIO = createCompressedFileReader(&(parent->dataFile->queryIFile())); assertex(iFileIO); stream.setown(createRowStreamEx(iFileIO, parent->rowIf, startOffset, (offset_t)-1, max)); } else - stream.setown(createRowStreamEx(parent->dataFile, parent->rowIf, startOffset, (offset_t)-1, max)); + stream.setown(createRowStreamEx(&(parent->dataFile->queryIFile()), parent->rowIf, startOffset, (offset_t)-1, max)); } virtual const void *nextRow() { return stream->nextRow(); } virtual void stop() { stream->stop(); } @@ -187,16 +187,12 @@ class CWriteIntercept : public CSimpleInterface ~CWriteIntercept() { closeFiles(); - if (dataFile) - dataFile->remove(); - if (idxFile) - idxFile->remove(); } offset_t write(IRowStream *input) { StringBuffer tempname; GetTempFilePath(tempname,"srtmrg"); - dataFile.setown(createIFile(tempname.str())); + dataFile.setown(activity.createOwnedTempFile(tempname.str())); unsigned rwFlags = DEFAULT_RWFLAGS; size32_t compBlkSz = 0; @@ -227,7 +223,7 @@ class CWriteIntercept : public CSimpleInterface } } - Owned output = createRowWriter(dataFile, rowIf, rwFlags, nullptr, compBlkSz); + Owned output = createRowWriter(&(dataFile->queryIFile()), rowIf, rwFlags, nullptr, compBlkSz); bool overflowed = false; ActPrintLog(&activity, "Local Overflow Merge start"); @@ -262,16 +258,18 @@ class CWriteIntercept : public CSimpleInterface output->flush(); offset_t end = output->getPosition(); output.clear(); + dataFile->noteSize(end); writeidxofs(end); if (idxFileIO) { idxFileStream->flush(); idxFileStream.clear(); + idxFile->noteSize(idxFileIO->getStatistic(StSizeDiskWrite)); 
idxFileIO.clear(); } if (overflowed) IWARNLOG("Overflowed by %" I64F "d", overflowsize); - ActPrintLog(&activity, "Local Overflow Merge done: overflow file '%s', size = %" I64F "d", dataFile->queryFilename(), dataFile->size()); + ActPrintLog(&activity, "Local Overflow Merge done: overflow file '%s', size = %" I64F "d", dataFile->queryIFile().queryFilename(), dataFile->queryIFile().size()); return end; } IRowStream *getStream(offset_t startOffset, rowcount_t max) @@ -299,7 +297,7 @@ class CWriteIntercept : public CSimpleInterface size32_t idxSz = (size32_t)(ofs[1]-ofs[0]); if (!dataFileIO) { - dataFileIO.setown(dataFile->open(IFOread)); + dataFileIO.setown(dataFile->queryIFile().open(IFOread)); if (compressedOverflowFile) { dataFileIO.setown(createCompressedFileReader(dataFileIO)); diff --git a/thorlcr/thorutil/thmem.cpp b/thorlcr/thorutil/thmem.cpp index 0e74d371462..c3d2d795a88 100644 --- a/thorlcr/thorutil/thmem.cpp +++ b/thorlcr/thorutil/thmem.cpp @@ -233,7 +233,7 @@ class CSpillableStreamBase : public CSpillable EmptyRowSemantics emptyRowSemantics; unsigned spillCompInfo; CThorSpillableRowArray rows; - OwnedIFile spillFile; + Owned spillFile; bool spillRows() { @@ -245,11 +245,11 @@ class CSpillableStreamBase : public CSpillable StringBuffer tempName; VStringBuffer tempPrefix("streamspill_%d", activity.queryId()); GetTempFilePath(tempName, tempPrefix.str()); - spillFile.setown(createIFile(tempName.str())); - + spillFile.setown(activity.createOwnedTempFile(tempName.str())); VStringBuffer spillPrefixStr("SpillableStream(%u)", spillPriority); - rows.save(*spillFile, spillCompInfo, false, spillPrefixStr.str()); // saves committed rows + rows.save(spillFile->queryIFile(), spillCompInfo, false, spillPrefixStr.str()); // saves committed rows rows.kill(); // no longer needed, readers will pull from spillFile. 
NB: ok to kill array as rows is never written to or expanded + spillFile->noteSize(spillFile->queryIFile().size()); return true; } public: @@ -264,8 +264,6 @@ class CSpillableStreamBase : public CSpillable ~CSpillableStreamBase() { ensureSpillingCallbackRemoved(); - if (spillFile) - spillFile->remove(); } // IBufferedRowCallback virtual bool freeBufferedRows(bool critical) override @@ -338,7 +336,7 @@ class CSharedSpillableRowSet : public CSpillableStreamBase block.clearCB = true; assertex(((offset_t)-1) != outputOffset); unsigned rwFlags = DEFAULT_RWFLAGS | mapESRToRWFlags(owner->emptyRowSemantics); - spillStream.setown(::createRowStreamEx(owner->spillFile, owner->rowIf, outputOffset, (offset_t)-1, (unsigned __int64)-1, rwFlags)); + spillStream.setown(::createRowStreamEx(&(owner->spillFile->queryIFile()), owner->rowIf, outputOffset, (offset_t)-1, (unsigned __int64)-1, rwFlags)); owner->rows.unregisterWriteCallback(*this); // no longer needed ret = spillStream->nextRow(); } @@ -389,7 +387,7 @@ class CSharedSpillableRowSet : public CSpillableStreamBase { block.clearCB = true; unsigned rwFlags = DEFAULT_RWFLAGS | mapESRToRWFlags(emptyRowSemantics); - return ::createRowStream(spillFile, rowIf, rwFlags); + return ::createRowStream(&spillFile->queryIFile(), rowIf, rwFlags); } rowidx_t toRead = rows.numCommitted(); if (toRead) @@ -450,7 +448,7 @@ class CSpillableStream : public CSpillableStreamBase, implements IRowStream rwFlags |= spillCompInfo; } rwFlags |= mapESRToRWFlags(emptyRowSemantics); - spillStream.setown(createRowStream(spillFile, rowIf, rwFlags)); + spillStream.setown(createRowStream(&spillFile->queryIFile(), rowIf, rwFlags)); ReleaseThorRow(readRows); readRows = nullptr; return spillStream->nextRow(); @@ -1656,13 +1654,15 @@ class CThorRowCollectorBase : public CSpillable } tempPrefix.appendf("spill_%d", activity.queryId()); GetTempFilePath(tempName, tempPrefix.str()); - Owned iFile = createIFile(tempName.str()); VStringBuffer spillPrefixStr("%sRowCollector(%d)", tracingPrefix.str(), spillPriority); - spillableRows.save(*iFile, spillCompInfo, false, spillPrefixStr.str()); // saves committed rows - spillFiles.append(new CFileOwner(iFile)); + Owned tempFileOwner = activity.createOwnedTempFile(tempName.str()); + spillableRows.save(tempFileOwner->queryIFile(), spillCompInfo, false, spillPrefixStr.str()); // saves committed rows + spillFiles.append(tempFileOwner.getLink()); ++overflowCount; statOverflowCount.fastAdd(1); // NB: this is total over multiple uses of this class - statSizeSpill.fastAdd(iFile->size()); + offset_t tempFileSize = tempFileOwner->queryIFile().size(); + statSizeSpill.fastAdd(tempFileSize); + tempFileOwner->noteSize(tempFileSize); statSpillCycles.fastAdd(spillTimer.elapsedCycles()); return true; } diff --git a/thorlcr/thorutil/thormisc.cpp b/thorlcr/thorutil/thormisc.cpp index a97fc94c5f8..c6de269762e 100644 --- a/thorlcr/thorutil/thormisc.cpp +++ b/thorlcr/thorutil/thormisc.cpp @@ -73,7 +73,7 @@ Owned globals; static Owned ClusterMPAllocator; // stat. 
mappings shared between master and slave activities -const StatisticsMapping spillStatistics({StTimeSpillElapsed, StTimeSortElapsed, StNumSpills, StSizeSpillFile}); +const StatisticsMapping spillStatistics({StTimeSpillElapsed, StTimeSortElapsed, StNumSpills, StSizeSpillFile, StSizePeakTempDisk}); const StatisticsMapping soapcallStatistics({StTimeSoapcall}); const StatisticsMapping basicActivityStatistics({StTimeTotalExecute, StTimeLocalExecute, StTimeBlocked}); const StatisticsMapping groupActivityStatistics({StNumGroups, StNumGroupMax}, basicActivityStatistics); diff --git a/thorlcr/thorutil/thormisc.hpp b/thorlcr/thorutil/thormisc.hpp index 2bc8f0c776d..2a9c7290fb3 100644 --- a/thorlcr/thorutil/thormisc.hpp +++ b/thorlcr/thorutil/thormisc.hpp @@ -356,9 +356,14 @@ class graph_decl CFileOwner : public CSimpleInterface, implements IInterface } void noteSize(offset_t size) { + if (fileSizeTracker && fileSize!=size) + { + if (size > fileSize) + fileSizeTracker->growSize(size-fileSize); + else + fileSizeTracker->shrinkSize(fileSize-size); + } fileSize = size; - if (fileSizeTracker) - fileSizeTracker->growSize(fileSize); } IFile &queryIFile() const { return *iFile; } }; From 2116ab4117441615724e8bc4fda656578a2f2315 Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Tue, 4 Jun 2024 22:36:59 +0100 Subject: [PATCH 058/151] HPCC-32017 New compressing splitter implementation Signed-off-by: Jake Smith --- rtl/eclrtl/rtlread_imp.hpp | 29 + system/jlib/jutil.cpp | 7 +- system/jlib/jutil.hpp | 2 - .../activities/nsplitter/thnsplitterslave.cpp | 88 ++- thorlcr/thorutil/thbuf.cpp | 530 +++++++++++++++++- thorlcr/thorutil/thbuf.hpp | 32 +- thorlcr/thorutil/thormisc.hpp | 5 + 7 files changed, 650 insertions(+), 43 deletions(-) diff --git a/rtl/eclrtl/rtlread_imp.hpp b/rtl/eclrtl/rtlread_imp.hpp index afa1ba7e0a3..a475e533651 100644 --- a/rtl/eclrtl/rtlread_imp.hpp +++ b/rtl/eclrtl/rtlread_imp.hpp @@ -94,5 +94,34 @@ class ECLRTL_API CThorStreamDeserializerSource : implements IRowDeserializerSour Linked in; // could use a CStreamSerializer class (with inlines to improve) }; +class ECLRTL_API COutputStreamSerializer : public CSimpleInterfaceOf +{ + Linked outputStream; + unsigned nesting = 0; + offset_t outerNestingOffset = 0; + +public: + COutputStreamSerializer(IBufferedSerialOutputStream *_outputStream) : outputStream(_outputStream) + { + } + virtual void put(size32_t len, const void * ptr) override + { + outputStream->put(len, ptr); + } + virtual size32_t beginNested(size32_t count) override + { + outputStream->suspend(sizeof(size32_t)); + if (nesting++ == 0) + outerNestingOffset = outputStream->tell(); + return outputStream->tell()-outerNestingOffset; + } + virtual void endNested(size32_t delta) override + { + size32_t patchedLength = outputStream->tell() - (delta + outerNestingOffset); + outputStream->resume(sizeof(size32_t), &patchedLength); + nesting--; + } +}; + #endif diff --git a/system/jlib/jutil.cpp b/system/jlib/jutil.cpp index dbed0920d38..d5c8acb8917 100644 --- a/system/jlib/jutil.cpp +++ b/system/jlib/jutil.cpp @@ -2759,10 +2759,10 @@ StringBuffer &getFileAccessUrl(StringBuffer &out) return out; } - -#ifdef _CONTAINERIZED bool getDefaultPlane(StringBuffer &ret, const char * componentOption, const char * category) { + if (!isContainerized()) + throwUnexpectedX("getDefaultPlane() called from non-container system"); // If the plane is specified for the component, then use that if (getComponentConfigSP()->getProp(componentOption, ret)) return true; @@ -2780,8 +2780,11 @@ bool 
getDefaultPlane(StringBuffer &ret, const char * componentOption, const char return false; } +#ifdef _CONTAINERIZED static bool getDefaultPlaneDirectory(StringBuffer &ret, const char * componentOption, const char * category) { + if (!isContainerized()) + throwUnexpectedX("getDefaultPlaneDirectory() called from non-container system"); StringBuffer planeName; if (!getDefaultPlane(planeName, componentOption, category)) return false; diff --git a/system/jlib/jutil.hpp b/system/jlib/jutil.hpp index 445899965be..3bbe204ac15 100644 --- a/system/jlib/jutil.hpp +++ b/system/jlib/jutil.hpp @@ -657,9 +657,7 @@ extern jlib_decl bool checkCreateDaemon(unsigned argc, const char * * argv); //Createpassword of specified length, containing UpperCaseAlphas, LowercaseAlphas, numerics and symbols extern jlib_decl const char * generatePassword(StringBuffer &pwd, int pwdLen); -#ifdef _CONTAINERIZED extern jlib_decl bool getDefaultPlane(StringBuffer &ret, const char * componentOption, const char * category); -#endif extern jlib_decl void getResourceFromJfrog(StringBuffer &localPath, IPropertyTree &item); diff --git a/thorlcr/activities/nsplitter/thnsplitterslave.cpp b/thorlcr/activities/nsplitter/thnsplitterslave.cpp index 56c5dbe0c91..a1767a5a3dd 100644 --- a/thorlcr/activities/nsplitter/thnsplitterslave.cpp +++ b/thorlcr/activities/nsplitter/thnsplitterslave.cpp @@ -30,6 +30,7 @@ class CSplitterOutput : public CSimpleInterfaceOf, pu NSplitterSlaveActivity &activity; Semaphore writeBlockSem; bool started = false, stopped = false; + IRowStream *splitterStream = nullptr; unsigned outIdx; rowcount_t rec = 0, max = 0; @@ -43,6 +44,7 @@ class CSplitterOutput : public CSimpleInterfaceOf, pu { started = stopped = false; rec = max = 0; + splitterStream = nullptr; } inline bool isStopped() const { return stopped; } @@ -80,6 +82,7 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf typedef CSlaveActivity PARENT; bool spill = false; + bool newSplitter = false; bool eofHit = false; bool writeBlocked = false, pagedOut = false; CriticalSection connectLock, prepareInputLock, writeAheadCrit; @@ -91,7 +94,8 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf unsigned connectedOutputCount = (unsigned)-1; // uninitialized rowcount_t recsReady = 0; Owned writeAheadException; - Owned smartBuf; + Owned sharedRowStream; + Owned sharedSmartRowWriter; bool inputPrepared = false; bool inputConnected = false; unsigned numOutputs = 0; @@ -161,6 +165,12 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf spill = dV>0; ForEachItemIn(o, container.outputs) appendOutput(new CSplitterOutput(*this, o)); + newSplitter = getOptBool("newsplitter", false); + if (getOptBool("forcenewsplitter", false)) + { + newSplitter = true; + spill = true; + } } virtual void init(MemoryBuffer &data, MemoryBuffer &slaveData) override { @@ -219,21 +229,50 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf assertex(activeOutputCount); // must be >=1, as an output start() invoked prepareInput if (1 == activeOutputCount) return; // single output in use which will be read directly - if (smartBuf) - smartBuf->reset(); + if (sharedRowStream) + sharedRowStream->reset(); else { if (spill) { StringBuffer tempname; GetTempFilePath(tempname, "nsplit"); - smartBuf.setown(createSharedSmartDiskBuffer(this, tempname.str(), numOutputs, queryRowInterfaces(input))); - ActPrintLog("Using temp spill file: %s", tempname.str()); + if (newSplitter) + { + 
SharedRowStreamReaderOptions options; + if (isContainerized()) + { + StringBuffer planeName; + if (!getDefaultPlane(planeName, "@tempPlane", "temp")) + getDefaultPlane(planeName, "@spillPlane", "spill"); + size32_t blockedSequentialIOSize = getPlaneAttributeValue(planeName, BlockedSequentialIO, (size32_t)-1); + if ((size32_t)-1 != blockedSequentialIOSize) + options.storageBlockSize = blockedSequentialIOSize; + } + options.totalCompressionBufferSize = getOptInt(THOROPT_SPLITTER_COMPRESSIONTOALK, options.totalCompressionBufferSize / 1024) * 1024; + options.inMemMaxMem = getOptInt(THOROPT_SPLITTER_MAXROWMEMK, options.inMemMaxMem / 1024) * 1024; + options.spillWriteAheadSize = getOptInt64(THOROPT_SPLITTER_WRITEAHEADK, options.spillWriteAheadSize / 1024) * 1024; + options.inMemReadAheadGranularity = getOptInt(THOROPT_SPLITTER_READAHEADGRANULARITYK, options.inMemReadAheadGranularity / 1024) * 1024; + options.inMemReadAheadGranularityRows = getOptInt(THOROPT_SPLITTER_READAHEADGRANULARITYROWS, options.inMemReadAheadGranularity); + options.heapFlags = getOptInt("spillheapflags", options.heapFlags); + + ICompressHandler *compressHandler = options.totalCompressionBufferSize ? queryDefaultCompressHandler() : nullptr; + sharedRowStream.setown(createSharedFullSpillingWriteAhead(this, numOutputs, inputStream, isGrouped(), options, this, tempname.str(), compressHandler)); + } + else + { + Owned smartBuf = createSharedSmartDiskBuffer(this, tempname.str(), numOutputs, this); + sharedRowStream.set(smartBuf); + sharedSmartRowWriter.setown(smartBuf->getWriter()); + ActPrintLog("Using temp spill file: %s", tempname.str()); + } } else { ActPrintLog("Spill is 'balanced'"); - smartBuf.setown(createSharedSmartMemBuffer(this, numOutputs, queryRowInterfaces(input), NSPLITTER_SPILL_BUFFER_SIZE)); + Owned smartBuf = createSharedSmartMemBuffer(this, numOutputs, this, NSPLITTER_SPILL_BUFFER_SIZE); + sharedRowStream.set(smartBuf); + sharedSmartRowWriter.setown(smartBuf->getWriter()); cachedMetaInfo.canStall = true; } // mark any outputs already stopped @@ -241,7 +280,7 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf { CSplitterOutput *output = (CSplitterOutput *)outputs.item(o); if (output->isStopped() || !connectedOutputSet->test(o)) - smartBuf->queryOutput(o)->stop(); + sharedRowStream->queryOutput(o)->stop(); } } if (!spill) @@ -261,7 +300,7 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf return inputStream->nextRow(); if (recsReady == current && writeAheadException.get()) throw LINK(writeAheadException); - return smartBuf->queryOutput(outIdx)->nextRow(); // will block until available + return sharedRowStream->queryOutput(outIdx)->nextRow(); // will block until available } rowcount_t writeahead(rowcount_t current, const bool &stopped, Semaphore &writeBlockSem, unsigned outIdx) { @@ -311,7 +350,7 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf row.setown(inputStream->nextRow()); if (row) { - smartBuf->putRow(nullptr, this); // may call blocked() (see ISharedSmartBufferCallback impl. below) + sharedSmartRowWriter->putRow(nullptr, this); // may call blocked() (see ISharedSmartBufferCallback impl. below) ++recsReady; } } @@ -321,10 +360,10 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf { ActPrintLog("Splitter activity, hit end of input @ rec = %" RCPF "d", recsReady); eofHit = true; - smartBuf->flush(); // signals no more rows will be written. 
+ sharedSmartRowWriter->flush(); // signals no more rows will be written. break; } - smartBuf->putRow(row.getClear(), this); // can block if mem limited, but other readers can progress which is the point + sharedSmartRowWriter->putRow(row.getClear(), this); // can block if mem limited, but other readers can progress which is the point ++recsReady; } return recsReady; @@ -339,12 +378,12 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf } else { - if (smartBuf) + if (sharedRowStream) { /* If no output has started reading (nextRow()), then it will not have been prepared * If only 1 output is left, it will bypass the smart buffer when it starts. */ - smartBuf->queryOutput(outIdx)->stop(); + sharedRowStream->queryOutput(outIdx)->stop(); } ++stoppedOutputs; if (stoppedOutputs == connectedOutputCount) @@ -357,8 +396,8 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf void abort() { CSlaveActivity::abort(); - if (smartBuf) - smartBuf->cancel(); + if (sharedRowStream) + sharedRowStream->cancel(); } // ISharedSmartBufferCallback impl. virtual void paged() { pagedOut = true; } @@ -460,6 +499,11 @@ void CSplitterOutput::start() activity.prepareInput(); if (1 == activity.activeOutputCount) max = RCMAX; // signals that no writeahead required + else + { + if (activity.newSplitter) + splitterStream = activity.sharedRowStream->queryOutput(outIdx); + } dataLinkStart(); } @@ -476,10 +520,16 @@ void CSplitterOutput::stop() const void *CSplitterOutput::nextRow() { ActivityTimer t(slaveTimerStats, activity.queryTimeActivities()); - if (rec == max) // NB: max will be RCMAX if activeOutputCount == 1 - max = activity.writeahead(max, activity.queryAbortSoon(), writeBlockSem, outIdx); - const void *row = activity.nextRow(outIdx, rec); // pass ptr to max if need more - ++rec; + const void *row; + if (splitterStream) + row = splitterStream->nextRow(); + else + { + if (rec == max) // NB: max will be RCMAX if activeOutputCount == 1 + max = activity.writeahead(max, activity.queryAbortSoon(), writeBlockSem, outIdx); + row = activity.nextRow(outIdx, rec); // pass ptr to max if need more + ++rec; + } if (row) dataLinkIncrement(); return row; diff --git a/thorlcr/thorutil/thbuf.cpp b/thorlcr/thorutil/thbuf.cpp index d64d06da1a2..38054cf60fd 100644 --- a/thorlcr/thorutil/thbuf.cpp +++ b/thorlcr/thorutil/thbuf.cpp @@ -15,6 +15,8 @@ limitations under the License. 
############################################################################## */ +#include +#include #include "platform.h" #include #include @@ -735,7 +737,7 @@ int chunkSizeCompare2(Chunk *lhs, Chunk *rhs) } #define MIN_POOL_CHUNKS 10 -class CSharedWriteAheadBase : public CSimpleInterface, implements ISharedSmartBuffer +class CSharedWriteAheadBase : public CSimpleInterface, implements ISharedSmartBuffer, implements ISharedSmartBufferRowWriter { size32_t totalOutChunkSize; bool writeAtEof; @@ -1104,7 +1106,12 @@ class CSharedWriteAheadBase : public CSimpleInterface, implements ISharedSmartBu } // ISharedSmartBuffer - virtual void putRow(const void *row, ISharedSmartBufferCallback *callback) + virtual ISharedSmartBufferRowWriter *getWriter() override + { + return LINK(this); + } +// ISharedSmartBufferRowWriter + virtual void putRow(const void *row, ISharedSmartBufferCallback *callback) override { if (stopped) { @@ -1142,32 +1149,28 @@ class CSharedWriteAheadBase : public CSimpleInterface, implements ISharedSmartBu if (!callback || paged) signalReaders(); } - virtual void putRow(const void *row) + virtual void putRow(const void *row) override { return putRow(row, NULL); } - virtual void flush() + virtual void flush() override { CriticalBlock b(crit); writeAtEof = true; signalReaders(); } - virtual offset_t getPosition() - { - throwUnexpected(); - return 0; - } - virtual IRowStream *queryOutput(unsigned output) +// ISharedRowStreamReader + virtual IRowStream *queryOutput(unsigned output) override { return &outputs.item(output); } - virtual void cancel() + virtual void cancel() override { CriticalBlock b(crit); stopAll(); signalReaders(); } - virtual void reset() + virtual void reset() override { init(); unsigned c=0; @@ -1195,6 +1198,29 @@ bool CRowSet::Release() const return CSimpleInterface::Release(); } +static StringBuffer &getFileIOStats(StringBuffer &output, IFileIO *iFileIO) +{ + __int64 readCycles = iFileIO->getStatistic(StCycleDiskReadIOCycles); + __int64 writeCycles = iFileIO->getStatistic(StCycleDiskWriteIOCycles); + __int64 numReads = iFileIO->getStatistic(StNumDiskReads); + __int64 numWrites = iFileIO->getStatistic(StNumDiskWrites); + offset_t bytesRead = iFileIO->getStatistic(StSizeDiskRead); + offset_t bytesWritten = iFileIO->getStatistic(StSizeDiskWrite); + if (readCycles) + output.appendf(", read-time(ms)=%" I64F "d", cycle_to_millisec(readCycles)); + if (writeCycles) + output.appendf(", write-time(ms)=%" I64F "d", cycle_to_millisec(writeCycles)); + if (numReads) + output.appendf(", numReads=%" I64F "d", numReads); + if (numWrites) + output.appendf(", numWrites=%" I64F "d", numWrites); + if (bytesRead) + output.appendf(", bytesRead=%" I64F "d", bytesRead); + if (bytesWritten) + output.appendf(", bytesWritten=%" I64F "d", bytesWritten); + return output; +} + class CSharedWriteAheadDisk : public CSharedWriteAheadBase { Owned spillFile; @@ -1514,10 +1540,14 @@ class CSharedWriteAheadDisk : public CSharedWriteAheadBase } ~CSharedWriteAheadDisk() { - spillFileIO.clear(); if (spillFile) + { + StringBuffer tracing; + getFileIOStats(tracing, spillFileIO); + activity->ActPrintLog("CSharedWriteAheadDisk: removing spill file: %s%s", spillFile->queryFilename(), tracing.str()); + spillFileIO.clear(); spillFile->remove(); - + } for (;;) { Owned chunk = savedChunks.dequeue(); @@ -1658,6 +1688,478 @@ ISharedSmartBuffer *createSharedSmartMemBuffer(CActivityBase *activity, unsigned } +// This implementation is supplied with the input, and reads from it on demand, initially to memory. 
+// It will spill to disk if the configurable memory limit is exceeded. +// The leading reader(output) causes the implementation to read more rows from the input. +// Once the leader causes the rows in memory to exceed the memory limit, it will cause a output stream to be created. +// From that point on, the leader will write blocks of rows out to disk, +// and cause all readers to read from it, once they have exhaused the in-memory row set. + +class CSharedFullSpillingWriteAhead : public CInterfaceOf +{ + typedef std::vector Rows; + class COutputRowStream : public CSimpleInterfaceOf + { + CSharedFullSpillingWriteAhead &owner; + unsigned whichOutput = 0; + size32_t localRowsIndex = 0; + rowcount_t lastKnownAvailable = 0; + rowcount_t currentRow = 0; + Rows rows; + OwnedIFileIO iFileIO; + Owned allocator; + Owned inputStream; + CThorStreamDeserializerSource ds; + std::atomic eof = false; + + inline const void *getClearRow(unsigned i) + { + const void *row = rows[i]; + rows[i] = nullptr; + return row; + } + void freeRows() + { + for (auto it = rows.begin() + localRowsIndex; it != rows.end(); ++it) + ReleaseThorRow(*it); + rows.clear(); + localRowsIndex = 0; + allocator->emptyCache(); + } + const void *getRowFromStream() + { + if (currentRow == lastKnownAvailable) + { + if (!owner.checkWriteAhead(lastKnownAvailable)) + { + eof = true; + return nullptr; + } + } + if (owner.inputGrouped) + { + bool eog; + inputStream->read(sizeof(bool), &eog); + if (eog) + { + currentRow++; + return nullptr; + } + } + currentRow++; + RtlDynamicRowBuilder rowBuilder(allocator); + size32_t sz = owner.deserializer->deserialize(rowBuilder, ds); + return rowBuilder.finalizeRowClear(sz); + } + public: + explicit COutputRowStream(CSharedFullSpillingWriteAhead &_owner, unsigned _whichOutput) + : owner(_owner), whichOutput(_whichOutput) + { + allocator.setown(owner.activity.getRowAllocator(owner.meta, (roxiemem::RoxieHeapFlags)owner.options.heapFlags)); + } + ~COutputRowStream() + { + freeRows(); + } + rowcount_t queryLastKnownAvailable() const + { + return lastKnownAvailable; + } + void setLastKnownAvailable(rowcount_t _lastKnownWritten) + { + lastKnownAvailable = _lastKnownWritten; + } + void cancel() + { + eof = true; + } + void reset() + { + freeRows(); + ds.setStream(nullptr); + iFileIO.clear(); + inputStream.clear(); + eof = false; + currentRow = 0; + lastKnownAvailable = 0; + } + virtual const void *nextRow() override + { + if (eof) + return nullptr; + else if (localRowsIndex < rows.size()) // NB: no longer used after inputStream is set + { + currentRow++; + return getClearRow(localRowsIndex++); + } + else if (inputStream) + return getRowFromStream(); // NB: will increment currentRow + else + { + localRowsIndex = 0; + rows.clear(); + + if (owner.getRowsInMem(rows, lastKnownAvailable)) + { + if (rows.empty()) + { + eof = true; + return nullptr; + } + else + { + currentRow++; + return getClearRow(localRowsIndex++); + } + } + else + { + auto [_inputStream, _iFileIO] = owner.getReadStream(); + inputStream.setown(_inputStream); + iFileIO.setown(_iFileIO); + ds.setStream(inputStream); + return getRowFromStream(); // NB: will increment currentRow + } + } + } + virtual void stop() override + { + freeRows(); + ds.setStream(nullptr); + + if (inputStream) + { + StringBuffer tracing; + getFileIOStats(tracing, iFileIO); + owner.activity.ActPrintLog("CSharedFullSpillingWriteAhead::COutputRowStream: input stream finished: output=%u%s", whichOutput, tracing.str()); + + iFileIO.clear(); + inputStream.clear(); + } + + // NB: this 
will set lastKnownAvailable to max[(rowcount_t)-1] (within owner.readAheadCS) to prevent it being considered as lowest any longer + owner.outputStopped(whichOutput); + + eof = true; + } + }; + CActivityBase &activity; + Linked input; + Linked meta; + Linked serializer; + Linked deserializer; + Linked allocator; + std::vector> outputs; + std::deque> rows; + memsize_t rowsMemUsage = 0; + std::atomic totalInputRowsRead = 0; // not used until spilling begins, represents count of all rows read + rowcount_t inMemTotalRows = 0; // whilst in memory, represents count of all rows seen + CriticalSection readAheadCS; // ensure single reader (leader), reads ahead (updates rows/totalInputRowsRead/inMemTotalRows) + Owned iFile; + Owned iFileIO; + Owned outputStream; + Linked compressHandler; + bool nextInputReadEog = false; + bool endOfInput = false; + bool inputGrouped = false; + SharedRowStreamReaderOptions options; + size32_t inMemReadAheadGranularity = 0; + size32_t compressionBlockSize = 0; + + rowcount_t getLowestOutput() + { + // NB: must be called with readAheadCS held + rowcount_t trailingRowPos = (rowcount_t)-1; + for (auto &output: outputs) + { + rowcount_t outputLastKnownWritten = output->queryLastKnownAvailable(); + if (outputLastKnownWritten < trailingRowPos) + trailingRowPos = outputLastKnownWritten; + } + return trailingRowPos; + } + inline rowcount_t getStartIndex() + { + rowcount_t nr = rows.size(); + return inMemTotalRows - nr; + } + inline unsigned getRelativeIndex(rowcount_t index) + { + rowcount_t startIndex = getStartIndex(); + return (unsigned)(index - startIndex); + } + void closeWriter() + { + iFileIO.clear(); + outputStream.clear(); + } + void createOutputStream() + { + // NB: Called once, when spilling starts. + iFileIO.setown(iFile->open(IFOcreate)); // kept for stats purposes + Owned out = createSerialOutputStream(iFileIO); + outputStream.setown(createBufferedOutputStream(out, options.storageBlockSize)); //prefered plane block size + if (compressHandler) + { + const char *compressOptions = nullptr; + Owned compressor = compressHandler->getCompressor(compressOptions); + Owned compressed = createCompressingOutputStream(outputStream, compressor); + outputStream.setown(createBufferedOutputStream(compressed, compressionBlockSize)); + } + totalInputRowsRead = inMemTotalRows; + } + void writeRowsFromInput() + { + // NB: the leading output will be calling this, and it could populate 'outputRows' as it reads ahead + // but we want to readahead + write to disk, more than we want to retain in memory, so keep it simple, + // flush all to disk, meaning this output will also read them back off disk (hopefully from Linux page cache) + rowcount_t newRowsWritten = 0; + offset_t serializedSz = 0; + COutputStreamSerializer outputStreamSerializer(outputStream); + while (!activity.queryAbortSoon()) + { + OwnedConstThorRow row = input->nextRow(); + if (nullptr == row) + { + if (!inputGrouped || nextInputReadEog) + { + endOfInput = true; + break; + } + nextInputReadEog = true; + outputStream->put(sizeof(bool), &nextInputReadEog); + newRowsWritten++; + } + else + { + if (inputGrouped) + { + nextInputReadEog = false; + outputStream->put(sizeof(bool), &nextInputReadEog); + } + serializer->serialize(outputStreamSerializer, (const byte *)row.get()); + newRowsWritten++; + size32_t rowSz = thorRowMemoryFootprint(serializer, row); + serializedSz += rowSz; + if (serializedSz >= options.spillWriteAheadSize) + break; + } + } + outputStream->flush(); + totalInputRowsRead.fetch_add(newRowsWritten); + + // 
JCSMORE - could track size written, and start new file at this point (e.g. every 100MB), + // and track their starting points (by row #) in a vector + // We could then tell if/when the readers catch up, and remove consumed files as they do. + } + void freeRows() + { + for (auto &row: rows) + ReleaseThorRow(std::get<0>(row)); + } +public: + explicit CSharedFullSpillingWriteAhead(CActivityBase *_activity, unsigned numOutputs, IRowStream *_input, bool _inputGrouped, const SharedRowStreamReaderOptions &_options, IThorRowInterfaces *rowIf, const char *tempFileName, ICompressHandler *_compressHandler) + : activity(*_activity), input(_input), inputGrouped(_inputGrouped), options(_options), compressHandler(_compressHandler), + meta(rowIf->queryRowMetaData()), serializer(rowIf->queryRowSerializer()), allocator(rowIf->queryRowAllocator()), deserializer(rowIf->queryRowDeserializer()) + { + assertex(input); + + // cap inMemReadAheadGranularity to inMemMaxMem + inMemReadAheadGranularity = options.inMemReadAheadGranularity; + if (inMemReadAheadGranularity > options.inMemMaxMem) + inMemReadAheadGranularity = options.inMemMaxMem; + + constexpr size32_t minCompressionBlockSize = 256 * 1024; + memsize_t totalCompressionBufferSize = options.totalCompressionBufferSize; + if (totalCompressionBufferSize) + { + compressionBlockSize = (size32_t)(totalCompressionBufferSize / (numOutputs + 1)); // +1 for writer + if (compressionBlockSize < minCompressionBlockSize) + { + WARNLOG("Shared totalCompressionBufferSize=%" I64F "u, too small for number of outputs(%u). Using minCompressionBlockSize(%u) for writer and each reader.", (unsigned __int64)totalCompressionBufferSize, numOutputs, minCompressionBlockSize); + compressionBlockSize = minCompressionBlockSize; + } + } + for (unsigned o=0; oremove(); + } + freeRows(); + } + void outputStopped(unsigned output) + { + bool allStopped = false; + { + // Mark finished output with max, so that it is not considered by getLowestOutput() + CriticalBlock b(readAheadCS); // read ahead could be active and considering this output + outputs[output]->setLastKnownAvailable((rowcount_t)-1); + if ((rowcount_t)-1 == getLowestOutput()) + allStopped = true; + } + if (allStopped) + { + if (totalInputRowsRead) // only set if spilt + { + StringBuffer tracing; + getFileIOStats(tracing, iFileIO); + activity.ActPrintLog("CSharedFullSpillingWriteAhead: removing spill file: %s%s", iFile->queryFilename(), tracing.str()); + closeWriter(); + iFile->remove(); + } + } + } + std::tuple getReadStream() // also pass back IFileIO for stats purposes + { + Owned iFileIO = iFile->open(IFOread); + Owned in = createSerialInputStream(iFileIO); + Owned inputStream = createBufferedInputStream(in, options.storageBlockSize, 0); + if (compressHandler) + { + const char *decompressOptions = nullptr; + Owned decompressor = compressHandler->getExpander(decompressOptions); + Owned decompressed = createDecompressingInputStream(inputStream, decompressor); + inputStream.setown(createBufferedInputStream(decompressed, compressionBlockSize, 0)); + } + return { inputStream.getClear(), iFileIO.getClear() }; + } + bool checkWriteAhead(rowcount_t &outputRowsAvailable) + { + if (totalInputRowsRead == outputRowsAvailable) + { + CriticalBlock b(readAheadCS); + if (totalInputRowsRead == outputRowsAvailable) // if not, then since gaining the crit, totalInputRowsRead has changed + { + if (endOfInput) + return false; + writeRowsFromInput(); + if (totalInputRowsRead == outputRowsAvailable) // no more were written + { + dbgassertex(endOfInput); 
+ return false; + } + } + } + outputRowsAvailable = totalInputRowsRead; + return true; + } + bool getRowsInMem(Rows &outputRows, rowcount_t &outputRowsAvailable) + { + CriticalBlock b(readAheadCS); + if (outputRowsAvailable == inMemTotalRows) // load more + { + // prune unused rows + rowcount_t trailingRowPosRelative = getRelativeIndex(getLowestOutput()); + for (auto it = rows.begin(); it != rows.begin() + trailingRowPosRelative; ++it) + { + auto [row, rowSz] = *it; + rowsMemUsage -= rowSz; + ReleaseThorRow(row); + } + rows.erase(rows.begin(), rows.begin() + trailingRowPosRelative); + + if (outputStream) + { + // this will be the last time this output calls getRowsInMem + // it has exhausted 'rows', and will from here on in read from outputStream + return false; + } + + if (rowsMemUsage >= options.inMemMaxMem) // too much in memory, spill + { + // NB: this will reset rowMemUsage, however, each reader will continue to consume rows until they catch up (or stop) + ActPrintLog(&activity, "Spilling to temp storage [file = %s, outputRowsAvailable = %" I64F "u, start = %" I64F "u, end = %" I64F "u, count = %u]", iFile->queryFilename(), outputRowsAvailable, inMemTotalRows - rows.size(), inMemTotalRows, (unsigned)rows.size()); + createOutputStream(); + return false; + } + + // read more, up to inMemReadAheadGranularity or inMemReadAheadGranularityRows before relinquishing + rowcount_t previousNumRows = rows.size(); + while (true) + { + const void *row = input->nextRow(); + if (row) + { + nextInputReadEog = false; + size32_t sz = thorRowMemoryFootprint(serializer, row); + rows.emplace_back(row, sz); + rowsMemUsage += sz; + if ((rowsMemUsage >= options.inMemReadAheadGranularity) || + (rows.size() >= options.inMemReadAheadGranularityRows)) + break; + } + else + { + if (!inputGrouped || nextInputReadEog) + break; + else + { + nextInputReadEog = true; + rows.emplace_back(nullptr, 0); + } + } + } + inMemTotalRows += rows.size() - previousNumRows; + } + else + { + // this output has not yet reached inMemTotalRows + dbgassertex(outputRowsAvailable < inMemTotalRows); + } + + rowcount_t newRowsAdded = 0; + for (auto it = rows.begin() + getRelativeIndex(outputRowsAvailable); it != rows.end(); ++it) + { + const void *row = std::get<0>(*it); + LinkThorRow(row); + outputRows.push_back(row); + newRowsAdded++; + } + outputRowsAvailable = outputRowsAvailable+newRowsAdded; + + return true; + } +// ISharedRowStreamReader impl. 
+    virtual IRowStream *queryOutput(unsigned output) override
+    {
+        return outputs[output];
+    }
+    virtual void cancel() override
+    {
+        for (auto &output: outputs)
+            output->cancel();
+    }
+    virtual void reset() override
+    {
+        if (outputStream) // should have already been closed when inputs all stopped
+        {
+            closeWriter();
+            iFile->remove();
+        }
+        for (auto &output: outputs)
+            output->reset();
+        freeRows();
+        rows.clear();
+        rowsMemUsage = 0;
+        totalInputRowsRead = 0;
+        inMemTotalRows = 0;
+        nextInputReadEog = false;
+        endOfInput = false;
+    }
+};
+
+ISharedRowStreamReader *createSharedFullSpillingWriteAhead(CActivityBase *_activity, unsigned numOutputs, IRowStream *_input, bool _inputGrouped, const SharedRowStreamReaderOptions &options, IThorRowInterfaces *_rowIf, const char *tempFileName, ICompressHandler *compressHandler)
+{
+    return new CSharedFullSpillingWriteAhead(_activity, numOutputs, _input, _inputGrouped, options, _rowIf, tempFileName, compressHandler);
+}
+
+
 class CRowMultiWriterReader : public CSimpleInterface, implements IRowMultiWriterReader
 {
     rowidx_t readGranularity, writeGranularity, rowPos, limit, rowsToRead;
diff --git a/thorlcr/thorutil/thbuf.hpp b/thorlcr/thorutil/thbuf.hpp
index f75b353951f..dc64aeb888d 100644
--- a/thorlcr/thorutil/thbuf.hpp
+++ b/thorlcr/thorutil/thbuf.hpp
@@ -55,6 +55,24 @@ extern graph_decl ISmartRowBuffer * createSmartInMemoryBuffer(CActivityBase *act
                                                       IThorRowInterfaces *rowIf,
                                                       size32_t buffsize);
+struct SharedRowStreamReaderOptions
+{
+    offset_t storageBlockSize = 256 * 1024; // block size of read/write streams
+    memsize_t totalCompressionBufferSize = 3000 * 1024; // total compression buffer size (split between the writer and the output readers)
+    memsize_t inMemMaxMem = 2000 * 1024; // max in-memory size (bytes) before spilling begins
+    memsize_t inMemReadAheadGranularity = 128 * 1024; // granularity (bytes) of read ahead
+    rowcount_t inMemReadAheadGranularityRows = 64; // granularity (rows) of read ahead. NB: whichever granularity is hit first applies
+    offset_t spillWriteAheadSize = 2000 * 1024; // once spilling, maximum size to write ahead
+    unsigned heapFlags = roxiemem::RHFunique|roxiemem::RHFblocked;
+};
+interface ISharedRowStreamReader : extends IInterface
+{
+    virtual IRowStream *queryOutput(unsigned output) = 0;
+    virtual void cancel()=0;
+    virtual void reset() = 0;
+};
+
+
 // Multiple readers, one writer
 interface ISharedSmartBufferCallback
 {
@@ -62,18 +80,20 @@ interface ISharedSmartBufferCallback
     virtual void blocked() = 0;
     virtual void unblocked() = 0;
 };
-interface ISharedSmartBuffer : extends IRowWriter
+
+interface ISharedSmartBufferRowWriter : extends IRowWriter
 {
-    using IRowWriter::putRow;
     virtual void putRow(const void *row, ISharedSmartBufferCallback *callback) = 0; // extended form of putRow, which signals when it pages out via the callback
-    virtual IRowStream *queryOutput(unsigned output) = 0;
-    virtual void cancel()=0;
-    virtual void reset() = 0;
+};
+
+interface ISharedSmartBuffer : extends ISharedRowStreamReader
+{
+    virtual ISharedSmartBufferRowWriter *getWriter() = 0;
 };
 
 extern graph_decl ISharedSmartBuffer *createSharedSmartMemBuffer(CActivityBase *activity, unsigned outputs, IThorRowInterfaces *rowif, unsigned buffSize=((unsigned)-1));
 extern graph_decl ISharedSmartBuffer *createSharedSmartDiskBuffer(CActivityBase *activity, const char *tempname, unsigned outputs, IThorRowInterfaces *rowif);
-
+extern graph_decl ISharedRowStreamReader *createSharedFullSpillingWriteAhead(CActivityBase *_activity, unsigned numOutputs, IRowStream *_input, bool _inputGrouped, const SharedRowStreamReaderOptions &options, IThorRowInterfaces *_rowIf, const char *tempFileName, ICompressHandler *compressHandler);
 
 interface IRowWriterMultiReader : extends IRowWriter
 {
diff --git a/thorlcr/thorutil/thormisc.hpp b/thorlcr/thorutil/thormisc.hpp
index 2bc8f0c776d..a3f394a83c1 100644
--- a/thorlcr/thorutil/thormisc.hpp
+++ b/thorlcr/thorutil/thormisc.hpp
@@ -55,6 +55,11 @@
 #define THOROPT_HDIST_COMP "hdCompressorType" // Distribute compressor to use (default = "LZ4")
 #define THOROPT_HDIST_COMPOPTIONS "hdCompressorOptions" // Distribute compressor options, e.g. AES key (default = "")
 #define THOROPT_SPLITTER_SPILL "splitterSpill" // Force splitters to spill or not, default is to adhere to helper setting (default = -1)
+#define THOROPT_SPLITTER_MAXROWMEMK "splitterRowMemK" // Splitter max memory (K) to use before spilling (default = 2MB)
+#define THOROPT_SPLITTER_READAHEADGRANULARITYK "inMemReadAheadGranularityK" // Splitter in-memory read ahead granularity (K) (default = 128K)
+#define THOROPT_SPLITTER_READAHEADGRANULARITYROWS "inMemReadAheadGranularityRows" // Splitter in-memory read ahead granularity (# rows) (default = 64)
+#define THOROPT_SPLITTER_WRITEAHEADK "splitterWriteAheadK" // Splitter spilling write ahead size (K) (default = 2MB)
+#define THOROPT_SPLITTER_COMPRESSIONTOTALK "splitterCompressionTotalK" // Splitter total compression buffer size (shared between writer and readers) (K) (default = 3MB)
 #define THOROPT_LOOP_MAX_EMPTY "loopMaxEmpty" // Max # of iterations that LOOP can cycle through with 0 results before errors (default = 1000)
 #define THOROPT_SMALLSORT "smallSortThreshold" // Use minisort approach, if estimate size of data to sort is below this setting (default = 0)
 #define THOROPT_PARALLEL_FUNNEL "parallelFunnel" // Use parallel funnel impl.
if !ordered (default = true) From d4da9d475bafda02cf2972501f1fc093d6a6ed63 Mon Sep 17 00:00:00 2001 From: Shamser Ahmed Date: Thu, 25 Apr 2024 16:24:50 +0100 Subject: [PATCH 059/151] HPCC-31649 New StSizePeakEphemeralDisk and StSizePeakTempDisk for look ahead and hash distribute spilling Signed-off-by: Shamser Ahmed --- thorlcr/thorutil/thbuf.cpp | 65 +++++++++++++++++++------------------- 1 file changed, 32 insertions(+), 33 deletions(-) diff --git a/thorlcr/thorutil/thbuf.cpp b/thorlcr/thorutil/thbuf.cpp index 38054cf60fd..951b7db82d2 100644 --- a/thorlcr/thorutil/thbuf.cpp +++ b/thorlcr/thorutil/thbuf.cpp @@ -71,8 +71,8 @@ class CSmartRowBuffer: public CSimpleInterface, implements ISmartRowBuffer, impl ThorRowQueue *in; size32_t insz; ThorRowQueue *out; - Linked file; - Owned fileio; + CFileOwner tmpFileOwner; + Owned tempFileIO; SpinLock lock; bool waiting; Semaphore waitsem; @@ -141,12 +141,12 @@ class CSmartRowBuffer: public CSimpleInterface, implements ISmartRowBuffer, impl insz = 0; return; } - if (!fileio) { + if (!tempFileIO) { SpinUnblock unblock(lock); - fileio.setown(file->open(IFOcreaterw)); - if (!fileio) + tempFileIO.setown(tmpFileOwner.queryIFile().open(IFOcreaterw)); + if (!tempFileIO) { - throw MakeStringException(-1,"CSmartRowBuffer::flush cannot write file %s",file->queryFilename()); + throw MakeStringException(-1,"CSmartRowBuffer::flush cannot write file %s", tmpFileOwner.queryIFile().queryFilename()); } } MemoryBuffer mb; @@ -184,7 +184,8 @@ class CSmartRowBuffer: public CSimpleInterface, implements ISmartRowBuffer, impl size32_t left = nb*blocksize-mb.length(); memset(mb.reserve(left),0,left); } - fileio->write(blk*(offset_t)blocksize,mb.length(),mb.bufferBase()); + tempFileIO->write(blk*(offset_t)blocksize,mb.length(),mb.bufferBase()); + tmpFileOwner.noteSize(numblocks*blocksize); mb.clear(); } if (waiting) { @@ -222,8 +223,8 @@ class CSmartRowBuffer: public CSimpleInterface, implements ISmartRowBuffer, impl size32_t readBlockSize = nb*blocksize; byte *buf = (byte *)ma.allocate(readBlockSize); CThorStreamDeserializerSource ds(readBlockSize,buf); - assertex(fileio.get()); - size32_t rd = fileio->read(blk*(offset_t)blocksize,readBlockSize,buf); + assertex(tempFileIO.get()); + size32_t rd = tempFileIO->read(blk*(offset_t)blocksize,readBlockSize,buf); assertex(rd==readBlockSize); for (;;) { byte b; @@ -248,8 +249,8 @@ class CSmartRowBuffer: public CSimpleInterface, implements ISmartRowBuffer, impl public: IMPLEMENT_IINTERFACE_USING(CSimpleInterface); - CSmartRowBuffer(CActivityBase *_activity, IFile *_file,size32_t bufsize,IThorRowInterfaces *rowif) - : activity(_activity), file(_file), allocator(rowif->queryRowAllocator()), serializer(rowif->queryRowSerializer()), deserializer(rowif->queryRowDeserializer()) + CSmartRowBuffer(CActivityBase *_activity,IFile *_file,size32_t bufsize,IThorRowInterfaces *rowif) + : activity(_activity), tmpFileOwner(_file, _activity->queryTempFileSizeTracker()), allocator(rowif->queryRowAllocator()), serializer(rowif->queryRowSerializer()), deserializer(rowif->queryRowDeserializer()) { #ifdef _DEBUG putrecheck = false; @@ -263,7 +264,7 @@ class CSmartRowBuffer: public CSimpleInterface, implements ISmartRowBuffer, impl numblocks = 0; insz = 0; eoi = false; - diskfree.setown(createThreadSafeBitSet()); + diskfree.setown(createThreadSafeBitSet()); #ifdef _FULL_TRACE ActPrintLog(activity, "SmartBuffer create %x",(unsigned)(memsize_t)this); @@ -277,18 +278,14 @@ class CSmartRowBuffer: public CSimpleInterface, implements ISmartRowBuffer, impl 
#endif assertex(!waiting); assertex(!waitflush); - // clear in/out contents - while (in->ordinality()) + // clear in/out contents + while (in->ordinality()) ReleaseThorRow(in->dequeue()); delete in; - while (out->ordinality()) + while (out->ordinality()) ReleaseThorRow(out->dequeue()); delete out; - if (fileio) - { - fileio.clear(); - file->remove(); - } + tempFileIO.clear(); } void putRow(const void *row) @@ -1223,8 +1220,8 @@ static StringBuffer &getFileIOStats(StringBuffer &output, IFileIO *iFileIO) class CSharedWriteAheadDisk : public CSharedWriteAheadBase { - Owned spillFile; - Owned spillFileIO; + Owned tempFileOwner; + Owned tempFileIO; CIArrayOf freeChunks; PointerArrayOf freeChunksSized; QueueOf savedChunks; @@ -1446,7 +1443,7 @@ class CSharedWriteAheadDisk : public CSharedWriteAheadBase } else { - Owned stream = createFileSerialStream(spillFileIO, chunk.offset); + Owned stream = createFileSerialStream(tempFileIO, chunk.offset); #ifdef TRACE_WRITEAHEAD unsigned diskChunkNum; stream->get(sizeof(diskChunkNum), &diskChunkNum); @@ -1510,7 +1507,8 @@ class CSharedWriteAheadDisk : public CSharedWriteAheadBase mb.append((byte)0); size32_t len = mb.length(); chunk.setown(getOutOffset(len)); // will find space for 'len', might be bigger if from free list - spillFileIO->write(chunk->offset, len, mb.toByteArray()); + tempFileIO->write(chunk->offset, len, mb.toByteArray()); + tempFileOwner->noteSize(highOffset); #ifdef TRACE_WRITEAHEAD ActPrintLogEx(&activity->queryContainer(), thorlog_all, MCdebugProgress, "Flushed chunk = %d (savedChunks pos=%d), writeOffset = %" I64F "d, writeSize = %d", inMemRows->queryChunk(), savedChunks.ordinality(), chunk->offset, len); #endif @@ -1533,21 +1531,21 @@ class CSharedWriteAheadDisk : public CSharedWriteAheadBase allocator(rowIf->queryRowAllocator()), deserializer(rowIf->queryRowDeserializer()), serializeMeta(meta->querySerializedDiskMeta()) { assertex(spillName); - spillFile.setown(createIFile(spillName)); - spillFile->setShareMode(IFSHnone); - spillFileIO.setown(spillFile->open(IFOcreaterw)); + tempFileOwner.setown(activity->createOwnedTempFile(spillName)); + tempFileOwner->queryIFile().setShareMode(IFSHnone); + tempFileIO.setown(tempFileOwner->queryIFile().open(IFOcreaterw)); highOffset = 0; } ~CSharedWriteAheadDisk() { - if (spillFile) + if (tempFileIO) { StringBuffer tracing; - getFileIOStats(tracing, spillFileIO); - activity->ActPrintLog("CSharedWriteAheadDisk: removing spill file: %s%s", spillFile->queryFilename(), tracing.str()); - spillFileIO.clear(); - spillFile->remove(); + getFileIOStats(tracing, tempFileIO); + activity->ActPrintLog("CSharedWriteAheadDisk: removing spill file: %s%s", tempFileOwner->queryIFile().queryFilename(), tracing.str()); + tempFileIO.clear(); } + for (;;) { Owned chunk = savedChunks.dequeue(); @@ -1566,7 +1564,8 @@ class CSharedWriteAheadDisk : public CSharedWriteAheadBase freeChunks.kill(); freeChunksSized.kill(); highOffset = 0; - spillFileIO->setSize(0); + tempFileIO->setSize(0); + tempFileOwner->noteSize(0); } }; From 42e4591dd0b03cb83c3dc92dae27f72560449b90 Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Thu, 13 Jun 2024 16:33:12 +0100 Subject: [PATCH 060/151] HPCC-32060 Rationalize fastThrough lookahead logic Modify info meta flags and logic, to avoid some unnecessary lookaheads Signed-off-by: Jake Smith --- .../activities/aggregate/thaggregateslave.cpp | 1 - .../choosesets/thchoosesetsslave.cpp | 1 - .../countproject/thcountprojectslave.cpp | 2 +- thorlcr/activities/enth/thenthslave.cpp | 2 +- 
thorlcr/activities/filter/thfilterslave.cpp | 10 +++++ thorlcr/activities/firstn/thfirstnslave.cpp | 5 +++ thorlcr/activities/limit/thlimitslave.cpp | 1 - thorlcr/activities/msort/thgroupsortslave.cpp | 3 +- thorlcr/activities/msort/thmsortslave.cpp | 2 +- .../activities/nsplitter/thnsplitterslave.cpp | 2 + thorlcr/activities/pull/thpullslave.cpp | 4 +- .../activities/selfjoin/thselfjoinslave.cpp | 2 +- thorlcr/graph/thgraphslave.cpp | 43 ++++++++++++++++--- thorlcr/graph/thgraphslave.hpp | 2 + thorlcr/slave/slave.hpp | 17 ++++---- 15 files changed, 74 insertions(+), 23 deletions(-) diff --git a/thorlcr/activities/aggregate/thaggregateslave.cpp b/thorlcr/activities/aggregate/thaggregateslave.cpp index 48c6df103f9..978c9611707 100644 --- a/thorlcr/activities/aggregate/thaggregateslave.cpp +++ b/thorlcr/activities/aggregate/thaggregateslave.cpp @@ -196,7 +196,6 @@ class AggregateSlaveActivity : public AggregateSlaveBase virtual void getMetaInfo(ThorDataLinkMetaInfo &info) const override { initMetaInfo(info); - info.singleRowOutput = true; info.totalRowsMin=1; info.totalRowsMax=1; } diff --git a/thorlcr/activities/choosesets/thchoosesetsslave.cpp b/thorlcr/activities/choosesets/thchoosesetsslave.cpp index 13623a5bff5..43029e92618 100644 --- a/thorlcr/activities/choosesets/thchoosesetsslave.cpp +++ b/thorlcr/activities/choosesets/thchoosesetsslave.cpp @@ -308,7 +308,6 @@ class ChooseSetsPlusActivity : public CSlaveActivity, implements ILookAheadStopN virtual void getMetaInfo(ThorDataLinkMetaInfo &info) const override { initMetaInfo(info); - info.buffersInput = true; info.isSequential = true; info.canReduceNumRows = true; info.canBufferInput = true; diff --git a/thorlcr/activities/countproject/thcountprojectslave.cpp b/thorlcr/activities/countproject/thcountprojectslave.cpp index 0727127b3bb..58b27bf9d62 100644 --- a/thorlcr/activities/countproject/thcountprojectslave.cpp +++ b/thorlcr/activities/countproject/thcountprojectslave.cpp @@ -235,7 +235,7 @@ class CountProjectActivity : public BaseCountProjectActivity, implements ILookAh virtual void getMetaInfo(ThorDataLinkMetaInfo &info) const override { initMetaInfo(info); - info.buffersInput = true; + info.canBufferInput = true; info.isSequential = true; calcMetaInfoSize(info, queryInput(0)); } diff --git a/thorlcr/activities/enth/thenthslave.cpp b/thorlcr/activities/enth/thenthslave.cpp index b91ca33a453..1a5529c2a14 100644 --- a/thorlcr/activities/enth/thenthslave.cpp +++ b/thorlcr/activities/enth/thenthslave.cpp @@ -110,7 +110,7 @@ class BaseEnthActivity : public CSlaveActivity, implements ILookAheadStopNotify void getMetaInfo(ThorDataLinkMetaInfo &info) const override { initMetaInfo(info); - info.buffersInput = true; + info.canBufferInput = true; info.canReduceNumRows = true; calcMetaInfoSize(info, input); } diff --git a/thorlcr/activities/filter/thfilterslave.cpp b/thorlcr/activities/filter/thfilterslave.cpp index a7bd4635428..bf475cb01c4 100644 --- a/thorlcr/activities/filter/thfilterslave.cpp +++ b/thorlcr/activities/filter/thfilterslave.cpp @@ -157,6 +157,11 @@ class CFilterSlaveActivity : public CFilterSlaveActivityBase, public CThorSteppa CThorSteppable::setInputStream(index, input, consumerOrdered); } virtual IInputSteppingMeta *querySteppingMeta() { return CThorSteppable::inputStepping; } + virtual void getMetaInfo(ThorDataLinkMetaInfo &info) const override + { + PARENT::getMetaInfo(info); + info.fastThrough = true; + } }; class CFilterProjectSlaveActivity : public CFilterSlaveActivityBase @@ -230,6 +235,11 @@ class 
CFilterProjectSlaveActivity : public CFilterSlaveActivityBase anyThisGroup = false; return NULL; } + virtual void getMetaInfo(ThorDataLinkMetaInfo &info) const override + { + PARENT::getMetaInfo(info); + info.fastThrough = true; + } }; class CFilterGroupSlaveActivity : public CFilterSlaveActivityBase, public CThorSteppable diff --git a/thorlcr/activities/firstn/thfirstnslave.cpp b/thorlcr/activities/firstn/thfirstnslave.cpp index 2140ab9a67b..0973c014b77 100644 --- a/thorlcr/activities/firstn/thfirstnslave.cpp +++ b/thorlcr/activities/firstn/thfirstnslave.cpp @@ -124,6 +124,11 @@ class CFirstNSlaveLocal : public CFirstNSlaveBase } return NULL; } + virtual void getMetaInfo(ThorDataLinkMetaInfo &info) const override + { + PARENT::getMetaInfo(info); + info.fastThrough = true; + } }; class CFirstNSlaveGrouped : public CFirstNSlaveBase diff --git a/thorlcr/activities/limit/thlimitslave.cpp b/thorlcr/activities/limit/thlimitslave.cpp index ffb55a2457d..2e0231e4db3 100644 --- a/thorlcr/activities/limit/thlimitslave.cpp +++ b/thorlcr/activities/limit/thlimitslave.cpp @@ -75,7 +75,6 @@ class CLimitSlaveActivityBase : public CSlaveActivity { initMetaInfo(info); info.canReduceNumRows = true; - info.canBufferInput = false; info.totalRowsMax = rowLimit; calcMetaInfoSize(info, queryInput(0)); } diff --git a/thorlcr/activities/msort/thgroupsortslave.cpp b/thorlcr/activities/msort/thgroupsortslave.cpp index 0e7c97898c8..4e6aac95e57 100644 --- a/thorlcr/activities/msort/thgroupsortslave.cpp +++ b/thorlcr/activities/msort/thgroupsortslave.cpp @@ -106,7 +106,7 @@ class CLocalSortSlaveActivity : public CSlaveActivity virtual void getMetaInfo(ThorDataLinkMetaInfo &info) const override { initMetaInfo(info); - info.buffersInput = true; + info.canBufferInput = true; calcMetaInfoSize(info, queryInput(0)); } }; @@ -183,6 +183,7 @@ class CSortedSlaveActivity : public CSlaveActivity, public CThorSteppable { initMetaInfo(info); calcMetaInfoSize(info, queryInput(0)); + info.fastThrough = true; } // steppable virtual void setInputStream(unsigned index, CThorInput &input, bool consumerOrdered) override diff --git a/thorlcr/activities/msort/thmsortslave.cpp b/thorlcr/activities/msort/thmsortslave.cpp index a4289e33d0e..14acc3dedb4 100644 --- a/thorlcr/activities/msort/thmsortslave.cpp +++ b/thorlcr/activities/msort/thmsortslave.cpp @@ -206,7 +206,7 @@ class MSortSlaveActivity : public CSlaveActivity virtual void getMetaInfo(ThorDataLinkMetaInfo &info) const override { initMetaInfo(info); - info.buffersInput = true; + info.canBufferInput = true; info.unknownRowsOutput = false; // shuffles rows if (totalrows!=RCUNSET) { // NB totalrows not available until after start info.totalRowsMin = totalrows; diff --git a/thorlcr/activities/nsplitter/thnsplitterslave.cpp b/thorlcr/activities/nsplitter/thnsplitterslave.cpp index 56c5dbe0c91..56158ae2381 100644 --- a/thorlcr/activities/nsplitter/thnsplitterslave.cpp +++ b/thorlcr/activities/nsplitter/thnsplitterslave.cpp @@ -208,6 +208,8 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf PARENT::start(); initMetaInfo(cachedMetaInfo); + cachedMetaInfo.suppressLookAhead = true; + calcMetaInfoSize(cachedMetaInfo, queryInput(0)); ForEachItemIn(o, outputs) diff --git a/thorlcr/activities/pull/thpullslave.cpp b/thorlcr/activities/pull/thpullslave.cpp index f0279539eb5..ef0daa33064 100644 --- a/thorlcr/activities/pull/thpullslave.cpp +++ b/thorlcr/activities/pull/thpullslave.cpp @@ -54,7 +54,9 @@ class PullSlaveActivity : public CSlaveActivity virtual void 
getMetaInfo(ThorDataLinkMetaInfo &info) const override { initMetaInfo(info); - info.buffersInput = true; + info.canBufferInput = true; + info.suppressLookAhead = true; + info.fastThrough = true; calcMetaInfoSize(info, queryInput(0)); } }; diff --git a/thorlcr/activities/selfjoin/thselfjoinslave.cpp b/thorlcr/activities/selfjoin/thselfjoinslave.cpp index 075d28c9390..8a951a8fe06 100644 --- a/thorlcr/activities/selfjoin/thselfjoinslave.cpp +++ b/thorlcr/activities/selfjoin/thselfjoinslave.cpp @@ -224,7 +224,7 @@ class SelfJoinSlaveActivity : public CSlaveActivity virtual void getMetaInfo(ThorDataLinkMetaInfo &info) const override { initMetaInfo(info); - info.buffersInput = true; + info.canBufferInput = true; info.unknownRowsOutput = true; } virtual void gatherActiveStats(CRuntimeStatisticCollection &activeStats) const diff --git a/thorlcr/graph/thgraphslave.cpp b/thorlcr/graph/thgraphslave.cpp index b295efeec64..3974042bc12 100644 --- a/thorlcr/graph/thgraphslave.cpp +++ b/thorlcr/graph/thgraphslave.cpp @@ -124,6 +124,12 @@ bool CThorInput::isFastThrough() const { return itdl->queryFromActivity()->isFastThrough(); } + +bool CThorInput::suppressLookAhead() const +{ + return itdl->queryFromActivity()->suppressLookAhead(); +} + // CSlaveActivity::CSlaveActivity(CGraphElementBase *_container, const StatisticsMapping &statsMapping) @@ -251,28 +257,32 @@ bool CSlaveActivity::isInputFastThrough(unsigned index) const return input.isFastThrough(); } -/* If fastThrough, return false. - * If !fastThrough (indicating needs look ahead) and has existing lookahead, start it, return false. - * If !fastThrough (indicating needs look ahead) and no existing lookahead, return true, caller will install. +/* If fastThrough or suppressLookAhead, return false. + * If not (indicating needs look ahead) and has existing lookahead, start it, return false. + * If not (indicating needs look ahead) and no existing lookahead, return true, caller will install. * * NB: only return true if new lookahead needs installing. 
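 * e.g. with this change FILTER flags itself fastThrough and the splitter flags
 * suppressLookAhead, so neither causes a new lookahead to be installed on its outputs.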
*/ bool CSlaveActivity::ensureStartFTLookAhead(unsigned index) { CThorInput &input = inputs.item(index); - if (input.isFastThrough()) + if (input.isFastThrough() || input.suppressLookAhead()) return false; // no look ahead required else { // look ahead required if (input.hasLookAhead()) { + //ActPrintLog("Already has lookahead"); // no change, start existing look ahead startLookAhead(index); return false; // no [new] look ahead required } else + { + //ActPrintLog("lookahead will be inserted"); return true; // new look ahead required + } } } @@ -308,7 +318,7 @@ bool CSlaveActivity::canStall() const getMetaInfo(info); if (info.canStall) return true; - if (info.isSource || info.buffersInput || info.canBufferInput) + if (info.isSource || info.canBufferInput) return false; for (unsigned i=0; iqueryFromActivity(); + if (!inputAct->suppressLookAhead()) + return false; + } + } + return true; +} IStrandJunction *CSlaveActivity::getOutputStreams(CActivityBase &ctx, unsigned idx, PointerArrayOf &streams, const CThorStrandOptions * consumerOptions, bool consumerOrdered, IOrderedCallbackCollection * orderedCallbacks) diff --git a/thorlcr/graph/thgraphslave.hpp b/thorlcr/graph/thgraphslave.hpp index c0239e4e027..f6b73c912c1 100644 --- a/thorlcr/graph/thgraphslave.hpp +++ b/thorlcr/graph/thgraphslave.hpp @@ -186,6 +186,7 @@ class CThorInput : public CSimpleInterfaceOf stopped = true; } bool isFastThrough() const; + bool suppressLookAhead() const; }; typedef IArrayOf CThorInputArray; @@ -266,6 +267,7 @@ class graphslave_decl CSlaveActivity : public CActivityBase, public CEdgeProgres void debugRequest(unsigned edgeIdx, MemoryBuffer &msg); bool canStall() const; bool isFastThrough() const; + bool suppressLookAhead() const; // IThorDataLink virtual CSlaveActivity *queryFromActivity() override { return this; } diff --git a/thorlcr/slave/slave.hpp b/thorlcr/slave/slave.hpp index 5b882d43091..1ebe27b7824 100644 --- a/thorlcr/slave/slave.hpp +++ b/thorlcr/slave/slave.hpp @@ -41,15 +41,14 @@ struct ThorDataLinkMetaInfo __int64 totalRowsMax = -1; // set to -1 if not known offset_t spilled = (offset_t)-1; // amount "spilled" to disk (approx) (offset_t)-1 for not known - bool isSource = false; - bool isSequential = false; - bool canStall = false; - bool fastThrough = false; - bool buffersInput = false; - bool canBufferInput = false; - bool singleRowOutput = false; - bool canIncreaseNumRows = false; - bool canReduceNumRows = false; + bool isSource = false; // A source activity (disk read, index read, etc) + bool isSequential = false; // There is a sequential nature to the implementation, workers dependent on previous worker (e.g. global ChooseN) + bool canStall = false; // The activity may stall if its outputs are not pulled on each worker + bool fastThrough = false; // The activity will return rows quickly if it can (does not mean it can't block on its input) + bool canBufferInput = false; // The activity caches input rows + bool suppressLookAhead = false; // Downstream activities should avoid inserting lookaheads + bool canIncreaseNumRows = false; // The activity can produce more rows than it reads from its input (e.g. NORMALIZE) + bool canReduceNumRows = false; // The activity can produce less rows than it reads from its input (e.g. 
ENTH) bool unknownRowsOutput = false; // cannot use input to deduce total offset_t byteTotal = (offset_t)-1; // total (uncompressed) byte count of all rows }; From edfb8edf015ddcdcd3421babbaf77119a04f9898 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Fri, 14 Jun 2024 16:36:40 +0100 Subject: [PATCH 061/151] Resolve merge clash Signed-off-by: Gavin Halliday --- .../LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.hpp | 6 ------ system/logaccess/ElasticStack/ElasticStackLogAccess.hpp | 6 ------ 2 files changed, 12 deletions(-) diff --git a/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.hpp b/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.hpp index 106a92327bc..d2b92460f4e 100644 --- a/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.hpp +++ b/system/logaccess/Azure/LogAnalytics/CurlClient/AzureLogAnalyticsCurlClient.hpp @@ -76,12 +76,6 @@ class AzureLogAnalyticsCurlClient : public CInterfaceOf StringBuffer m_componentsLookupKeyColumn; StringBuffer m_instanceLookupKeyColumn; - StringBuffer m_spanSearchColName; - StringBuffer m_spanIndexSearchPattern; - - StringBuffer m_traceSearchColName; - StringBuffer m_traceIndexSearchPattern; - bool targetIsContainerLogV2 = false; public: diff --git a/system/logaccess/ElasticStack/ElasticStackLogAccess.hpp b/system/logaccess/ElasticStack/ElasticStackLogAccess.hpp index cae1e01e517..b0fd8c15383 100644 --- a/system/logaccess/ElasticStack/ElasticStackLogAccess.hpp +++ b/system/logaccess/ElasticStack/ElasticStackLogAccess.hpp @@ -71,12 +71,6 @@ class ElasticStackLogAccess : public CInterfaceOf StringBuffer m_hostSearchColName; StringBuffer m_hostIndexSearchPattern; - StringBuffer m_spanSearchColName; - StringBuffer m_spanIndexSearchPattern; - - StringBuffer m_traceSearchColName; - StringBuffer m_traceIndexSearchPattern; - StringBuffer m_defaultDocType; //default doc type to query elasticlient::Client m_esClient; From a34170e4d144a9093506e466ce9fbca370fa3510 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Fri, 14 Jun 2024 17:00:55 +0100 Subject: [PATCH 062/151] Split off 9.0.118 Signed-off-by: Gavin Halliday --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/localroxie.yaml.fixed | 4 ++-- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/roxie.yaml.fixed | 16 ++++++++-------- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 16 files changed, 35 insertions(+), 35 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index cfbfe006f2d..d464f82f589 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.0.117-closedown0 +version: 9.0.119-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.0.117-closedown0 +appVersion: 9.0.119-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index c84dc4ed448..e5a996e4ce5 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1314,7 +1314,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index f7329d5447a..3ad9abc8a7d 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -50,7 +50,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 annotations: checksum/config: {{ $configSHA }} spec: diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index d9a3507dd28..8ca163cf633 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -82,7 +82,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 5976009b5c4..83bea8a6e79 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -56,7 +56,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index cbdd2a61145..bb2f431d598 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -135,7 +135,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index df4d7a92cbc..9baab7b13bc 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -57,7 +57,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -142,7 +142,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index e3cc9b35284..a41719e8e70 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -64,7 +64,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index ddfeebe8f12..f703f88f434 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -117,7 +117,7 @@ spec: server: {{ .name | quote }} accessDali: "yes" app: {{ $application }} - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index e634cb96ed2..0ff8395a220 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -70,7 +70,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml.fixed b/helm/hpcc/templates/localroxie.yaml.fixed index 099b501840b..1365ca48448 100644 --- a/helm/hpcc/templates/localroxie.yaml.fixed +++ b/helm/hpcc/templates/localroxie.yaml.fixed @@ -74,13 +74,13 @@ spec: accessDali: "yes" accessEsp: "yes" <<<<<<< HEAD - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 6cdf0495d85..72d1f73456e 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -120,7 +120,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -180,7 +180,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -242,7 +242,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -347,7 +347,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/roxie.yaml.fixed b/helm/hpcc/templates/roxie.yaml.fixed index 3f919f7c048..3f764809c02 100644 --- a/helm/hpcc/templates/roxie.yaml.fixed +++ b/helm/hpcc/templates/roxie.yaml.fixed @@ -126,7 +126,7 @@ spec: run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} <<<<<<< HEAD - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -134,7 +134,7 @@ spec: {{ toYaml $toposerver.labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -193,10 +193,10 @@ metadata: name: {{ $commonCtx.toponame | quote }} labels: <<<<<<< HEAD - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} ======= - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} >>>>>>> origin/candidate-9.6.x spec: @@ -260,7 +260,7 @@ spec: accessDali: "yes" accessEsp: "yes" <<<<<<< HEAD - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ 
-269,7 +269,7 @@ spec: {{ toYaml $roxie.labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -379,7 +379,7 @@ spec: accessDali: "yes" accessEsp: "yes" <<<<<<< HEAD - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -387,7 +387,7 @@ spec: {{ toYaml $roxie.labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 0b87dce862f..753f0362c7d 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -52,7 +52,7 @@ spec: run: {{ $serviceName | quote }} server: {{ $serviceName | quote }} accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 92fe896c3cd..3182a00a772 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -82,7 +82,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -147,7 +147,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -214,7 +214,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ -347,7 +347,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -412,7 +412,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.0.117-closedown0 + helmVersion: 9.0.119-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" 
"instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 5eb1da4aa55..2f11f281cb9 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 0 ) -set ( HPCC_POINT 117 ) +set ( HPCC_POINT 119 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-07T16:59:28Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-14T16:00:55Z" ) ### From c370e6787692d33a4ad928995ead535d3b6e3d60 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Fri, 14 Jun 2024 17:02:30 +0100 Subject: [PATCH 063/151] Split off 9.2.96 Signed-off-by: Gavin Halliday --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/localroxie.yaml.fixed | 4 ++-- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/roxie.yaml.fixed | 16 ++++++++-------- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 16 files changed, 35 insertions(+), 35 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index cffec699dbc..08b98c94298 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.2.95-closedown0 +version: 9.2.97-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.2.95-closedown0 +appVersion: 9.2.97-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index e6dd19d96b6..444d2af71db 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1405,7 +1405,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 7bdd3cb8574..381d7fbddb0 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -50,7 +50,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index 8b35e6d018f..d840198008d 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -82,7 +82,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 0e12a34c332..6de6c7b87c3 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -56,7 +56,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index c7216fa0f66..520cded722f 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -133,7 +133,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 1398ba37599..e665c1e1c0b 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -57,7 +57,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -140,7 +140,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index fff53b07f76..d0b54cef1b7 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -64,7 +64,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 92bebaa579e..d7ee54fc3c3 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -120,7 +120,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index dee41f099e3..9ef61b02d47 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -70,7 +70,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml.fixed b/helm/hpcc/templates/localroxie.yaml.fixed index 099b501840b..b522e90b9ca 100644 --- a/helm/hpcc/templates/localroxie.yaml.fixed +++ b/helm/hpcc/templates/localroxie.yaml.fixed @@ -74,13 +74,13 @@ spec: accessDali: "yes" accessEsp: "yes" <<<<<<< HEAD - helmVersion: 9.0.117-closedown0 + helmVersion: 9.2.97-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.117-closedown0 + helmVersion: 9.2.97-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 05ef4e57b0a..a39049fa504 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -120,7 +120,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -178,7 +178,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -240,7 +240,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -346,7 +346,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/roxie.yaml.fixed b/helm/hpcc/templates/roxie.yaml.fixed index 3f919f7c048..4b7736c989e 100644 --- a/helm/hpcc/templates/roxie.yaml.fixed +++ b/helm/hpcc/templates/roxie.yaml.fixed @@ -126,7 +126,7 @@ spec: run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} <<<<<<< HEAD - helmVersion: 9.0.117-closedown0 + helmVersion: 9.2.97-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -134,7 +134,7 @@ spec: {{ toYaml $toposerver.labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.117-closedown0 + helmVersion: 9.2.97-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -193,10 +193,10 @@ metadata: name: {{ $commonCtx.toponame | quote }} labels: <<<<<<< HEAD - helmVersion: 9.0.117-closedown0 + helmVersion: 9.2.97-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} ======= - helmVersion: 9.0.117-closedown0 + helmVersion: 9.2.97-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} >>>>>>> origin/candidate-9.6.x spec: @@ -260,7 +260,7 @@ spec: accessDali: "yes" accessEsp: "yes" <<<<<<< HEAD - helmVersion: 9.0.117-closedown0 + helmVersion: 9.2.97-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -269,7 +269,7 @@ 
spec: {{ toYaml $roxie.labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.117-closedown0 + helmVersion: 9.2.97-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -379,7 +379,7 @@ spec: accessDali: "yes" accessEsp: "yes" <<<<<<< HEAD - helmVersion: 9.0.117-closedown0 + helmVersion: 9.2.97-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -387,7 +387,7 @@ spec: {{ toYaml $roxie.labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.117-closedown0 + helmVersion: 9.2.97-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 0c3f8317753..34a1a660472 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 16d0cec1ba3..caf98186b0e 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -82,7 +82,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -145,7 +145,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -210,7 +210,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ -341,7 +341,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -404,7 +404,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.2.95-closedown0 + helmVersion: 9.2.97-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} 
{{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index e26c90f5321..5a0d035376d 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 2 ) -set ( HPCC_POINT 95 ) +set ( HPCC_POINT 97 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-07T17:01:06Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-14T16:02:29Z" ) ### From 8a7b4428c6f2b8433032c25ac406fa987f582f79 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Fri, 14 Jun 2024 17:03:56 +0100 Subject: [PATCH 064/151] Split off 9.4.70 Signed-off-by: Gavin Halliday --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/localroxie.yaml.fixed | 4 ++-- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/roxie.yaml.fixed | 16 ++++++++-------- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 16 files changed, 35 insertions(+), 35 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 139a9b83c03..c8177a82bfb 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.4.69-closedown0 +version: 9.4.71-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.4.69-closedown0 +appVersion: 9.4.71-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 33b9199de4d..3af14c427c0 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1519,7 +1519,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 314832ebffc..732a1114341 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index 1974d20224d..c7b1a0a3b7c 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index c4e0dc5b0ff..eadf65d08fd 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index e50403c215d..05ebf461b28 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -60,7 +60,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -137,7 +137,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index d1f04144b0a..135798849d6 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -143,7 +143,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index fb4890e011d..1afedb255f5 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 5b692cf5102..b2d4f24bfc6 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -122,7 +122,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index de404448da2..8063096a70e 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml.fixed b/helm/hpcc/templates/localroxie.yaml.fixed index 099b501840b..f5597efa94f 100644 --- a/helm/hpcc/templates/localroxie.yaml.fixed +++ b/helm/hpcc/templates/localroxie.yaml.fixed @@ -74,13 +74,13 @@ spec: accessDali: "yes" accessEsp: "yes" <<<<<<< HEAD - helmVersion: 9.0.117-closedown0 + helmVersion: 9.4.71-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.117-closedown0 + helmVersion: 9.4.71-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 073b7570de8..ecd46f7112a 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/roxie.yaml.fixed b/helm/hpcc/templates/roxie.yaml.fixed index 3f919f7c048..7a2c338e179 100644 --- a/helm/hpcc/templates/roxie.yaml.fixed +++ b/helm/hpcc/templates/roxie.yaml.fixed @@ -126,7 +126,7 @@ spec: run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} <<<<<<< HEAD - helmVersion: 9.0.117-closedown0 + helmVersion: 9.4.71-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -134,7 +134,7 @@ spec: {{ toYaml $toposerver.labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.117-closedown0 + helmVersion: 9.4.71-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -193,10 +193,10 @@ metadata: name: {{ $commonCtx.toponame | quote }} labels: <<<<<<< HEAD - helmVersion: 9.0.117-closedown0 + helmVersion: 9.4.71-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} ======= - helmVersion: 9.0.117-closedown0 + helmVersion: 9.4.71-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} >>>>>>> origin/candidate-9.6.x spec: @@ -260,7 +260,7 @@ spec: accessDali: "yes" accessEsp: "yes" <<<<<<< HEAD - helmVersion: 9.0.117-closedown0 + helmVersion: 9.4.71-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -269,7 +269,7 @@ 
spec: {{ toYaml $roxie.labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.117-closedown0 + helmVersion: 9.4.71-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -379,7 +379,7 @@ spec: accessDali: "yes" accessEsp: "yes" <<<<<<< HEAD - helmVersion: 9.0.117-closedown0 + helmVersion: 9.4.71-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -387,7 +387,7 @@ spec: {{ toYaml $roxie.labels | indent 8 }} {{- end }} ======= - helmVersion: 9.0.117-closedown0 + helmVersion: 9.4.71-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index df851836973..a6359bb8cd2 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index a69918afa2b..f5ef8bf3d6d 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -86,7 +86,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -151,7 +151,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -218,7 +218,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ -351,7 +351,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -416,7 +416,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.4.69-closedown0 + helmVersion: 9.4.71-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} 
{{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index c2381512fc8..e313885be18 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 4 ) -set ( HPCC_POINT 69 ) +set ( HPCC_POINT 71 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-07T17:02:37Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-14T16:03:56Z" ) ### From f5232ef51974f57ce47df38b6e32e69477969bd0 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Fri, 14 Jun 2024 17:27:18 +0100 Subject: [PATCH 065/151] Split off 9.6.22 Signed-off-by: Gavin Halliday --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 13fae5887dc..6ef69d7f8e0 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.6.21-closedown0 +version: 9.6.23-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.6.21-closedown0 +appVersion: 9.6.23-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index d8543620659..b9335684a21 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1523,7 +1523,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index ff382b69d37..b6c3dac92b8 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index dd60aaedcd8..bef0f09a7a7 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index f272ffb018d..d1913603c0f 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 3a4910ed5ac..4c14437f2a9 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -139,7 +139,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 126e2291259..bf61ed3bd6a 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -147,7 +147,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 28b723dbf1b..d2e73bb48fc 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 85556052525..4b9b87f8c35 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -125,7 +125,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 4db571144f9..c92e039725e 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 933b60193be..f2200dd39b0 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 72a15b9ec34..498ba0f78cf 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index e16ad6a6be1..7ea00c0c88e 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -88,7 +88,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -153,7 +153,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -220,7 +220,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-353,7 +353,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -418,7 +418,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.6.21-closedown0 + helmVersion: 9.6.23-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 93195ddb8d2..dc014e9a22f 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 6 ) -set ( HPCC_POINT 21 ) +set ( HPCC_POINT 23 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-07T17:04:00Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-14T16:27:18Z" ) ### From 723f3047219fecf3e3c4f327e90437a7a5ed3134 Mon Sep 17 00:00:00 2001 From: "Dan S. Camper" Date: Mon, 17 Jun 2024 08:47:47 -0500 Subject: [PATCH 066/151] HPCC-32063 Add 'wrap' parameter to Std.File.Copy() and Std.File.fCopy() wrap is already an option in Std.File.RemotePull(), Std.File.fRemotePull(), and in dfuplus (the command line utility). --- ecllibrary/std/File.ecl | 9 ++++---- plugins/fileservices/fileservices.cpp | 30 +++++++++++++++++-------- plugins/fileservices/fileservices.hpp | 2 ++ plugins/proxies/lib_fileservices.ecllib | 4 ++-- 4 files changed, 30 insertions(+), 15 deletions(-) diff --git a/ecllibrary/std/File.ecl b/ecllibrary/std/File.ecl index bb508adb422..d36405486f5 100644 --- a/ecllibrary/std/File.ecl +++ b/ecllibrary/std/File.ecl @@ -645,11 +645,12 @@ EXPORT Despray(varstring logicalName, varstring destinationIP='', varstring dest * @param noSplit Don't split a file part to multiple target parts. Default is FALSE. * @param expireDays Number of days to auto-remove file. Default is -1, not expire. * @param ensure Only copy file parts if not copied. Defaults to FALSE. + * @param wrap Should the fileparts be wrapped when copying to a smaller sized cluster? The default is FALSE. * @return The DFU workunit id for the job.
*/ -EXPORT varstring fCopy(varstring sourceLogicalName, varstring destinationGroup, varstring destinationLogicalName, varstring sourceDali='', integer4 timeOut=-1, varstring espServerIpPort=GETENV('ws_fs_server'), integer4 maxConnections=-1, boolean allowOverwrite=FALSE, boolean replicate=FALSE, boolean asSuperfile=FALSE, boolean compress=FALSE, boolean forcePush=FALSE, integer4 transferBufferSize=0, boolean preserveCompression=TRUE, boolean noSplit=FALSE, integer4 expireDays=-1, boolean ensure=false) := - lib_fileservices.FileServices.fCopy(sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, allowOverwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, noSplit, expireDays, ensure); +EXPORT varstring fCopy(varstring sourceLogicalName, varstring destinationGroup, varstring destinationLogicalName, varstring sourceDali='', integer4 timeOut=-1, varstring espServerIpPort=GETENV('ws_fs_server'), integer4 maxConnections=-1, boolean allowOverwrite=FALSE, boolean replicate=FALSE, boolean asSuperfile=FALSE, boolean compress=FALSE, boolean forcePush=FALSE, integer4 transferBufferSize=0, boolean preserveCompression=TRUE, boolean noSplit=FALSE, integer4 expireDays=-1, boolean ensure=false, boolean wrap=false) := + lib_fileservices.FileServices.fCopy(sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, allowOverwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, noSplit, expireDays, ensure, wrap); /** * Same as fCopy, but does not return the DFU Workunit ID. @@ -657,8 +658,8 @@ EXPORT varstring fCopy(varstring sourceLogicalName, varstring destinationGroup, * @see fCopy */ -EXPORT Copy(varstring sourceLogicalName, varstring destinationGroup, varstring destinationLogicalName, varstring sourceDali='', integer4 timeOut=-1, varstring espServerIpPort=GETENV('ws_fs_server'), integer4 maxConnections=-1, boolean allowOverwrite=FALSE, boolean replicate=FALSE, boolean asSuperfile=FALSE, boolean compress=FALSE, boolean forcePush=FALSE, integer4 transferBufferSize=0, boolean preserveCompression=TRUE, boolean noSplit=FALSE, integer4 expireDays=-1, boolean ensure=false) := - lib_fileservices.FileServices.Copy(sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, allowOverwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, noSplit, expireDays, ensure); +EXPORT Copy(varstring sourceLogicalName, varstring destinationGroup, varstring destinationLogicalName, varstring sourceDali='', integer4 timeOut=-1, varstring espServerIpPort=GETENV('ws_fs_server'), integer4 maxConnections=-1, boolean allowOverwrite=FALSE, boolean replicate=FALSE, boolean asSuperfile=FALSE, boolean compress=FALSE, boolean forcePush=FALSE, integer4 transferBufferSize=0, boolean preserveCompression=TRUE, boolean noSplit=FALSE, integer4 expireDays=-1, boolean ensure=false, boolean wrap=false) := + lib_fileservices.FileServices.Copy(sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, allowOverwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, noSplit, expireDays, ensure, wrap); /** * Ensures the specified file is replicated to its mirror copies. 
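For context on the File.ecl change above: the only user-visible addition is the trailing wrap parameter; the rest of this patch threads the flag through to the DFU server. Below is a minimal usage sketch, assuming hypothetical logical file names and a hypothetical target group name; as with the equivalent option in dfuplus and RemotePull(), wrapping keeps the original number of file parts when copying to a cluster with fewer nodes, placing several parts per target node rather than re-splitting them.

    IMPORT Std;

    // Copy a logical file to a (possibly smaller) target cluster, wrapping the
    // source file parts onto the target nodes instead of re-splitting them.
    // All names below are illustrative and not part of the patch.
    dfuWuid := Std.File.fCopy('~demo::source::file',   // hypothetical source file
                              'mythor',                // hypothetical destination group
                              '~demo::target::file',   // hypothetical destination file
                              allowOverwrite := TRUE,
                              wrap := TRUE);           // new parameter; defaults to FALSE
    OUTPUT(dfuWuid);  // fCopy returns the DFU workunit id; Copy takes the same arguments but returns nothing

Note also the compatibility pattern visible in the plugin changes that follow: rather than altering fsCopy_v4/fsfCopy_v4, the patch adds fsCopy_v5/fsfCopy_v5 entry points and repoints the lib_fileservices.ecllib proxies at them, so workunits compiled against the older proxies continue to resolve the entry points they were built with.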
diff --git a/plugins/fileservices/fileservices.cpp b/plugins/fileservices/fileservices.cpp index f5e1ae82d48..150b0d1e5d6 100644 --- a/plugins/fileservices/fileservices.cpp +++ b/plugins/fileservices/fileservices.cpp @@ -1358,7 +1358,7 @@ FILESERVICES_API char * FILESERVICES_CALL fsfDespray2(ICodeContext *ctx, const c return implementDespray(ctx, sourceLogicalName, destinationIP, destinationPath, timeOut, espServerIpPort, maxConnections, overwrite, destinationPlane); } -FILESERVICES_API char * FILESERVICES_CALL implementCopy(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression, bool noSplit, int expireDays, bool ensure) +FILESERVICES_API char * FILESERVICES_CALL implementCopy(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression, bool noSplit, int expireDays, bool ensure, bool wrap) { LOG(MCauditInfo, "Copy: %s%s", sourceLogicalName,asSuperfile?" as superfile":""); @@ -1398,6 +1398,8 @@ FILESERVICES_API char * FILESERVICES_CALL implementCopy(ICodeContext *ctx, const req->setNosplit(true); if (ensure) req->setEnsure(true); + if (wrap) + req->setWrap(true); req->setExpireDays(expireDays); Owned result = server.Copy(req); @@ -1426,42 +1428,52 @@ FILESERVICES_API char * FILESERVICES_CALL implementCopy(ICodeContext *ctx, const FILESERVICES_API void FILESERVICES_CALL fsCopy(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize) { - CTXFREE(parentCtx, implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, true, false, -1, false)); + CTXFREE(parentCtx, implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, true, false, -1, false, false)); } FILESERVICES_API void FILESERVICES_CALL fsCopy_v2(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression) { - CTXFREE(parentCtx, implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, false, -1, false)); + CTXFREE(parentCtx, implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, 
forcePush, transferBufferSize, preserveCompression, false, -1, false, false)); } FILESERVICES_API void FILESERVICES_CALL fsCopy_v3(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression, bool noSplit, int expireDays) { - CTXFREE(parentCtx, implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, noSplit, expireDays, false)); + CTXFREE(parentCtx, implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, noSplit, expireDays, false, false)); } FILESERVICES_API void FILESERVICES_CALL fsCopy_v4(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression, bool noSplit, int expireDays, bool ensure) { - CTXFREE(parentCtx, implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, noSplit, expireDays, ensure)); + CTXFREE(parentCtx, implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, noSplit, expireDays, ensure, false)); +} + +FILESERVICES_API void FILESERVICES_CALL fsCopy_v5(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression, bool noSplit, int expireDays, bool ensure, bool wrap) +{ + CTXFREE(parentCtx, implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, noSplit, expireDays, ensure, wrap)); } FILESERVICES_API char * FILESERVICES_CALL fsfCopy(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize) { - return implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, true, false, -1, false); + return implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, 
maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, true, false, -1, false, false); } FILESERVICES_API char * FILESERVICES_CALL fsfCopy_v2(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression) { - return implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, false, -1, false); + return implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, false, -1, false, false); } FILESERVICES_API char * FILESERVICES_CALL fsfCopy_v3(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression, bool noSplit, int expireDays) { - return implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, noSplit, expireDays, false); + return implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, noSplit, expireDays, false, false); } FILESERVICES_API char * FILESERVICES_CALL fsfCopy_v4(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression, bool noSplit, int expireDays, bool ensure) { - return implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, noSplit, expireDays, ensure); + return implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, noSplit, expireDays, ensure, false); +} + +FILESERVICES_API char * FILESERVICES_CALL fsfCopy_v5(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression, bool noSplit, int expireDays, bool ensure, bool wrap) +{ + return implementCopy(ctx, sourceLogicalName, destinationGroup, destinationLogicalName, sourceDali, 
timeOut, espServerIpPort, maxConnections, overwrite, replicate, asSuperfile, compress, forcePush, transferBufferSize, preserveCompression, noSplit, expireDays, ensure, wrap); } diff --git a/plugins/fileservices/fileservices.hpp b/plugins/fileservices/fileservices.hpp index 0bd5e5fee85..04dae78ed35 100644 --- a/plugins/fileservices/fileservices.hpp +++ b/plugins/fileservices/fileservices.hpp @@ -86,6 +86,7 @@ FILESERVICES_API void FILESERVICES_CALL fsCopy(ICodeContext *ctx, const char * s FILESERVICES_API void FILESERVICES_CALL fsCopy_v2(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression); FILESERVICES_API void FILESERVICES_CALL fsCopy_v3(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression, bool noSplit=false, int expireDays=-1); FILESERVICES_API void FILESERVICES_CALL fsCopy_v4(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression, bool noSplit=false, int expireDays=-1, bool ensure=false); +FILESERVICES_API void FILESERVICES_CALL fsCopy_v5(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression, bool noSplit=false, int expireDays=-1, bool ensure=false, bool wrap=false); FILESERVICES_API void FILESERVICES_CALL fsDkc(ICodeContext *ctx, const char * sourceLogicalName, const char * destinationIP, const char * destinationPath, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite); FILESERVICES_API void FILESERVICES_CALL fsReplicate(ICodeContext *ctx, const char * sourceLogicalName, int timeOut, const char * espServerIpPort); FILESERVICES_API void FILESERVICES_CALL fsCreateSuperFile(ICodeContext *ctx, const char *lsuperfn, bool sequentialparts, bool ifdoesnotexist); @@ -140,6 +141,7 @@ FILESERVICES_API char * FILESERVICES_CALL fsfCopy(ICodeContext *ctx, const char FILESERVICES_API char * FILESERVICES_CALL fsfCopy_v2(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression); FILESERVICES_API char * FILESERVICES_CALL fsfCopy_v3(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool 
compress, bool forcePush, int transferBufferSize, bool preserveCompression, bool noSplit=false, int expireDays=-1); FILESERVICES_API char * FILESERVICES_CALL fsfCopy_v4(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression, bool noSplit=false, int expireDays=-1, bool ensure=false); +FILESERVICES_API char * FILESERVICES_CALL fsfCopy_v5(ICodeContext *ctx, const char * sourceLogicalName, const char *destinationGroup, const char * destinationLogicalName, const char * sourceDali, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite, bool replicate, bool asSuperfile, bool compress, bool forcePush, int transferBufferSize, bool preserveCompression, bool noSplit=false, int expireDays=-1, bool ensure=false, bool wrap=false); FILESERVICES_API char * FILESERVICES_CALL fsfDkc(ICodeContext *ctx, const char * sourceLogicalName, const char * destinationIP, const char * destinationPath, int timeOut, const char * espServerIpPort, int maxConnections, bool overwrite); FILESERVICES_API char * FILESERVICES_CALL fsfReplicate(ICodeContext *ctx, const char * sourceLogicalName, int timeOut, const char * espServerIpPort); FILESERVICES_API char * FILESERVICES_CALL fsfMonitorLogicalFileName(ICodeContext *ctx, const char *eventname, const char *_lfn,int shotcount, const char * espServerIpPort); diff --git a/plugins/proxies/lib_fileservices.ecllib b/plugins/proxies/lib_fileservices.ecllib index fc8db8d65c8..f785ee7ab34 100644 --- a/plugins/proxies/lib_fileservices.ecllib +++ b/plugins/proxies/lib_fileservices.ecllib @@ -44,7 +44,7 @@ export FileServices := SERVICE : plugin('fileservices'), time SprayXml(const varstring sourceIP='', const varstring sourcePath, integer4 sourceMaxRecordSize=8192, const varstring sourceRowTag, const varstring sourceEncoding='utf8', const varstring destinationGroup, const varstring destinationLogicalName, integer4 timeOut=-1, const varstring espServerIpPort=GETENV('ws_fs_server'), integer4 maxConnections=-1, boolean allowoverwrite=false, boolean replicate=false,boolean compress=__CONTAINERIZED__, boolean failIfNoSourceFile=false, integer4 expireDays=-1, const varstring dfuServerQueue='', boolean noSplit=false, const varstring sourcePlane='', unsigned4 destinationNumParts=0, boolean noCommon=true) : c,action,context,entrypoint='fsSprayXml_v6'; SprayJson(const varstring sourceIP='', const varstring sourcePath, integer4 sourceMaxRecordSize=8192, const varstring sourceRowPath='/', const varstring sourceEncoding='utf8', const varstring destinationGroup, const varstring destinationLogicalName, integer4 timeOut=-1, const varstring espServerIpPort=GETENV('ws_fs_server'), integer4 maxConnections=-1, boolean allowoverwrite=false, boolean replicate=false,boolean compress=__CONTAINERIZED__, boolean failIfNoSourceFile=false, integer4 expireDays=-1, const varstring dfuServerQueue='', boolean noSplit=false, const varstring username = '', const varstring userPw = '', const varstring sourcePlane='', unsigned4 destinationNumParts=0, boolean noCommon=true) : c,action,context,entrypoint='fsSprayJson_v3'; Despray(const varstring logicalName, const varstring destinationIP='', const varstring destinationPath, integer4 timeOut=-1, const varstring espServerIpPort=GETENV('ws_fs_server'), integer4 maxConnections=-1, 
boolean allowoverwrite=false, const varstring destinationPlane='') : c,action,context,entrypoint='fsDespray2'; - Copy(const varstring sourceLogicalName, const varstring destinationGroup, const varstring destinationLogicalName, const varstring sourceDali='', integer4 timeOut=-1, const varstring espServerIpPort=GETENV('ws_fs_server'), integer4 maxConnections=-1, boolean allowoverwrite=false, boolean replicate=false, boolean asSuperfile=false, boolean compress=false, boolean forcePush=false, integer4 transferBufferSize=0, boolean preserveCompression=true, boolean noSplit=false, integer4 expireDays=-1, boolean ensure=false) : c,action,context,entrypoint='fsCopy_v4'; + Copy(const varstring sourceLogicalName, const varstring destinationGroup, const varstring destinationLogicalName, const varstring sourceDali='', integer4 timeOut=-1, const varstring espServerIpPort=GETENV('ws_fs_server'), integer4 maxConnections=-1, boolean allowoverwrite=false, boolean replicate=false, boolean asSuperfile=false, boolean compress=false, boolean forcePush=false, integer4 transferBufferSize=0, boolean preserveCompression=true, boolean noSplit=false, integer4 expireDays=-1, boolean ensure=false, boolean wrap=false) : c,action,context,entrypoint='fsCopy_v5'; Replicate(const varstring logicalName, integer4 timeOut=-1, const varstring espServerIpPort=GETENV('ws_fs_server')) : c,action,context,entrypoint='fsReplicate'; CreateSuperFile(const varstring lsuperfn, boolean sequentialparts=false,boolean ifdoesnotexist=false) : c,action,context,entrypoint='fsCreateSuperFile'; boolean SuperFileExists(const varstring lsuperfn) : c,context,entrypoint='fsSuperFileExists'; @@ -71,7 +71,7 @@ export FileServices := SERVICE : plugin('fileservices'), time varstring fSprayXml(const varstring sourceIP='', const varstring sourcePath, integer4 sourceMaxRecordSize=8192, const varstring sourceRowTag, const varstring sourceEncoding='utf8', const varstring destinationGroup, const varstring destinationLogicalName, integer4 timeOut=-1, const varstring espServerIpPort=GETENV('ws_fs_server'), integer4 maxConnections=-1, boolean allowoverwrite=false, boolean replicate=false, boolean compress=__CONTAINERIZED__, boolean failIfNoSourceFile=false, integer4 expireDays=-1, const varstring dfuServerQueue='', boolean noSplit=false, const varstring sourcePlane='', unsigned4 destinationNumParts=0, boolean noCommon=true) : c,action,context,entrypoint='fsfSprayXml_v6'; varstring fSprayJson(const varstring sourceIP='', const varstring sourcePath, integer4 sourceMaxRecordSize=8192, const varstring sourceRowPath='/', const varstring sourceEncoding='utf8', const varstring destinationGroup, const varstring destinationLogicalName, integer4 timeOut=-1, const varstring espServerIpPort=GETENV('ws_fs_server'), integer4 maxConnections=-1, boolean allowoverwrite=false, boolean replicate=false, boolean compress=__CONTAINERIZED__, boolean failIfNoSourceFile=false, integer4 expireDays=-1, const varstring dfuServerQueue='', boolean noSplit=false, const varstring username = '', const varstring userPw = '', const varstring sourcePlane='', unsigned4 destinationNumParts=0, boolean noCommon=true) : c,action,context,entrypoint='fsfSprayJson_v3'; varstring fDespray(const varstring logicalName, const varstring destinationIP='', const varstring destinationPath, integer4 timeOut=-1, const varstring espServerIpPort=GETENV('ws_fs_server'), integer4 maxConnections=-1, boolean allowoverwrite=false, const varstring destinationPlane='') : c,action,context,entrypoint='fsfDespray2'; - 
varstring fCopy(const varstring sourceLogicalName, const varstring destinationGroup, const varstring destinationLogicalName, const varstring sourceDali='', integer4 timeOut=-1, const varstring espServerIpPort=GETENV('ws_fs_server'), integer4 maxConnections=-1, boolean allowoverwrite=false, boolean replicate=false, boolean asSuperfile=false, boolean compress=false, boolean forcePush=false, integer4 transferBufferSize=0, boolean preserveCompression=true, boolean noSplit=false, integer4 expireDays=-1, boolean ensure=false) : c,action,context,entrypoint='fsfCopy_v4'; + varstring fCopy(const varstring sourceLogicalName, const varstring destinationGroup, const varstring destinationLogicalName, const varstring sourceDali='', integer4 timeOut=-1, const varstring espServerIpPort=GETENV('ws_fs_server'), integer4 maxConnections=-1, boolean allowoverwrite=false, boolean replicate=false, boolean asSuperfile=false, boolean compress=false, boolean forcePush=false, integer4 transferBufferSize=0, boolean preserveCompression=true, boolean noSplit=false, integer4 expireDays=-1, boolean ensure=false, boolean wrap=false) : c,action,context,entrypoint='fsfCopy_v5'; varstring fMonitorLogicalFileName(const varstring event_name, const varstring name, integer4 shotcount=1,const varstring espServerIpPort=GETENV('ws_fs_server')) : c,action,context,entrypoint='fsfMonitorLogicalFileName'; varstring fMonitorFile(const varstring event_name, const varstring ip, const varstring filename, boolean subdirs=false, integer4 shotcount=1,const varstring espServerIpPort=GETENV('ws_fs_server')) : c,action,context,entrypoint='fsfMonitorFile'; varstring fReplicate(const varstring logicalName, integer4 timeOut=-1, const varstring espServerIpPort=GETENV('ws_fs_server')) : c,action,context,entrypoint='fsfReplicate'; From 9da16ac1777b9c2e9909f2fa033c7dec755a5e4b Mon Sep 17 00:00:00 2001 From: M Kelly Date: Mon, 17 Jun 2024 08:42:35 -0400 Subject: [PATCH 067/151] HPCC-32071 Windows build fix for HPCC-28461 Signed-off-by: M Kelly --- fs/dafilesrv/dafilesrv.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/dafilesrv/dafilesrv.cpp b/fs/dafilesrv/dafilesrv.cpp index 34e875fa315..ffe2e689aea 100644 --- a/fs/dafilesrv/dafilesrv.cpp +++ b/fs/dafilesrv/dafilesrv.cpp @@ -734,6 +734,7 @@ int main(int argc, const char* argv[]) unsigned throttleSlowDelayMs; unsigned throttleSlowCPULimit; unsigned sslport; + unsigned listenQueueLimit; Linked keyPairInfo; StringAttr rowServiceConfiguration; unsigned dedicatedRowServicePort; @@ -763,7 +764,7 @@ int main(int argc, const char* argv[]) unsigned _maxThreads, unsigned _maxThreadsDelayMs, unsigned _maxAsyncCopy, unsigned _parallelRequestLimit, unsigned _throttleDelayMs, unsigned _throttleCPULimit, unsigned _parallelSlowRequestLimit, unsigned _throttleSlowDelayMs, unsigned _throttleSlowCPULimit, - unsigned _sslport, + unsigned _sslport, unsigned _listenQueueLimit, IPropertyTree *_keyPairInfo, const char *_rowServiceConfiguration, unsigned _dedicatedRowServicePort, bool _dedicatedRowServiceSSL, bool _rowServiceOnStdPort) @@ -771,7 +772,7 @@ int main(int argc, const char* argv[]) maxThreads(_maxThreads), maxThreadsDelayMs(_maxThreadsDelayMs), maxAsyncCopy(_maxAsyncCopy), parallelRequestLimit(_parallelRequestLimit), throttleDelayMs(_throttleDelayMs), throttleCPULimit(_throttleCPULimit), parallelSlowRequestLimit(_parallelSlowRequestLimit), throttleSlowDelayMs(_throttleSlowDelayMs), throttleSlowCPULimit(_throttleSlowCPULimit), - sslport(_sslport), + sslport(_sslport), 
listenQueueLimit(_listenQueueLimit), keyPairInfo(_keyPairInfo), rowServiceConfiguration(_rowServiceConfiguration), dedicatedRowServicePort(_dedicatedRowServicePort), dedicatedRowServiceSSL(_dedicatedRowServiceSSL), rowServiceOnStdPort(_rowServiceOnStdPort) { @@ -863,7 +864,7 @@ int main(int argc, const char* argv[]) } service(config, connectMethod, listenep, maxThreads, maxThreadsDelayMs, maxAsyncCopy, parallelRequestLimit, throttleDelayMs, throttleCPULimit, - parallelSlowRequestLimit, throttleSlowDelayMs, throttleSlowCPULimit, sslport, + parallelSlowRequestLimit, throttleSlowDelayMs, throttleSlowCPULimit, sslport, listenQueueLimit, keyPairInfo, rowServiceConfiguration, dedicatedRowServicePort, dedicatedRowServiceSSL, rowServiceOnStdPort); service.start(); return 0; From e3827c673f4001cfdf55827b4a49a768812a2d3b Mon Sep 17 00:00:00 2001 From: Jim DeFabia Date: Mon, 17 Jun 2024 12:06:33 -0400 Subject: [PATCH 068/151] HPCC-32073 Fix Doc Typo: Std.File.RemotePull() s/b Std.File.fRemotePull Signed-off-by: Jim DeFabia --- docs/EN_US/ECLStandardLibraryReference/SLR-Mods/RemotePull.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/EN_US/ECLStandardLibraryReference/SLR-Mods/RemotePull.xml b/docs/EN_US/ECLStandardLibraryReference/SLR-Mods/RemotePull.xml index e4e3ceb79d3..2aeeac499be 100644 --- a/docs/EN_US/ECLStandardLibraryReference/SLR-Mods/RemotePull.xml +++ b/docs/EN_US/ECLStandardLibraryReference/SLR-Mods/RemotePull.xml @@ -28,7 +28,7 @@ role="bold">]) dfuwuid := - STD.File.RemotePull( remoteURL, + STD.File.fRemotePull( remoteURL, sourcelogicalname, destinationGroup , destinationlogicalname, [ ,timeout From bfb1f425895489542fc1d23a586e44d49a44f54c Mon Sep 17 00:00:00 2001 From: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> Date: Mon, 17 Jun 2024 16:03:20 -0400 Subject: [PATCH 069/151] HPCC-32076 ECL Watch v9 fix Logs filter start date fix an issue where the "Start Date" filter param specified by the user was not being correctly set in the request sent to ESP Signed-off-by: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> --- esp/src/src-react/components/Logs.tsx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/esp/src/src-react/components/Logs.tsx b/esp/src/src-react/components/Logs.tsx index 10ee0a988be..911cf63b236 100644 --- a/esp/src/src-react/components/Logs.tsx +++ b/esp/src/src-react/components/Logs.tsx @@ -13,7 +13,7 @@ import { Filter } from "./forms/Filter"; import { Fields } from "./forms/Fields"; import { ShortVerticalDivider } from "./Common"; -const maximumTimeUntilRefresh = 8 * 60 * 60 * 1000; +const eightHours = 8 * 60 * 60 * 1000; const startTimeOffset = 1 * 60 * 60 * 1000; const endTimeOffset = 23 * 60 * 60 * 1000; const defaultStartDate = new Date(new Date().getTime() - startTimeOffset); @@ -115,8 +115,9 @@ export const Logs: React.FunctionComponent = ({ if (typeof filter.StartDate === "string") { filter.StartDate = new Date(filter.StartDate + ":00Z"); } - if (filter.StartDate && now.getTime() - filter.StartDate.getTime() > maximumTimeUntilRefresh) { - filter.StartDate = new Date(now.getTime() - startTimeOffset); + if (!filter.StartDate) { + //assign a reasonable default start date if one isn't set + filter.StartDate = new Date(now.getTime() - eightHours); } if (!filter.EndDate) { filter.EndDate = new Date(now.getTime() + endTimeOffset); From 0df26f28a90f40a85ce90b1238cf39675be8632d Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Tue, 18 Jun 2024 09:51:49 +0100 Subject: [PATCH 070/151] Remove files 
accidentally included in helm directory Signed-off-by: Gavin Halliday --- helm/hpcc/templates/localroxie.yaml.fixed | 161 -------- helm/hpcc/templates/roxie.yaml.fixed | 479 ---------------------- 2 files changed, 640 deletions(-) delete mode 100644 helm/hpcc/templates/localroxie.yaml.fixed delete mode 100644 helm/hpcc/templates/roxie.yaml.fixed diff --git a/helm/hpcc/templates/localroxie.yaml.fixed b/helm/hpcc/templates/localroxie.yaml.fixed deleted file mode 100644 index 1365ca48448..00000000000 --- a/helm/hpcc/templates/localroxie.yaml.fixed +++ /dev/null @@ -1,161 +0,0 @@ -{{/* - ---- DO NOT EDIT THIS FILE - all configuration of HPCC platform should be done via values.yaml ---- - -############################################################################## - - HPCC SYSTEMS software Copyright (C) 2021 HPCC Systems®. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -############################################################################## - -*/}} -{{/* -localroxie configmap -Pass in dict with root and me -*/}} -{{- define "hpcc.localroxieConfigMap" }} -apiVersion: v1 -metadata: - name: {{ .me.name }}-configmap -data: - {{ .me.name }}.yaml: - version: 1.0 - roxie: -{{ toYaml (omit .me "logging" "tracing" "env") | indent 6 }} -{{- include "hpcc.generateLoggingConfig" . | indent 6 }} -{{- include "hpcc.generateTracingConfig" . | indent 6 }} -{{ include "hpcc.generateVaultConfig" . | indent 6 }} - global: -{{ include "hpcc.generateGlobalConfigMap" .root | indent 6 }} -{{- end -}}{{/* define "hpcc.localroxieConfigMap */}} - -{{ range $roxie := $.Values.roxie -}} -{{- if not $roxie.disabled -}} -{{- $env := concat ($.Values.global.env | default list) (.env | default list) -}} -{{- $secretsCategories := list "system" "eclUser" "ecl" "storage" }} -{{- $enginePlaneDetails := dict -}} -{{- $_ := include "hpcc.getEnginePlanes" (dict "root" $ "me" .
"result" $enginePlaneDetails) -}} -{{- $commonCtx := dict "root" $ "me" $roxie "includeCategories" $enginePlaneDetails.planeCategories "includeNames" $enginePlaneDetails.namedPlanes "secretsCategories" $secretsCategories "env" $env }} -{{- $configSHA := include "hpcc.getConfigSHA" ($commonCtx | merge (dict "configMapHelper" "hpcc.localroxieConfigMap" "component" "roxie" "excludeKeys" "global")) }} -{{- include "hpcc.checkDefaultStoragePlane" $commonCtx }} -{{- $singleNode := (hasKey $roxie "singleNode") | ternary $roxie.singleNode ((hasKey $roxie "localAgent") | ternary $roxie.localAgent false) }} -{{- if $singleNode -}} -{{- $localAgent := ((hasKey $roxie "localAgent") | ternary $roxie.localAgent true) -}} -{{- $name := $roxie.name -}} -{{- $servername := printf "%s-server" $roxie.name -}} - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ $roxie.name | quote }} -spec: - replicas: {{ $roxie.replicas | default 1 }} - selector: - matchLabels: - run: {{ $roxie.name | quote }} - server: {{ $servername | quote }} - template: - metadata: - labels: - run: {{ $roxie.name | quote }} - server: {{ $servername | quote }} - accessDali: "yes" - accessEsp: "yes" -<<<<<<< HEAD - helmVersion: 9.0.119-closedown0 -{{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} -{{- if hasKey . "labels" }} -{{ toYaml .labels | indent 8 }} -{{- end }} -======= - helmVersion: 9.0.119-closedown0 - {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} -{{- if hasKey . "labels" }} -{{ toYaml .labels | indent 8 }} -{{- end }} ->>>>>>> origin/candidate-9.6.x - annotations: - checksum/config: {{ $configSHA }} -{{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} - spec: -{{- include "hpcc.placementsByPodTargetType" (dict "root" $ "pod" $roxie.name "target" $roxie.name "type" "roxie") | indent 6 }} - serviceAccountName: "hpcc-default" - initContainers: -{{- include "hpcc.createConfigInitContainers" $commonCtx | indent 6 }} -{{- include "hpcc.addImagePullSecrets" $commonCtx | nindent 6 -}} - containers: - - name: {{ $roxie.name | quote }} - workingDir: /var/lib/HPCCSystems - command: [ {{ include "hpcc.componentCommand" (dict "me" $roxie "root" $ "process" "roxie") }} ] - args: [ {{- include "hpcc.componentStartArgs" (dict "me" $roxie "root" $ "process" "roxie") | nindent 16 }} - {{ include "hpcc.configArg" $roxie }}, - {{ include "hpcc.daliArg" (dict "root" $ "component" "Local Roxie" "optional" false) }}, - "--server=true", - "--localAgent={{ $localAgent }}", - "--resolveLocally=false" - ] - env: -{{ include "hpcc.mergeEnvironments" $env | indent 8 -}} - - name: "SENTINEL" - value: "/tmp/{{ $roxie.name }}.sentinel" -{{- $local := dict "first" true }} -{{- range $service := $roxie.services }} -{{- if ne (int $service.servicePort) 0 }} -{{- if $local.first }} -{{- $_ := set $local "first" false }} - ports: -{{- end }} - - name: {{ $service.name }} - containerPort: {{ $service.servicePort }} -{{- end }} -{{- end }} -{{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} -{{- include "hpcc.addResources" (dict "me" $roxie.resources "root" $) | indent 8 }} -{{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} - volumeMounts: -{{ include "hpcc.addConfigMapVolumeMount" . 
| indent 8 }} -{{ include "hpcc.addVolumeMounts" $commonCtx | indent 8 }} -{{- include "hpcc.addSecretVolumeMounts" $commonCtx | indent 8 }} -{{ include "hpcc.addVaultClientCertificateVolumeMounts" $commonCtx | indent 8 }} -{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "name" $roxie.name "component" "localroxie" "external" false) | indent 8 }} -{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "name" $roxie.name "component" "localroxie" "external" true "includeRemote" true) | indent 8 }} -{{ include "hpcc.addUDPCertificateVolumeMount" (dict "root" $ "name" $roxie.name "component" "localudpkey" ) | indent 8 }} - volumes: -{{ include "hpcc.addConfigMapVolume" . | indent 6 }} -{{ include "hpcc.addVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addSecretVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addVaultClientCertificateVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addCertificateVolume" (dict "root" $ "name" $roxie.name "component" "localroxie" "external" false) | indent 6 }} -{{ include "hpcc.addCertificateVolume" (dict "root" $ "name" $roxie.name "component" "localroxie" "external" true "includeRemote" true) | indent 6 }} -{{ include "hpcc.addUDPCertificateVolume" (dict "root" $ "name" $roxie.name "component" "localudpkey" ) | indent 6 }} ---- -{{- range $service := $roxie.services }} -{{- if ne (int $service.servicePort) 0 }} -{{ include "hpcc.addService" ( dict "root" $ "name" $service.name "service" $service "selector" $servername "defaultPort" $service.servicePort ) }} ---- -{{- end }} -{{- end }} -kind: ConfigMap -{{ include "hpcc.generateConfig" ($commonCtx | merge (dict "configMapHelper" "hpcc.localroxieConfigMap")) }} ---- -{{ include "hpcc.addCertificate" (dict "root" $ "name" $roxie.name "services" $roxie.services "component" "localroxie" "external" false) }} -{{ include "hpcc.addCertificate" (dict "root" $ "name" $roxie.name "services" $roxie.services "component" "localroxie" "external" true "includeRemote" true) }} -{{ include "hpcc.addUDPCertificate" (dict "root" $ "name" $roxie.name "component" "localudpkey") }} ---- -{{ include "hpcc.addEgress" $commonCtx }} - -{{- end }}{{/* if singleNode */}} -{{- end }}{{/* if not disabled */}} -{{- end }}{{/* range */}} diff --git a/helm/hpcc/templates/roxie.yaml.fixed b/helm/hpcc/templates/roxie.yaml.fixed deleted file mode 100644 index 3f764809c02..00000000000 --- a/helm/hpcc/templates/roxie.yaml.fixed +++ /dev/null @@ -1,479 +0,0 @@ -{{/* - ---- DO NOT EDIT THIS FILE - all configuration of HPCC platform should be done via values.yaml ---- - -############################################################################## - - HPCC SYSTEMS software Copyright (C) 2021 HPCC Systems®. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- -############################################################################## - -*/}} - -{{/* -roxie configmap -Pass in dict with root and me -*/}} -{{- define "hpcc.roxieConfigMap" -}} -apiVersion: v1 -metadata: - name: {{ .me.name }}-configmap -data: - {{ .me.name }}.yaml: - version: 1.0 - roxie: -{{- $root := .root -}} -{{- $component := .me }} - services: -{{- range $service := .me.services }} - - name: {{ $service.name }} -{{ toYaml (omit $service "tls" "name") | indent 8 }} -{{- if ne (int $service.servicePort) 0 }} -{{- include "hpcc.addTLSServiceEntries" (dict "root" $root "service" $service "component" $component "visibility" $service.visibility "remoteClients" $service.remoteClients "trustClients" $service.trustClients "includeTrustedPeers" true "incluedRoxieAndEspServices" true) | indent 6 }} -{{- end }} -{{- end }} -{{ toYaml ( omit .me "logging" "tracing" "topoServer" "encryptInTransit" "env" "services") | indent 6 }} - numChannels: {{ .numChannels }} - topologyServers: "{{ .toponame }}:{{ .topoport }}" - heartbeatInterval: {{ .heartbeatInterval }} - resolveLocally: false -{{- $mtlsEnabled := (eq (include "hpcc.isMtlsEnabled" (dict "root" .root)) "true") -}} -{{/* By default use encryption if local certificates are enabled, but allow it to be turned off via roxie .encryptInTransit value */}} -{{- if (hasKey .me "encryptInTransit") -}} -{{- if and (.me.encryptInTransit) (not $mtlsEnabled) -}} -{{- $_ := fail (printf "Roxie %s encryptInTransit requires local cert-manager configuration." .me.name ) }} -{{- end }} - encryptInTransit: {{ .me.encryptInTransit }} -{{ else }} - encryptInTransit: {{ $mtlsEnabled }} -{{ end -}} -{{- include "hpcc.generateLoggingConfig" (dict "root" .root "me" .me) | indent 6 }} -{{- include "hpcc.generateTracingConfig" (dict "root" .root "me" .me) | indent 6 }} -{{ include "hpcc.generateVaultConfig" . | indent 6 }} - global: -{{ include "hpcc.generateGlobalConfigMap" .root | indent 6 }} -{{- end -}}{{/*define "hpcc.roxieConfigMap"*/}} - -{{- define "hpcc.roxieTopoConfigMap" -}} -apiVersion: v1 -metadata: - name: {{ .toponame }}-configmap -data: - {{ .toponame }}.yaml: - version: 1.0 - toposerver: -{{ toYaml ( omit .toposerver "logging" "tracing" "env") | indent 6 }} -{{- include "hpcc.generateLoggingConfig" (dict "root" .root "me" .toposerver) | indent 6 }} -{{- include "hpcc.generateTracingConfig" (dict "root" .root "me" .toposerver) | indent 6 }} - global: -{{ include "hpcc.generateGlobalConfigMap" .root | indent 6 }} -{{- end -}}{{/*define "hpcc.roxieConfigMap"*/}} - -{{ range $roxie := $.Values.roxie -}} -{{- if not $roxie.disabled -}} -{{- $env := concat ($.Values.global.env | default list) (.env | default list) -}} -{{- $secretsCategories := list "system" "eclUser" "ecl" "storage" }} -{{- $toposerver := ($roxie.topoServer | default dict) -}} -{{- $enginePlaneDetails := dict -}} -{{- $_ := include "hpcc.getEnginePlanes" (dict "root" $ "me" . 
"result" $enginePlaneDetails) -}} -{{- $commonCtx := dict "root" $ "me" $roxie "includeCategories" $enginePlaneDetails.planeCategories "includeNames" $enginePlaneDetails.namedPlanes "secretsCategories" $secretsCategories "toposerver" $toposerver "env" $env }} -{{- $_ := set $commonCtx "toponame" (printf "%s-toposerver" $roxie.name) -}} -{{- $_ := set $commonCtx "numChannels" ($roxie.numChannels | int | default 1) -}} -{{- $_ := set $commonCtx "topoport" ($toposerver.port | int | default 9004) -}} -{{- $_ := set $commonCtx "heartbeatInterval" ($toposerver.heartbeatInterval | int | default 10000) -}} -{{- $_ := set $toposerver "name" $commonCtx.toponame -}} -{{- $configSHA := include "hpcc.getConfigSHA" ($commonCtx | merge (dict "configMapHelper" "hpcc.roxieConfigMap" "component" "roxie" "excludeKeys" "global")) }} -{{- $topoconfigSHA := include "hpcc.getConfigSHA" ($commonCtx | merge (dict "configMapHelper" "hpcc.roxieTopoConfigMap" "component" "toposerver" "excludeKeys" "global")) }} -{{- include "hpcc.checkDefaultStoragePlane" $commonCtx }} -{{- $singleNode := (hasKey $roxie "singleNode") | ternary $roxie.singleNode ((hasKey $roxie "localAgent") | ternary $roxie.localAgent false) }} -{{- if not $singleNode -}} -{{- $servername := printf "%s-server" $roxie.name -}} -{{- $udpkeyname := $roxie.name -}} -{{- range $service := $roxie.services }} -{{- range $remoteClient := $service.remoteClients }} - {{ include "hpcc.addExternalRemoteClientCertificate" (dict "root" $ "client" $remoteClient.name "organization" $remoteClient.organization "instance" $service.name "component" "roxie" "visibility" $service.visibility "secretTemplate" $remoteClient.secretTemplate) }} -{{- end }} -{{- if ne (int $service.servicePort) 0 }} -{{- $_ := set $service "port" $service.servicePort }} -{{- end }} -{{- end }} - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ $commonCtx.toponame | quote }} -spec: - replicas: {{ $toposerver.replicas | default 1 }} - selector: - matchLabels: - run: {{ $commonCtx.toponame | quote }} - template: - metadata: - labels: -{{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} - run: {{ $commonCtx.toponame | quote }} - roxie-cluster: {{ $roxie.name | quote }} -<<<<<<< HEAD - helmVersion: 9.0.119-closedown0 -{{- if hasKey $.Values.global "metrics" }} -{{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} -{{- end }} -{{- if hasKey $toposerver "labels" }} -{{ toYaml $toposerver.labels | indent 8 }} -{{- end }} -======= - helmVersion: 9.0.119-closedown0 -{{- if hasKey $.Values.global "metrics" }} - {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} -{{- end }} -{{- if hasKey $toposerver "labels" }} -{{ toYaml $toposerver.labels | indent 8 }} -{{- end }} ->>>>>>> origin/candidate-9.6.x - annotations: - checksum/config: {{ $topoconfigSHA }} -{{- include "hpcc.generateAnnotations" (dict "root" $commonCtx.root "me" $toposerver) | indent 8 }} -{{- if hasKey $.Values.global "metrics" }} -{{- include "hpcc.addPrometheusScrapeAnnotations" $.Values.global.metrics | nindent 8 }} -{{- end }} - spec: -{{- include "hpcc.placementsByPodTargetType" (dict "root" $ "pod" $commonCtx.toponame "target" $roxie.name "type" "roxie") | indent 6 }} - serviceAccountName: "hpcc-default" -{{- include "hpcc.addImagePullSecrets" $commonCtx | nindent 6 -}} - containers: - - name: {{ $commonCtx.toponame | quote }} -{{ include "hpcc.addSentinelProbes" 
$toposerver | indent 8 }} -{{- include "hpcc.addSecurityContext" $commonCtx | indent 8 }} -{{- $defaultResources := dict "cpu" "500m" "memory" "200Mi" }} -{{- include "hpcc.addResources" (dict "me" .topoResources "defaults" $defaultResources "root" $) | indent 8 }} -{{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} - workingDir: /var/lib/HPCCSystems - command: [ {{ include "hpcc.componentCommand" (dict "me" $toposerver "root" $ "process" "toposerver") }} ] - args: [ {{- include "hpcc.componentStartArgs" (dict "me" $toposerver "root" $ "process" "toposerver") | nindent 16 }} - {{ include "hpcc.configArg" $toposerver }} - ] - env: -{{ include "hpcc.mergeEnvironments" $env | indent 8 -}} - - name: "SENTINEL" - value: "/tmp/{{ $commonCtx.toponame }}.sentinel" - volumeMounts: -{{ include "hpcc.addConfigMapVolumeMount" $toposerver | indent 8 }} -{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "component" "topo" "name" $commonCtx.toponame "external" false) | indent 8 }} - volumes: -{{ include "hpcc.addConfigMapVolume" $toposerver | indent 6 }} -{{ include "hpcc.addCertificateVolume" (dict "root" $ "component" "topo" "name" $commonCtx.toponame "external" false) | indent 6 }} - ---- -{{ include "hpcc.addCertificate" (dict "root" $ "name" $commonCtx.toponame "servicename" $commonCtx.toponame "component" "topo" "external" false) }} -{{ include "hpcc.addUDPCertificate" (dict "root" $ "name" $udpkeyname "component" "udpkey") }} - ---- -{{- range $service := $roxie.services }} -{{- if ne (int $service.servicePort) 0 }} -{{ include "hpcc.addService" ( dict "root" $ "name" $service.name "service" $service "selector" $servername "defaultPort" $service.servicePort) }} ---- -{{- end }} -{{- end }} - -apiVersion: v1 -kind: Service -metadata: - name: {{ $commonCtx.toponame | quote }} - labels: -<<<<<<< HEAD - helmVersion: 9.0.119-closedown0 -{{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} -======= - helmVersion: 9.0.119-closedown0 - {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} ->>>>>>> origin/candidate-9.6.x -spec: - ports: - - port: {{ $commonCtx.topoport }} - protocol: TCP - targetPort: {{ $commonCtx.topoport }} - selector: - run: {{ $commonCtx.toponame | quote }} - clusterIP: None # Headless service ---- - -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ printf "%s-internal-traffic" $roxie.name }} -spec: - podSelector: - matchLabels: - roxie-cluster: {{ $roxie.name | quote }} - policyTypes: - - Ingress - - Egress - ingress: - - from: - - podSelector: - matchLabels: - roxie-cluster: {{ $roxie.name | quote }} - egress: - - to: - - podSelector: - matchLabels: - roxie-cluster: {{ $roxie.name | quote }} - ---- -kind: ConfigMap -{{ include "hpcc.generateConfig" ($commonCtx | merge (dict "configMapHelper" "hpcc.roxieConfigMap")) }} ---- -kind: ConfigMap -{{ include "hpcc.generateConfig" ($commonCtx | merge (dict "configMapHelper" "hpcc.roxieTopoConfigMap")) }} ---- - -{{- $_ := set $commonCtx "instanceNames" list -}} -{{ if $roxie.serverReplicas -}} -{{ $_ := set $commonCtx "instanceNames" (list $servername) -}} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ $servername | quote }} -spec: - replicas: {{ $roxie.serverReplicas }} - selector: - matchLabels: - run: {{ $servername | quote }} - template: - metadata: - labels: - run: {{ $servername | quote }} - server: {{ 
$roxie.name | quote }} - roxie-cluster: {{ $roxie.name | quote }} - accessDali: "yes" - accessEsp: "yes" -<<<<<<< HEAD - helmVersion: 9.0.119-closedown0 -{{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} -{{- if hasKey $.Values.global "metrics" }} -{{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} -{{- end }} -{{- if hasKey $roxie "labels" }} -{{ toYaml $roxie.labels | indent 8 }} -{{- end }} -======= - helmVersion: 9.0.119-closedown0 - {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} -{{- if hasKey $.Values.global "metrics" }} - {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} -{{- end }} -{{- if hasKey $roxie "labels" }} -{{ toYaml $roxie.labels | indent 8 }} -{{- end }} ->>>>>>> origin/candidate-9.6.x - annotations: - checksum/config: {{ $configSHA }} -{{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} -{{- if hasKey $.Values.global "metrics" }} -{{- include "hpcc.addPrometheusScrapeAnnotations" $.Values.global.metrics | nindent 8 }} -{{- end }} -{{- if hasKey $roxie "annotations" }} -{{ toYaml $roxie.annotations | indent 8 }} -{{- end }} - spec: -{{- include "hpcc.placementsByPodTargetType" (dict "root" $ "pod" $servername "target" $roxie.name "type" "roxie") | indent 6 }} - serviceAccountName: "hpcc-default" - initContainers: -{{- include "hpcc.createConfigInitContainers" $commonCtx | indent 6 }} -{{- include "hpcc.addImagePullSecrets" $commonCtx | nindent 6 -}} - terminationGracePeriodSeconds: {{ add ($roxie.agentQueryReleaseDelaySeconds | default 60) 30 }} - containers: - - name: {{ $servername | quote }} - workingDir: /var/lib/HPCCSystems - command: [ {{ include "hpcc.componentCommand" (dict "me" $roxie "root" $ "process" "roxie") }} ] - args: [ {{- include "hpcc.componentStartArgs" (dict "me" $roxie "root" $ "process" "roxie") | nindent 16 }} - {{ include "hpcc.daliArg" (dict "root" $ "component" "Roxie" "optional" false) }}, - "--server=true" - ] - env: -{{ include "hpcc.mergeEnvironments" $env | indent 8 -}} - - name: "SENTINEL" - value: "/tmp/{{ $roxie.name }}.sentinel" -{{- $local := dict "first" true }} -{{- range $service := $roxie.services }} -{{- if ne (int $service.servicePort) 0 }} -{{- if $local.first }} -{{- $_ := set $local "first" false }} - ports: -{{- end }} - - name: {{ $service.name }} - containerPort: {{ $service.servicePort }} -{{- end }} -{{- end }} - lifecycle: - preStop: - exec: - command: ["testsocket", ".", "control:closedown"] -{{ include "hpcc.addSentinelProbes" ( $roxie | merge (dict "readyProbeName" ".ready" )) | indent 8 }} -{{ include "hpcc.addSecurityContext" (dict "root" $ "me" .) 
| indent 8 }} -{{- include "hpcc.addResources" (dict "me" ($roxie.serverResources | default $roxie.resources) "root" $) | indent 8 }} -{{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} - volumeMounts: -{{ include "hpcc.addConfigMapVolumeMount" $roxie | indent 8 }} -{{ include "hpcc.addVolumeMounts" $commonCtx | indent 8 }} -{{ include "hpcc.addSecretVolumeMounts" $commonCtx | indent 8 }} -{{ include "hpcc.addVaultClientCertificateVolumeMounts" $commonCtx | indent 8 }} -{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "component" "roxie-server" "name" $servername "external" false) | indent 8 }} -{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "component" "roxie-server" "name" $servername "certificate" $roxie.certificate "external" true "includeRemote" true) | indent 8 }} -{{ include "hpcc.addUDPCertificateVolumeMount" (dict "root" $ "component" "udpkey" "name" $udpkeyname ) | indent 8 }} - volumes: -{{ include "hpcc.addConfigMapVolume" $roxie | indent 6 }} -{{ include "hpcc.addVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addSecretVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addVaultClientCertificateVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addCertificateVolume" (dict "root" $ "component" "roxie-server" "name" $servername "external" false) | indent 6 }} -{{ include "hpcc.addCertificateVolume" (dict "root" $ "component" "roxie-server" "name" $servername "certificate" $roxie.certificate "external" true "includeRemote" true) | indent 6 }} -{{ include "hpcc.addUDPCertificateVolume" (dict "root" $ "component" "udpkey" "name" $udpkeyname) | indent 6 }} - ---- -{{ include "hpcc.addCertificate" (dict "root" $ "name" $servername "services" $roxie.services "component" "roxie-server" "external" false) }} -{{ include "hpcc.addCertificate" (dict "root" $ "name" $servername "services" $roxie.services "component" "roxie-server" "external" true "includeRemote" true) }} ---- -{{ end -}}{{/* if serverReplicas */}} - -{{- $agentPublicCertName := printf "%s-agent" $roxie.name }} -{{ include "hpcc.addCertificate" (dict "root" $ "name" $agentPublicCertName "services" $roxie.services "component" "roxie-agent" "external" true "includeRemote" true) }} - -{{ range $c, $e := until ($commonCtx.numChannels|int) -}} -{{- $channel := add $c 1 -}} -{{- $name := printf "%s-agent-%d" $roxie.name $channel }} -{{- $_ := set $commonCtx "instanceNames" (append $commonCtx.instanceNames $name) }} - -{{ include "hpcc.addCertificate" (dict "root" $ "name" $name "services" $roxie.services "component" "roxie-agent" "external" false) }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ $name | quote}} -spec: - replicas: {{ (hasKey $roxie "replicas") | ternary $roxie.replicas 1 }} - selector: - matchLabels: - run: {{ $name | quote}} - template: - metadata: - labels: -{{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-agent" "name" "roxie" "instance" $name) | indent 8 }} - run: {{ $name | quote}} -{{- if not $roxie.serverReplicas }} - server: {{ $servername | quote }} -{{- end }} - roxie-cluster: {{ $roxie.name | quote }} - accessDali: "yes" - accessEsp: "yes" -<<<<<<< HEAD - helmVersion: 9.0.119-closedown0 -{{- if hasKey $.Values.global "metrics" }} -{{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} -{{- end }} -{{- if hasKey $roxie "labels" }} -{{ toYaml $roxie.labels | indent 8 }} -{{- end }} -======= - helmVersion: 9.0.119-closedown0 -{{- if hasKey $.Values.global "metrics" }} - {{- include 
"hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} -{{- end }} -{{- if hasKey $roxie "labels" }} -{{ toYaml $roxie.labels | indent 8 }} -{{- end }} ->>>>>>> origin/candidate-9.6.x - annotations: - checksum/config: {{ $configSHA }} -{{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} -{{- if hasKey $.Values.global "metrics" }} -{{- include "hpcc.addPrometheusScrapeAnnotations" $.Values.global.metrics | nindent 8 }} -{{- end }} - spec: -{{- include "hpcc.placementsByPodTargetType" (dict "root" $ "pod" $name "target" $roxie.name "type" "roxie") | indent 6 }} - serviceAccountName: "hpcc-default" - initContainers: -{{- include "hpcc.createConfigInitContainers" $commonCtx | indent 6 }} -{{- include "hpcc.addImagePullSecrets" $commonCtx | nindent 6 -}} - terminationGracePeriodSeconds: {{ add ($roxie.agentQueryReleaseDelaySeconds | default 60) 30 }} - containers: - - name: {{ $name | quote}} - workingDir: /var/lib/HPCCSystems - command: [ {{ include "hpcc.componentCommand" (dict "me" $roxie "root" $ "process" "roxie") }} ] - args: [ {{- include "hpcc.componentStartArgs" (dict "me" $roxie "root" $ "process" "roxie") | nindent 16 }} - {{ include "hpcc.configArg" $roxie }}, - {{ include "hpcc.daliArg" (dict "root" $ "component" "Roxie" "optional" false) }}, - "--channels={{ $channel }}", - "--server={{ not $roxie.serverReplicas }}", - ] - env: -{{ include "hpcc.mergeEnvironments" $env | indent 8 -}} - - name: "SENTINEL" - value: "/tmp/{{ $roxie.name }}.sentinel" -{{- if not $roxie.serverReplicas }} -{{- $local := dict "first" true }} -{{- range $service := $roxie.services }} -{{- if ne (int $service.servicePort) 0 }} -{{- if $local.first }} -{{- $_ := set $local "first" false }} - ports: -{{- end }} - - name: {{ $service.name }} - containerPort: {{ $service.servicePort }} -{{- end }} -{{- end }} - lifecycle: - preStop: - exec: - command: ["testsocket", ".", "control:closedown"] -{{ include "hpcc.addSentinelProbes" ( $roxie | merge (dict "readyProbeName" ".ready" )) | indent 8 }} -{{- end }}{{/* not serverReplicas */}} -{{ include "hpcc.addSecurityContext" (dict "root" $ "me" .) 
| indent 8 }} -{{- include "hpcc.addResources" (dict "me" ($roxie.channelResources | default $roxie.resources) "root" $) | indent 8 }} -{{ include "hpcc.addImageAttrs" $commonCtx | indent 8 }} - volumeMounts: -{{ include "hpcc.addConfigMapVolumeMount" $roxie | indent 8 }} -{{ include "hpcc.addVolumeMounts" $commonCtx | indent 8 }} -{{ include "hpcc.addSecretVolumeMounts" $commonCtx | indent 8 }} -{{ include "hpcc.addVaultClientCertificateVolumeMounts" $commonCtx | indent 8 }} -{{- if not $roxie.serverReplicas }} - -{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "component" "roxie-agent" "name" $name "external" false) | indent 8 }} -{{ include "hpcc.addCertificateVolumeMount" (dict "root" $ "component" "roxie-agent" "name" $agentPublicCertName "certificate" $roxie.certificate "external" true "includeRemote" true) | indent 8 }} -{{ include "hpcc.addUDPCertificateVolumeMount" (dict "root" $ "component" "udpkey" "name" $udpkeyname ) | indent 8 }} -{{- end }}{{/* not serverReplicas */}} - - volumes: -{{ include "hpcc.addConfigMapVolume" $roxie | indent 6 }} -{{ include "hpcc.addVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addSecretVolumes" $commonCtx | indent 6 }} -{{ include "hpcc.addVaultClientCertificateVolumes" $commonCtx | indent 6 }} -{{- if not $roxie.serverReplicas }} -{{ include "hpcc.addCertificateVolume" (dict "root" $ "component" "roxie-agent" "name" $name "external" false) | indent 6 }} -{{ include "hpcc.addCertificateVolume" (dict "root" $ "component" "roxie-agent" "name" $agentPublicCertName "certificate" $roxie.certificate "external" true "includeRemote" true) | indent 6 }} -{{ include "hpcc.addUDPCertificateVolume" (dict "root" $ "component" "udpkey" "name" $udpkeyname) | indent 6 }} -{{- end }}{{/* not serverReplicas */}} ---- - -{{- end }} -{{- end }}{{/* if not singlenode */}} ---- -{{ include "hpcc.addEgress" (dict "root" $ "me" $roxie "labels" $commonCtx.instanceNames) }} -{{- if hasKey . "hpa" }} -{{- include "hpcc.addHorizontalPodAutoscaler" (dict "name" $roxie.name "kind" "Deployment" "hpa" $roxie.hpa) }} -{{- end }} -{{- end }}{{/* if not disabled */}} -{{- end }}{{/* range */}} - From d5574ace2d557fa26013251d3a7a8e97769da5ce Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Mon, 17 Jun 2024 18:15:55 +0100 Subject: [PATCH 071/151] HPCC-31627 Remove legacy central node delay Some very old redundant code that was not being hit until the HPCC-18382 changes were introduced caused random delays when sockets were connecting in Thor. This was spuriously seen when SORT was connecting merge streams, introducing significant delays overall. 
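For a sense of scale: the removed code slept for getRandom() % 1000 milliseconds before each qualifying connect, so with serial connects the expected cost grows linearly in the number of streams. A back-of-envelope model of that cost (an illustrative sketch only, not platform code; it assumes serial connects and the uniform random delay shown in the removed block below):

    #include <cstdio>

    // Expected pre-connect delay removed by this patch: each affected
    // connect slept a uniform random 0-999 ms, i.e. ~500 ms on average.
    static unsigned expectedSetupDelayMs(unsigned serialConnects)
    {
        return serialConnects * 500;
    }

    int main()
    {
        // e.g. a SORT merging 100 streams serially paid roughly 50 seconds
        // of artificial delay before any data flowed
        printf("100 streams -> ~%u ms\n", expectedSetupDelayMs(100));
        return 0;
    }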
Signed-off-by: Jake Smith --- system/jlib/jsocket.cpp | 28 ++-------------------------- system/jlib/jsocket.hpp | 3 --- thorlcr/slave/thslavemain.cpp | 1 - 3 files changed, 2 insertions(+), 30 deletions(-) diff --git a/system/jlib/jsocket.cpp b/system/jlib/jsocket.cpp index 962d328364b..9f4bb2d6b72 100644 --- a/system/jlib/jsocket.cpp +++ b/system/jlib/jsocket.cpp @@ -353,9 +353,6 @@ struct xfd_set { __fd_mask fds_bits[XFD_SETSIZE / __NFDBITS]; }; // define our o #define T_SOCKET int #define SEND_FLAGS (MSG_NOSIGNAL) #endif -#ifdef CENTRAL_NODE_RANDOM_DELAY -static SocketEndpointArray CentralNodeArray; -#endif enum SOCKETMODE { sm_tcp_server, sm_tcp, sm_udp_server, sm_udp, sm_multicast_server, sm_multicast}; #define BADSOCKERR(err) ((err==JSE_BADF)||(err==JSE_NOTSOCK)) @@ -1538,20 +1535,8 @@ void CSocket::connect_wait(unsigned timems) bool exit = false; int err; unsigned refuseddelay = 1; - while (!exit) { -#ifdef CENTRAL_NODE_RANDOM_DELAY - ForEachItemIn(cn,CentralNodeArray) { - const SocketEndpoint &ep=CentralNodeArray.item(cn); - if (ep.ipequals(targetip)) { - unsigned sleeptime = getRandom() % 1000; - StringBuffer s; - ep.getHostText(s); - DBGLOG("Connection to central node %s - sleeping %d milliseconds", s.str(), sleeptime); - Sleep(sleeptime); - break; - } - } -#endif + while (!exit) + { unsigned remaining; exit = tm.timedout(&remaining); bool blockselect = exit; // if last time round block @@ -6189,15 +6174,6 @@ ISocketBufferReader *createSocketBufferReader(const char *trc) } -extern jlib_decl void markNodeCentral(SocketEndpoint &ep) -{ -#ifdef CENTRAL_NODE_RANDOM_DELAY - CriticalBlock block(CSocket::crit); - CentralNodeArray.append(ep); -#endif -} - - static CSocket *prepareSocket(unsigned idx,const SocketEndpoint &ep, ISocketConnectNotify &inotify) { Owned sock = new CSocket(ep,sm_tcp,NULL); diff --git a/system/jlib/jsocket.hpp b/system/jlib/jsocket.hpp index 6587e9410ff..83c01639ef6 100644 --- a/system/jlib/jsocket.hpp +++ b/system/jlib/jsocket.hpp @@ -72,7 +72,6 @@ enum JSOCKET_ERROR_CODES { #ifndef _WIN32 #define BLOCK_POLLED_SINGLE_CONNECTS // NB this is much slower in windows -#define CENTRAL_NODE_RANDOM_DELAY #else #define USERECVSEM // to singlethread BF_SYNC_TRANSFER_PUSH #endif @@ -633,8 +632,6 @@ interface ISocketBufferReader: extends IInterface extern jlib_decl ISocketBufferReader *createSocketBufferReader(const char *trc=NULL); -extern jlib_decl void markNodeCentral(SocketEndpoint &ep); // random delay for linux - interface ISocketConnectNotify { public: diff --git a/thorlcr/slave/thslavemain.cpp b/thorlcr/slave/thslavemain.cpp index 2f0c496a8cf..17bea9fb450 100644 --- a/thorlcr/slave/thslavemain.cpp +++ b/thorlcr/slave/thslavemain.cpp @@ -434,7 +434,6 @@ int main( int argc, const char *argv[] ) SocketEndpoint masterEp(master); localHostToNIC(masterEp); setMasterPortBase(masterEp.port); - markNodeCentral(masterEp); if (RegisterSelf(masterEp)) { From fef13042a56b9af13160afa0872603b7ef6f2c0f Mon Sep 17 00:00:00 2001 From: Jim DeFabia Date: Tue, 18 Jun 2024 09:52:48 -0400 Subject: [PATCH 072/151] HPCC-32081 Fix typo in containerized doc Signed-off-by: Jim DeFabia --- .../ContainerizedHPCC/ContainerizedMods/LocalDeployment.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/EN_US/ContainerizedHPCC/ContainerizedMods/LocalDeployment.xml b/docs/EN_US/ContainerizedHPCC/ContainerizedMods/LocalDeployment.xml index 4952471fb55..59430122cdc 100644 --- a/docs/EN_US/ContainerizedHPCC/ContainerizedMods/LocalDeployment.xml +++ 
b/docs/EN_US/ContainerizedHPCC/ContainerizedMods/LocalDeployment.xml @@ -16,7 +16,7 @@ Note: When you install Docker Desktop, it installs Kubernetes and the kubectl command line interface. - You merely need to enable it in Docker Desktop settings. + You merely need to enable it in Docker Desktop settings. @@ -302,7 +302,7 @@ helm install hpcc-localfile hpcc/hpcc-localfile --set common.hostpath=~/hpccdata Note: - The value passed to --set common-hostpath is case + The value passed to --set common.hostpath is case sensitive. From c41e168f577bc515563a27a843ece133698055c3 Mon Sep 17 00:00:00 2001 From: Ken Rowland Date: Tue, 14 May 2024 13:36:29 -0400 Subject: [PATCH 073/151] HPCC-31761 LDAP caches first user's default file scope permission and uses it for future requests Added a per user default file scope permission cache Signed-Off-By: Kenneth Rowland kenneth.rowland@lexisnexisrisk.com --- esp/applications/common/ldap/ldap.yaml | 2 +- helm/hpcc/values.schema.json | 4 ++ initfiles/componentfiles/configxml/dali.xsl | 3 + initfiles/componentfiles/configxml/esp.xsl | 3 + system/security/LdapSecurity/ldapsecurity.cpp | 2 + system/security/LdapSecurity/ldapsecurity.ipp | 1 + system/security/shared/caching.cpp | 62 ++++++++++++++++--- system/security/shared/caching.hpp | 9 +++ 8 files changed, 78 insertions(+), 8 deletions(-) diff --git a/esp/applications/common/ldap/ldap.yaml b/esp/applications/common/ldap/ldap.yaml index 3f87d56f518..7ff618f050b 100644 --- a/esp/applications/common/ldap/ldap.yaml +++ b/esp/applications/common/ldap/ldap.yaml @@ -21,4 +21,4 @@ ldap: ldapAdminVaultId: "" hpccAdminSecretKey: "" hpccAdminVaultId: "" - checkScopeScans: true + checkScopeScans: true \ No newline at end of file diff --git a/helm/hpcc/values.schema.json b/helm/hpcc/values.schema.json index ffdc4935bb4..c43ae249e26 100644 --- a/helm/hpcc/values.schema.json +++ b/helm/hpcc/values.schema.json @@ -1086,6 +1086,10 @@ "checkScopeScans": { "type": "boolean", "description": "Only return iterated logical file metadata for files that user has scope permission to access" + }, + "useLegacyDefaultFileScopePermissionCache": { + "type": "boolean", + "description": "Use legacy default filescope permissions that cached value for first user" } } }, diff --git a/initfiles/componentfiles/configxml/dali.xsl b/initfiles/componentfiles/configxml/dali.xsl index ffafa3199cc..5983537297d 100644 --- a/initfiles/componentfiles/configxml/dali.xsl +++ b/initfiles/componentfiles/configxml/dali.xsl @@ -308,6 +308,9 @@ + + + diff --git a/initfiles/componentfiles/configxml/esp.xsl b/initfiles/componentfiles/configxml/esp.xsl index c567856f282..8fca1a45b39 100644 --- a/initfiles/componentfiles/configxml/esp.xsl +++ b/initfiles/componentfiles/configxml/esp.xsl @@ -466,6 +466,9 @@ + + + diff --git a/system/security/LdapSecurity/ldapsecurity.cpp b/system/security/LdapSecurity/ldapsecurity.cpp index 7bd62e4e3d0..55d9b793ce6 100644 --- a/system/security/LdapSecurity/ldapsecurity.cpp +++ b/system/security/LdapSecurity/ldapsecurity.cpp @@ -630,6 +630,8 @@ void CLdapSecManager::init(const char *serviceName, IPropertyTree* cfg) m_permissionsCache->setCacheTimeout( 60 * cacheTimeoutMinutes); m_permissionsCache->setTransactionalEnabled(true); m_permissionsCache->setSecManager(this); + m_useLegacyDefaultFileScopePermissionCaching = cfg->getPropBool("@useLegacyDefaultFileScopePermissionCache", m_useLegacyDefaultFileScopePermissionCaching); + m_permissionsCache->setUseLegacyDefaultFileScopePermissionCache(m_useLegacyDefaultFileScopePermissionCaching); 
m_passwordExpirationWarningDays = cfg->getPropInt(".//@passwordExpirationWarningDays", 10); //Default to 10 days m_checkViewPermissions = cfg->getPropBool(".//@checkViewPermissions", false); m_hpccInternalScope.set(queryDfsXmlBranchName(DXB_Internal)).append("::");//HpccInternal:: diff --git a/system/security/LdapSecurity/ldapsecurity.ipp b/system/security/LdapSecurity/ldapsecurity.ipp index ef5647a83ab..b7d4b35ae6b 100644 --- a/system/security/LdapSecurity/ldapsecurity.ipp +++ b/system/security/LdapSecurity/ldapsecurity.ipp @@ -323,6 +323,7 @@ private: static const SecFeatureSet s_safeFeatures = SMF_ALL_FEATURES; static const SecFeatureSet s_implementedFeatures = s_safeFeatures & ~(SMF_RetrieveUserData | SMF_RemoveResources); StringBuffer m_hpccInternalScope; + bool m_useLegacyDefaultFileScopePermissionCaching = true; public: IMPLEMENT_IINTERFACE diff --git a/system/security/shared/caching.cpp b/system/security/shared/caching.cpp index 27a7fdb6707..0a1278dd864 100644 --- a/system/security/shared/caching.cpp +++ b/system/security/shared/caching.cpp @@ -549,7 +549,8 @@ inline void CPermissionsCache::removeAllManagedFileScopes() etc. Until full scope path checked, or no read permissions hit on ancestor scope. */ -static CriticalSection msCacheSyncCS;//for managed scopes cache syncronization +static CriticalSection msCacheSyncCS;//for managed scopes cache synchronization +static CriticalSection syncDefaultScopePermissions;//for cached default file scope permissions bool CPermissionsCache::queryPermsManagedFileScope(ISecUser& sec_user, const char * fullScope, StringBuffer& managedScope, SecAccessFlags * accessFlags) { unsigned start = msTick(); @@ -572,7 +573,15 @@ bool CPermissionsCache::queryPermsManagedFileScope(ISecUser& sec_user, const cha aindex_t count = m_secMgr->getManagedScopeTree(RT_FILE_SCOPE, nullptr, scopes); if (count) addManagedFileScopes(scopes); - m_defaultPermission = SecAccess_Unknown;//trigger refresh + if (m_useLegacyDefaultFileScopePermissionCache) + { + m_defaultPermission = SecAccess_Unknown; + } + else + { + CriticalBlock defaultScopePermissionBlock(syncDefaultScopePermissions); + m_userDefaultFileScopePermissions.clear(); + } time(&m_lastManagedFileScopesRefresh); } } @@ -672,16 +681,47 @@ bool CPermissionsCache::queryPermsManagedFileScope(ISecUser& sec_user, const cha SecAccessFlags CPermissionsCache::queryDefaultPermission(ISecUser& user) { - if (m_defaultPermission == SecAccess_Unknown) + if (!m_secMgr) + return SecAccess_Full; // if no security manager, allow full access to all scopes + + if (m_useLegacyDefaultFileScopePermissionCache) { - if (m_secMgr) + if (m_defaultPermission == SecAccess_Unknown) + { m_defaultPermission = m_secMgr->queryDefaultPermission(user); + DBGLOG("Legacy default file scope permission set to %s(%d) for all users, based on User '%s'", getSecAccessFlagName(m_defaultPermission), + m_defaultPermission, user.getName()); + } + return m_defaultPermission; + } + + SecAccessFlags defaultPermission = SecAccess_None; + const std::string username(user.getName()); + bool addedToCache = false; + { + CriticalBlock defaultScopePermissionBlock(syncDefaultScopePermissions); + auto it = m_userDefaultFileScopePermissions.find(username); + if (it == m_userDefaultFileScopePermissions.end()) + { + defaultPermission = m_secMgr->queryDefaultPermission(user); + m_userDefaultFileScopePermissions.emplace(username, defaultPermission); + addedToCache = true; + } else - m_defaultPermission = SecAccess_None; + { + defaultPermission = it->second; + } + } + + if
(addedToCache) + { + DBGLOG("Added user '%s' to default file scope permissions with access %s(%d)", username.c_str(), getSecAccessFlagName(defaultPermission), + defaultPermission); } - return m_defaultPermission; + return defaultPermission; } + void CPermissionsCache::flush() { // MORE - is this safe? m_defaultPermossion and m_lastManagedFileScopesRefresh are unprotected, @@ -702,8 +742,16 @@ void CPermissionsCache::flush() delete (*ui).second; m_userCache.clear(); } + if (m_useLegacyDefaultFileScopePermissionCache) + { + m_defaultPermission = SecAccess_Unknown; + } + else + { + CriticalBlock defaultScopePermissionBlock(syncDefaultScopePermissions); + m_userDefaultFileScopePermissions.clear(); + } m_lastManagedFileScopesRefresh = 0; - m_defaultPermission = SecAccess_Unknown;//trigger refresh } CPermissionsCache* CPermissionsCache::getInstance(const char * _secMgrClass) diff --git a/system/security/shared/caching.hpp b/system/security/shared/caching.hpp index ac37cc9f457..243b71ef486 100644 --- a/system/security/shared/caching.hpp +++ b/system/security/shared/caching.hpp @@ -203,6 +203,12 @@ class CPermissionsCache : public CInterface bool queryPermsManagedFileScope(ISecUser& sec_user, const char * fullScope, StringBuffer& managedScope, SecAccessFlags * accessFlags); void setSecManager(ISecManager * secMgr) { m_secMgr = secMgr; } SecAccessFlags queryDefaultPermission(ISecUser& user); + void setUseLegacyDefaultFileScopePermissionCache(bool useLegacy) + { + if (useLegacy) + DBGLOG("*** Setting default file scope permissions to use legacy mode which uses first retrieved permission for all users."); + m_useLegacyDefaultFileScopePermissionCache = useLegacy; + } private: typedef std::map MapResPermissionsCache; @@ -221,11 +227,14 @@ class CPermissionsCache : public CInterface StringAttr m_secMgrClass; //Managed File Scope support + std::map m_userDefaultFileScopePermissions; SecAccessFlags m_defaultPermission; map m_managedFileScopesMap; mutable ReadWriteLock m_scopesRWLock;//guards m_managedFileScopesMap ISecManager * m_secMgr; time_t m_lastManagedFileScopesRefresh; + + bool m_useLegacyDefaultFileScopePermissionCache = false; }; time_t getThreadCreateTime(); From 0176a72aeaa0f932d48d11034feb24c66ef3ec1c Mon Sep 17 00:00:00 2001 From: Jim DeFabia Date: Tue, 18 Jun 2024 10:41:09 -0400 Subject: [PATCH 074/151] HPCC-32074 Document 'wrap' parameter Std.File.Copy and fCopy Signed-off-by: Jim DeFabia --- .../SLR-Mods/Copy.xml | 23 ++++++++++++++----- 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/docs/EN_US/ECLStandardLibraryReference/SLR-Mods/Copy.xml b/docs/EN_US/ECLStandardLibraryReference/SLR-Mods/Copy.xml index bca266d1967..9027a596cf1 100644 --- a/docs/EN_US/ECLStandardLibraryReference/SLR-Mods/Copy.xml +++ b/docs/EN_US/ECLStandardLibraryReference/SLR-Mods/Copy.xml @@ -28,9 +28,9 @@ [ ,preserveCompression ] [ ,noSplit ] [ ,expireDays ] [ - ,ensure ]); + role="bold">] [ ,ensure ] [ ,wrap ] ); dfuwuid := STD.File.fCopy @@ -57,9 +57,9 @@ [ ,preserveCompression ] [ ,noSplit ] [ ,expireDays ] [ - ,ensure]); - + role="bold">] [ ,ensure] [ ,wrap ] ); @@ -215,6 +215,17 @@ if they already exist. Default is FALSE. + + wrap + + Optional. A boolean TRUE or FALSE flag indicating whether to + automatically wrap the file parts when copying to smaller sized + clusters. For example, copying from a 6-node cluster to a 3-node + cluster, two file parts will end up on each node; the difference is + whether node 1 gets parts 1 and 2 or parts 1 and 4. If omitted, the + default is FALSE. 
+ + dfuwuid From 3e17095173e3df2065eee05d3360a78ca9c64f51 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Tue, 18 Jun 2024 14:06:22 +0100 Subject: [PATCH 075/151] HPCC-32054 Prevent global undefined due to out of order evaluation Signed-off-by: Gavin Halliday --- ecl/hql/hqltrans.ipp | 3 ++- ecl/hqlcpp/hqlttcpp.cpp | 24 ++++++++++++++++-------- ecl/hqlcpp/hqlttcpp.ipp | 6 ++---- 3 files changed, 20 insertions(+), 13 deletions(-) diff --git a/ecl/hql/hqltrans.ipp b/ecl/hql/hqltrans.ipp index 89a49046473..b0cd886c17f 100644 --- a/ecl/hql/hqltrans.ipp +++ b/ecl/hql/hqltrans.ipp @@ -688,7 +688,8 @@ private: class HQL_API ConditionalHqlTransformer : public NewHqlTransformer { public: - enum { CTFnoteifactions = 0x0001, + enum { CTFnone = 0x0000, + CTFnoteifactions = 0x0001, CTFnoteifdatasets = 0x0002, CTFnoteifdatarows = 0x0004, CTFnoteifall = 0x0008, diff --git a/ecl/hqlcpp/hqlttcpp.cpp b/ecl/hqlcpp/hqlttcpp.cpp index 6dadd057d3f..49d13191769 100644 --- a/ecl/hqlcpp/hqlttcpp.cpp +++ b/ecl/hqlcpp/hqlttcpp.cpp @@ -8315,7 +8315,7 @@ IHqlDataset * queryRootDataset(IHqlExpression * dataset) //therefore, there is no need to special case if actions. Thor on the other hand will cause it to be executed unnecessarily. static HqlTransformerInfo newScopeMigrateTransformerInfo("NewScopeMigrateTransformer"); NewScopeMigrateTransformer::NewScopeMigrateTransformer(IWorkUnit * _wu, HqlCppTranslator & _translator) -: HoistingHqlTransformer(newScopeMigrateTransformerInfo, 0), translator(_translator) +: HoistingHqlTransformer(newScopeMigrateTransformerInfo, CTFnone), translator(_translator) { wu = _wu; isRoxie = translator.targetRoxie(); @@ -8622,7 +8622,7 @@ bool AutoScopeMigrateInfo::doAutoHoist(IHqlExpression * transformed, bool minimi static HqlTransformerInfo autoScopeMigrateTransformerInfo("AutoScopeMigrateTransformer"); AutoScopeMigrateTransformer::AutoScopeMigrateTransformer(IWorkUnit * _wu, HqlCppTranslator & _translator) -: NewHqlTransformer(autoScopeMigrateTransformerInfo), translator(_translator) +: HoistingHqlTransformer(autoScopeMigrateTransformerInfo, CTFnone), translator(_translator) { wu = _wu; isRoxie = (translator.getTargetClusterType() == RoxieCluster); @@ -8631,7 +8631,6 @@ AutoScopeMigrateTransformer::AutoScopeMigrateTransformer(IWorkUnit * _wu, HqlCpp hasCandidate = false; activityDepth = 0; curGraph = 1; - globalTarget = NULL; } //Ensure all input activities are marked as never hoisting, but child activities are unaffected @@ -8861,7 +8860,7 @@ IHqlExpression * AutoScopeMigrateTransformer::createTransformed(IHqlExpression * //else hoist it within the current graph, otherwise it can get hoisted before globals on datasets that //it is dependent on. 
if (extra->firstUseIsConditional) - globalTarget->append(*createWrapper(no_thor, setResult.getClear())); + appendToTarget(*createWrapper(no_thor, setResult.getClear())); else graphActions.append(*setResult.getClear()); transformed.setown(getResult.getClear()); @@ -8871,11 +8870,20 @@ IHqlExpression * AutoScopeMigrateTransformer::createTransformed(IHqlExpression * } -void AutoScopeMigrateTransformer::transformRoot(const HqlExprArray & in, HqlExprArray & out) +IHqlExpression * AutoScopeMigrateTransformer::doTransformIndependent(IHqlExpression * expr) { - globalTarget = &out; - NewHqlTransformer::transformRoot(in, out); - globalTarget = NULL; + AutoScopeMigrateTransformer transformer(wu, translator); + + HqlExprArray exprs; + unwindCommaCompound(exprs, expr); + transformer.analyseArray(exprs, 0); + transformer.analyseArray(exprs, 1); + if (!transformer.worthTransforming()) + return LINK(expr); + + HqlExprArray results; + transformer.transformRoot(exprs, results); + return createActionList(results); } diff --git a/ecl/hqlcpp/hqlttcpp.ipp b/ecl/hqlcpp/hqlttcpp.ipp index d5ca93bdca5..a5f1e72df27 100644 --- a/ecl/hqlcpp/hqlttcpp.ipp +++ b/ecl/hqlcpp/hqlttcpp.ipp @@ -729,13 +729,11 @@ public: bool neverHoist = false; }; -class AutoScopeMigrateTransformer : public NewHqlTransformer +class AutoScopeMigrateTransformer : public HoistingHqlTransformer { public: AutoScopeMigrateTransformer(IWorkUnit * _wu, HqlCppTranslator & _translator); - void transformRoot(const HqlExprArray & in, HqlExprArray & out); - bool worthTransforming() const { return hasCandidate; } protected: @@ -751,6 +749,7 @@ protected: IHqlExpression * transformCond(IHqlExpression * expr); void doAnalyseExpr(IHqlExpression * expr); void doAnalyseConditionalExpr(IHqlExpression * expr, unsigned firstConditional); + virtual IHqlExpression * doTransformIndependent(IHqlExpression * expr) override; inline AutoScopeMigrateInfo * queryBodyExtra(IHqlExpression * expr) { return static_cast(queryTransformExtra(expr->queryBody())); } @@ -765,7 +764,6 @@ private: unsigned graphDepth = 0; HqlExprArray graphActions; unsigned activityDepth; - HqlExprArray * globalTarget; }; //--------------------------------------------------------------------------- From 501ef6dfedf6871dd2115b2ba3766be7f89c4062 Mon Sep 17 00:00:00 2001 From: Richard Chapman Date: Wed, 12 Jun 2024 11:10:09 +0100 Subject: [PATCH 076/151] HPCC-31981 Mutex code was inefficient Refactor Mutex class to use modern C++ standard mutexes, which are significantly faster. Leave old Mutex class in place just in case the unlockall functionality is significant in the one place that uses it. 
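To make the new shape concrete, here is a minimal standalone sketch of the scheme adopted below: standard C++ mutexes wrapped in thin jlib-style classes, with an RAII block for scoped locking. This is a simplified illustration, not the actual header; see the jmutex.hpp hunks in this patch for the real definitions.

    #include <chrono>
    #include <mutex>

    class Mutex                       // recursive, like the legacy jlib Mutex
    {
    public:
        void lock()   { mutex.lock(); }
        void unlock() { mutex.unlock(); }
    private:
        std::recursive_mutex mutex;
    };

    class TimedMutex                  // adds a millisecond-timeout lockWait()
    {
    public:
        void lock()   { mutex.lock(); }
        bool lockWait(unsigned timeout)
        {
            if (timeout == (unsigned)-1)  // jlib convention: -1 waits forever
            {
                lock();
                return true;
            }
            return mutex.try_lock_for(std::chrono::milliseconds(timeout));
        }
        void unlock() { mutex.unlock(); }
    private:
        std::recursive_timed_mutex mutex;
    };

    template <class T>
    class MutexBlock                  // RAII scoped lock ('synchronized')
    {
    public:
        MutexBlock(T &m) : mutex(m) { mutex.lock(); }
        ~MutexBlock() { mutex.unlock(); }
    private:
        T &mutex;
    };

    static Mutex sharedStateMutex;    // hypothetical guard for shared state

    void updateSharedState()
    {
        MutexBlock<Mutex> block(sharedStateMutex); // released on scope exit
        // ... mutate shared state ...
    }

The one caller that relies on Mutex::lockAll/unlockAll (the Monitor wait path, which must release a recursive lock count in one step) keeps the legacy pthread-based class, since the standard mutexes have no equivalent operation.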
Also fixes incorrect summary stats from jlib timing test Signed-off-by: Richard Chapman --- deployment/deploy/DeployTask.cpp | 4 +- esp/esplib/pqueue.hpp | 279 ------------------ system/jlib/jlog.cpp | 2 +- system/jlib/jlog.ipp | 2 +- system/jlib/jmutex.cpp | 39 ++- system/jlib/jmutex.hpp | 98 ++++-- system/jlib/jpqueue.hpp | 27 +- system/mp/mpcomm.cpp | 8 +- .../security/LdapSecurity/ldapconnection.cpp | 2 +- system/security/LdapSecurity/ldapsecurity.cpp | 2 +- system/security/LdapSecurity/permissions.cpp | 4 +- testing/unittests/jlibtests.cpp | 16 +- 12 files changed, 129 insertions(+), 354 deletions(-) delete mode 100644 esp/esplib/pqueue.hpp diff --git a/deployment/deploy/DeployTask.cpp b/deployment/deploy/DeployTask.cpp index 841cab4edc3..754575486d5 100644 --- a/deployment/deploy/DeployTask.cpp +++ b/deployment/deploy/DeployTask.cpp @@ -752,7 +752,7 @@ class CDeployTask : public CInterface, implements IDeployTask // Prompt to retry on error m_errorString.appendf("Cannot copy %s to %s: ", source, target); - synchronized block(s_monitor); + MonitorBlock block(s_monitor); if (m_pCallback->getAbortStatus())//has some other thread set the global abort flag? break; //go back to beginning of loop where we exit on abort @@ -822,7 +822,7 @@ class CDeployTask : public CInterface, implements IDeployTask if (m_msgBoxOwner)//did this thread show the message box in last iteration of this loop? { - synchronized block(s_monitor); + MonitorBlock block(s_monitor); s_msgBoxActive = false; m_msgBoxOwner = false;//up for grabs by other threads s_monitor.notifyAll(); diff --git a/esp/esplib/pqueue.hpp b/esp/esplib/pqueue.hpp deleted file mode 100644 index 9b5f59e40a7..00000000000 --- a/esp/esplib/pqueue.hpp +++ /dev/null @@ -1,279 +0,0 @@ -/*############################################################################## - - HPCC SYSTEMS software Copyright (C) 2012 HPCC Systems®. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -############################################################################## */ - -#ifndef __PQUEUE_HPP -#define __PQUEUE_HPP - -#ifdef _MSC_VER -#pragma warning( push ) -#pragma warning(disable : 4284 ) // return type for '...::operator ->' is '...' (ie; not a UDT or reference to a UDT.
Will produce errors if applied using infix notation) -#endif - -#include "jlibplus.hpp" - -#include "jmutex.hpp" -#include "jthread.hpp" -#include "jmisc.hpp" -#include -#include - - -namespace esp -{ - -template class WaitQueue: public CInterface, protected Mutex, implements IInterface -{ -public: - IMPLEMENT_IINTERFACE; - WaitQueue(): counter(), stopped(false), waiting(0) - { - } - - ~WaitQueue() - { - stop(); - synchronized block(*this); - while(waiting) - { - counter.signal(waiting); // actually need only one and only once - wait(INFINITE); - } - } - - unsigned size() - { - synchronized block(*this); - return queue.size(); - } - - T get(unsigned timeout=INFINITE) - { - synchronized block(*this); - for(;;) - { - if(stopped) - return 0; - if(queue.size()) - break; - if(!wait(timeout)) - return 0; - } - T item=queue.front(); - queue.pop_front(); - return item; - } - - bool put(const T& item) - { - synchronized block(*this); - if(stopped) - return true; - queue.push_back(item); - counter.signal(); - return waiting>0; - } - - void stop() - { - synchronized block(*this); - stopped=true; - queue.clear(); - counter.signal(waiting); - } - - bool isStopped() - { - synchronized block(*this); - return stopped; - } - -private: - - bool wait(unsigned timeout) - { - bool ret=false; - waiting++; - - int locked = unlockAll(); - ret=counter.wait(timeout); - lockAll(locked); - - waiting--; - return ret; - } - - Semaphore counter; - std::list queue; - volatile unsigned waiting; - volatile bool stopped; //need event -}; - -interface ITask: extends IInterface -{ - virtual int run()=0; - virtual bool stop()=0; -}; - -interface IErrorListener: extends IInterface -{ - virtual void reportError(const char* err,...) __attribute__((format(printf, 2, 3))) =0; -}; - -class TaskQueue -{ -public: - TaskQueue(size32_t _maxsize,IErrorListener* _err=0): maxsize(_maxsize), err(_err) - { - } - - ~TaskQueue() - { - stop(); - join(); - } - - void put(ITask* task) - { - bool needthread=!queue.put(task); - if(needthread) - { - synchronized block(mworkers); - if(workers.size()start(); - } -// DBGLOG("%d threads",workers.size()); - } - } - - void stop() - { - queue.stop(); - - synchronized block(mworkers); - for(Workers::iterator it=workers.begin();it!=workers.end();it++) - (*it)->stop(); // no good if threads did not clean up - } - - void join() - { - synchronized block(mworkers); - while(!workers.empty()) - { - mworkers.wait(); - } - - } - - void setErrorListener(IErrorListener* _err) - { - err.set(_err); - } - - void reportError(const char* e) - { - if(err) - { - synchronized block(merr); - err->reportError(e); - } - } - -private: - - class WorkerThread: public Thread - { - public: - WorkerThread(TaskQueue& _tq): tq(_tq), stopped(false) - { - } - - virtual int run() - { - for(;;) - { - - try - { - task.setown(tq.queue.get(1000).get()); - if(stopped || !task) - break; - task->run(); - } - catch (IException *E) - { - StringBuffer err; - E->errorMessage(err); - tq.reportError(err.str()); - E->Release(); - } - catch (...) - { - tq.reportError("Unknown exception "); - } - task.clear(); - } - Release(); // There should be one more - return 0; - } - - - bool stop() - { - stopped=true; - Linked t(task.get()); - return t ? 
t->stop() : true; - } - - virtual void beforeDispose() - { - tq.remove(this); - } - - private: - TaskQueue& tq; - volatile bool stopped; - Owned task; - }; - - void remove(WorkerThread* th) - { - synchronized block(mworkers); - workers.remove(th); - if(workers.empty()) - mworkers.notifyAll(); - } - - - WaitQueue > queue; - - size32_t maxsize; - friend WorkerThread; - typedef std::list Workers; - Workers workers; - Monitor mworkers; - Linked err; - Mutex merr; -}; -} - -#ifdef _MSC_VER -#pragma warning(pop) -#endif - -#endif diff --git a/system/jlib/jlog.cpp b/system/jlib/jlog.cpp index ba13f2b9276..af28ee6ccaa 100644 --- a/system/jlib/jlog.cpp +++ b/system/jlib/jlog.cpp @@ -1437,7 +1437,7 @@ bool CLogMsgManager::MsgProcessor::flush(unsigned timeout) return false; try { - synchronized block(pullCycleMutex, timeout+start-now); + TimedMutexBlock block(pullCycleMutex, timeout+start-now); } catch(IException * e) { diff --git a/system/jlib/jlog.ipp b/system/jlib/jlog.ipp index 31a6f05dfeb..7318156a0b1 100644 --- a/system/jlib/jlog.ipp +++ b/system/jlib/jlog.ipp @@ -722,7 +722,7 @@ private: CallbackInterThreadQueueOf q; unsigned droppingLimit; unsigned numToDrop = 1; - Mutex pullCycleMutex; + TimedMutex pullCycleMutex; }; Owned processor; diff --git a/system/jlib/jmutex.cpp b/system/jlib/jmutex.cpp index 8d1fe0b369a..f2b5589ac1c 100644 --- a/system/jlib/jmutex.cpp +++ b/system/jlib/jmutex.cpp @@ -27,7 +27,7 @@ //=========================================================================== #ifndef _WIN32 -Mutex::Mutex() +LegacyMutex::LegacyMutex() { pthread_mutex_init(&mutex, NULL); pthread_cond_init(&lock_free, NULL); @@ -35,13 +35,13 @@ Mutex::Mutex() lockcount = 0; } -Mutex::~Mutex() +LegacyMutex::~LegacyMutex() { pthread_cond_destroy(&lock_free); pthread_mutex_destroy(&mutex); } -void Mutex::lock() +void LegacyMutex::lock() { pthread_mutex_lock(&mutex); while ((owner!=0) && !pthread_equal(owner, pthread_self())) @@ -51,7 +51,7 @@ void Mutex::lock() pthread_mutex_unlock(&mutex); } -bool Mutex::lockWait(unsigned timeout) +bool LegacyMutex::lockWait(unsigned timeout) { if (timeout==(unsigned)-1) { lock(); @@ -76,7 +76,7 @@ bool Mutex::lockWait(unsigned timeout) return true; } -void Mutex::unlock() +void LegacyMutex::unlock() { pthread_mutex_lock(&mutex); #ifdef _DEBUG @@ -90,7 +90,7 @@ void Mutex::unlock() pthread_mutex_unlock(&mutex); } -void Mutex::lockAll(int count) +void LegacyMutex::lockAll(int count) { if (count) { pthread_mutex_lock(&mutex); @@ -102,7 +102,7 @@ void Mutex::lockAll(int count) } } -int Mutex::unlockAll() +int LegacyMutex::unlockAll() { pthread_mutex_lock(&mutex); int ret = lockcount; @@ -118,9 +118,6 @@ int Mutex::unlockAll() return ret; } - - - inline bool read_data(int fd, void *buf, size_t nbytes) { size32_t nread = 0; @@ -268,7 +265,17 @@ void NamedMutex::unlock() #endif -void synchronized::throwLockException(unsigned timeout) +bool TimedMutex::lockWait(unsigned timeout) +{ + if (timeout==(unsigned)-1) { + lock(); + return true; + } + std::chrono::milliseconds ms(timeout); + return mutex.try_lock_for(ms); +} + +void TimedMutexBlock::throwLockException(unsigned timeout) { throw MakeStringException(0,"Can not lock - %d",timeout); } @@ -279,14 +286,14 @@ void synchronized::throwLockException(unsigned timeout) void Monitor::wait() { - assertex(owner==GetCurrentThreadId()); + assertex(mutex.owner==GetCurrentThreadId()); waiting++; void *cur = last; last = &cur; while (1) { - int locked = unlockAll(); + int locked = mutex.unlockAll(); sem->wait(); - lockAll(locked); + 
mutex.lockAll(locked); if (cur==NULL) { // i.e. first in void **p=(void **)&last; while (*p!=&cur) @@ -300,7 +307,7 @@ void Monitor::wait() void Monitor::notify() { // should always be locked - assertex(owner==GetCurrentThreadId()); + assertex(mutex.owner==GetCurrentThreadId()); if (waiting) { waiting--; @@ -310,7 +317,7 @@ void Monitor::notify() void Monitor::notifyAll() { // should always be locked - assertex(owner==GetCurrentThreadId()); + assertex(mutex.owner==GetCurrentThreadId()); if (waiting) { sem->signal(waiting); diff --git a/system/jlib/jmutex.hpp b/system/jlib/jmutex.hpp index d9f072cf6ea..4e3f51cdc7f 100644 --- a/system/jlib/jmutex.hpp +++ b/system/jlib/jmutex.hpp @@ -22,6 +22,7 @@ #include #include +#include #include #include "jiface.hpp" #include "jsem.hpp" @@ -46,10 +47,11 @@ extern jlib_decl void spinUntilReady(std::atomic_uint &value); #endif #ifdef _WIN32 -class jlib_decl Mutex +class jlib_decl LegacyMutex { +friend class Monitor; protected: - Mutex(const char *name) + LegacyMutex(const char *name) { mutex = CreateMutex(NULL, FALSE, name); assertex(mutex); @@ -57,13 +59,13 @@ class jlib_decl Mutex owner = 0; } public: - Mutex() + LegacyMutex() { mutex = CreateMutex(NULL, FALSE, NULL); lockcount = 0; owner = 0; } - ~Mutex() + ~LegacyMutex() { if (owner != 0) printf("Warning - Owned mutex destroyed"); // can't use DBGLOG here! @@ -123,25 +125,14 @@ class jlib_decl Mutex int lockcount; }; -class jlib_decl NamedMutex: public Mutex -{ -public: - NamedMutex(const char *name) - : Mutex(name) - { - } -}; - - - #else // posix -class jlib_decl Mutex +class jlib_decl LegacyMutex { + friend class Monitor; public: - Mutex(); -// Mutex(const char *name); //not supported - ~Mutex(); + LegacyMutex(); + ~LegacyMutex(); void lock(); bool lockWait(unsigned timeout); void unlock(); @@ -154,7 +145,35 @@ class jlib_decl Mutex int lockcount; pthread_cond_t lock_free; }; +#endif + +class jlib_decl SimpleMutex +{ +public: + void lock() { mutex.lock(); }; + void unlock() { mutex.unlock(); }; +private: + std::mutex mutex; +}; + +class jlib_decl Mutex +{ +public: + void lock() { mutex.lock(); }; + void unlock() { mutex.unlock(); }; +private: + std::recursive_mutex mutex; +}; +class jlib_decl TimedMutex +{ +public: + void lock() { mutex.lock(); }; + bool lockWait(unsigned timeout); + void unlock() { mutex.unlock(); }; +private: + std::recursive_timed_mutex mutex; +}; class jlib_decl NamedMutex { @@ -165,23 +184,30 @@ class jlib_decl NamedMutex bool lockWait(unsigned timeout); void unlock(); private: - Mutex threadmutex; + TimedMutex threadmutex; char *mutexfname; }; +template class jlib_decl MutexBlock +{ +private: + T &mutex; +public: + MutexBlock(T &m) : mutex(m) { mutex.lock(); }; + ~MutexBlock() { mutex.unlock(); }; +}; +typedef MutexBlock synchronized; -#endif - -class jlib_decl synchronized +class jlib_decl TimedMutexBlock { private: - Mutex &mutex; + TimedMutex &mutex; void throwLockException(unsigned timeout); public: - synchronized(Mutex &m) : mutex(m) { mutex.lock(); }; - synchronized(Mutex &m,unsigned timeout) : mutex(m) { if(!mutex.lockWait(timeout)) throwLockException(timeout); } - inline ~synchronized() { mutex.unlock(); }; + TimedMutexBlock(TimedMutex &m) : mutex(m) { mutex.lock(); }; + TimedMutexBlock(TimedMutex &m, unsigned timeout) : mutex(m) { if(!mutex.lockWait(timeout)) throwLockException(timeout); } + inline ~TimedMutexBlock() { mutex.unlock(); }; }; #ifdef _WIN32 @@ -561,14 +587,16 @@ class NonReentrantSpinUnblock -class jlib_decl Monitor: public Mutex +class jlib_decl Monitor 
{ // Like a java object - you can synchronize on it for a block, wait for a notify on it, or notify on it + friend class MonitorBlock; Semaphore *sem; int waiting; void *last; + LegacyMutex mutex; public: - Monitor() : Mutex() { sem = new Semaphore(); waiting = 0; last = NULL; } + Monitor() { sem = new Semaphore(); waiting = 0; last = NULL; } // Monitor(const char *name) : Mutex(name) { sem = new Semaphore(name); waiting = 0; last = NULL; } // not supported ~Monitor() {delete sem;}; @@ -577,6 +605,16 @@ class jlib_decl Monitor: public Mutex void notifyAll(); // only called when locked -- notifys for all waiting threads }; +class jlib_decl MonitorBlock +{ +private: + Monitor &monitor; +public: + MonitorBlock(Monitor &m) : monitor(m) { monitor.mutex.lock(); }; + inline ~MonitorBlock() { monitor.mutex.unlock(); }; +}; + + //-------------------------------------------------------------------------------------------------------------------- //Currently disabled since performance profile of own implementation is preferable, and queryWriteLocked() cannot be implemented @@ -838,7 +876,7 @@ class Barrier #define USECHECKEDCRITICALSECTIONS #ifdef USECHECKEDCRITICALSECTIONS -typedef Mutex CheckedCriticalSection; +typedef TimedMutex CheckedCriticalSection; void jlib_decl checkedCritEnter(CheckedCriticalSection &crit, unsigned timeout, const char *fname, unsigned lnum); void jlib_decl checkedCritLeave(CheckedCriticalSection &crit); class jlib_decl CheckedCriticalBlock diff --git a/system/jlib/jpqueue.hpp b/system/jlib/jpqueue.hpp index 3b96b419020..f2c0122e2b4 100644 --- a/system/jlib/jpqueue.hpp +++ b/system/jlib/jpqueue.hpp @@ -30,7 +30,7 @@ #include #include -template class WaitQueue: public CInterface, protected Mutex +template class WaitQueue: public CInterface { public: WaitQueue(): counter(), stopped(false), waiting(0) @@ -40,7 +40,7 @@ template class WaitQueue: public CInterface, protected Mutex ~WaitQueue() { stop(); - synchronized block(*this); + synchronized block(mutex); while(waiting) { counter.signal(waiting); // actually need only one and only once @@ -50,13 +50,13 @@ template class WaitQueue: public CInterface, protected Mutex unsigned size() { - synchronized block(*this); + synchronized block(mutex); return queue.size(); } T get(unsigned timeout=INFINITE) { - synchronized block(*this); + synchronized block(mutex); for(;;) { if(stopped) @@ -73,7 +73,7 @@ template class WaitQueue: public CInterface, protected Mutex bool put(const T& item) { - synchronized block(*this); + synchronized block(mutex); if(stopped) return true; queue.push_back(item); @@ -83,7 +83,7 @@ template class WaitQueue: public CInterface, protected Mutex void stop() { - synchronized block(*this); + synchronized block(mutex); stopped=true; queue.clear(); counter.signal(waiting); @@ -91,7 +91,7 @@ template class WaitQueue: public CInterface, protected Mutex bool isStopped() { - synchronized block(*this); + synchronized block(mutex); return stopped; } @@ -102,14 +102,15 @@ template class WaitQueue: public CInterface, protected Mutex bool ret=false; waiting++; - int locked = unlockAll(); + mutex.unlock(); ret=counter.wait(timeout); - lockAll(locked); + mutex.lock(); waiting--; return ret; } + Mutex mutex; Semaphore counter; std::list queue; volatile unsigned waiting; @@ -146,7 +147,7 @@ class TaskQueue bool needthread=!queue.put(task); if(needthread) { - synchronized block(mworkers); + MonitorBlock block(mworkers); if(workers.size()stop(); // no good if threads did not clean up } void join() { - synchronized 
block(mworkers); + MonitorBlock block(mworkers); while(!workers.empty()) { mworkers.wait(); @@ -248,7 +249,7 @@ class TaskQueue void remove(WorkerThread* th) { - synchronized block(mworkers); + MonitorBlock block(mworkers); workers.remove(th); if(workers.empty()) mworkers.notifyAll(); diff --git a/system/mp/mpcomm.cpp b/system/mp/mpcomm.cpp index 6355baa5a15..f4d7e9f1281 100644 --- a/system/mp/mpcomm.cpp +++ b/system/mp/mpcomm.cpp @@ -917,7 +917,7 @@ class CMPChannel: public CInterface { ISocket *channelsock = nullptr; CMPServer *parent; - Mutex sendmutex; + TimedMutex sendmutex; Semaphore sendwaitingsig; unsigned sendwaiting = 0; // number waiting on sendwaitingsem (for multi/single clashes to resolve) CriticalSection connectsect; @@ -1627,7 +1627,7 @@ class MultiPacketHandler // TAG_SYS_MULTI } return msg; } - bool send(CMPChannel *channel,PacketHeader &hdr,MemoryBuffer &mb, CTimeMon &tm, Mutex &sendmutex) + bool send(CMPChannel *channel,PacketHeader &hdr,MemoryBuffer &mb, CTimeMon &tm, TimedMutex &sendmutex) { // must not adjust mb #ifdef _FULLTRACE @@ -2045,12 +2045,12 @@ bool CMPChannel::send(MemoryBuffer &mb, mptag_t tag, mptag_t replytag, CTimeMon struct Cpostcondition // can we start using eiffel { - Mutex &sendmutex; + TimedMutex &sendmutex; unsigned &sendwaiting; Semaphore &sendwaitingsig; mptag_t *multitag; - Cpostcondition(Mutex &_sendmutex,unsigned &_sendwaiting,Semaphore &_sendwaitingsig,mptag_t *_multitag) + Cpostcondition(TimedMutex &_sendmutex,unsigned &_sendwaiting,Semaphore &_sendwaitingsig,mptag_t *_multitag) : sendmutex(_sendmutex),sendwaiting(_sendwaiting),sendwaitingsig(_sendwaitingsig) { multitag = _multitag; diff --git a/system/security/LdapSecurity/ldapconnection.cpp b/system/security/LdapSecurity/ldapconnection.cpp index 3c55315f9a6..aaf426e8d67 100644 --- a/system/security/LdapSecurity/ldapconnection.cpp +++ b/system/security/LdapSecurity/ldapconnection.cpp @@ -1077,7 +1077,7 @@ class CLdapConnectionPool : implements ILdapConnectionPool, public CInterface virtual ILdapConnection* getConnection() { - synchronized block(m_monitor); + MonitorBlock block(m_monitor); ForEachItemIn(x, m_connections) { CLdapConnection* curcon = (CLdapConnection*)&(m_connections.item(x)); diff --git a/system/security/LdapSecurity/ldapsecurity.cpp b/system/security/LdapSecurity/ldapsecurity.cpp index 7bd62e4e3d0..01d9a2b04e7 100644 --- a/system/security/LdapSecurity/ldapsecurity.cpp +++ b/system/security/LdapSecurity/ldapsecurity.cpp @@ -1202,7 +1202,7 @@ ISecUser * CLdapSecManager::findUser(const char * username, IEspSecureContext* s ISecUserIterator * CLdapSecManager::getAllUsers(IEspSecureContext* secureContext) { - synchronized block(m_monitor); + MonitorBlock block(m_monitor); m_user_array.popAll(true); m_ldap_client->retrieveUsers(m_user_array); return new ArrayIIteratorOf(m_user_array); diff --git a/system/security/LdapSecurity/permissions.cpp b/system/security/LdapSecurity/permissions.cpp index b268524f9e0..50c8c8ce356 100644 --- a/system/security/LdapSecurity/permissions.cpp +++ b/system/security/LdapSecurity/permissions.cpp @@ -173,7 +173,7 @@ void PermissionProcessor::getCachedSid(const char* name, MemoryBuffer& sid) StringBuffer buf; if(toXpath(name, buf)) { - synchronized block(m_monitor); + MonitorBlock block(m_monitor); try { m_sidcache->getPropBin(buf.str(), sid); @@ -200,7 +200,7 @@ void PermissionProcessor::cacheSid(const char* name, int len, const void* sidbuf StringBuffer buf; if(toXpath(name, buf)) { - synchronized block(m_monitor); + MonitorBlock block(m_monitor); 
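[Editor's illustration] The jmutex.hpp changes above replace the hand-rolled recursive pthread mutex with thin wrappers over std::recursive_mutex and its timed variant, and move callers from inheriting Mutex to explicit RAII guard classes (MutexBlock/synchronized, TimedMutexBlock, MonitorBlock). A minimal usage sketch of the new API, assuming only the declarations introduced in this patch; the functions and variable names here are illustrative, not part of the patch:

#include "jmutex.hpp"

static Mutex listMutex;        // now a thin wrapper over std::recursive_mutex
static TimedMutex stateMutex;  // timed variant; also backs CheckedCriticalSection
static Monitor workers;        // now owns a LegacyMutex rather than inheriting Mutex

void appendItem()
{
    synchronized block(listMutex);   // scoped lock, as in the jpqueue.hpp changes
    // ... critical section ...
}

void updateState()
{
    // Throws via throwLockException() if the lock is not acquired within 5000ms
    TimedMutexBlock block(stateMutex, 5000);
    // ... critical section ...
}

void waitForWorkers()
{
    MonitorBlock block(workers);     // wait() must be called while the Monitor is locked
    workers.wait();
}
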
try { if(!m_sidcache->hasProp(buf.str())) diff --git a/testing/unittests/jlibtests.cpp b/testing/unittests/jlibtests.cpp index c98b58521b5..ad579e38092 100644 --- a/testing/unittests/jlibtests.cpp +++ b/testing/unittests/jlibtests.cpp @@ -3259,6 +3259,14 @@ class AtomicTimingStressTest : public CppUnit::TestFixture DO_TEST(Mutex, synchronized, unsigned __int64, 2, 1); DO_TEST(Mutex, synchronized, unsigned __int64, 5, 1); DO_TEST(Mutex, synchronized, unsigned __int64, 1, 2); + DO_TEST(SimpleMutex, MutexBlock, unsigned __int64, 1, 1); + DO_TEST(SimpleMutex, MutexBlock, unsigned __int64, 2, 1); + DO_TEST(SimpleMutex, MutexBlock, unsigned __int64, 5, 1); + DO_TEST(SimpleMutex, MutexBlock, unsigned __int64, 1, 2); + DO_TEST(TimedMutex, TimedMutexBlock, unsigned __int64, 1, 1); + DO_TEST(TimedMutex, TimedMutexBlock, unsigned __int64, 2, 1); + DO_TEST(TimedMutex, TimedMutexBlock, unsigned __int64, 5, 1); + DO_TEST(TimedMutex, TimedMutexBlock, unsigned __int64, 1, 2); DO_TEST(SpinLock, SpinBlock, unsigned __int64, 1, 1); DO_TEST(SpinLock, SpinBlock, unsigned __int64, 2, 1); DO_TEST(SpinLock, SpinBlock, unsigned __int64, 5, 1); @@ -3294,10 +3302,10 @@ class AtomicTimingStressTest : public CppUnit::TestFixture void summariseTimings(const char * option, UInt64Array & times) { - DBGLOG("%11s 1x: cs(%3" I64F "u) spin(%3" I64F "u) atomic(%3" I64F "u) ratomic(%3" I64F "u) cas(%3" I64F "u) rd(%3" I64F "u) wr(%3" I64F "u) " - "5x: cs(%3" I64F "u) spin(%3" I64F "u) atomic(%3" I64F "u) ratomic(%3" I64F "u) cas(%3" I64F "u) rd(%3" I64F "u) wr(%3" I64F "u)", option, - times.item(0), times.item(4), times.item(8), times.item(12), times.item(14), times.item(19), times.item(23), - times.item(2), times.item(6), times.item(10), times.item(13), times.item(15), times.item(21), times.item(25)); + DBGLOG("%11s 1x: cs(%3" I64F "u) mutex (%3" I64F "u) smutex (%3" I64F "u) tmutex (%3" I64F "u) spin(%3" I64F "u) atomic(%3" I64F "u) ratomic(%3" I64F "u) cas(%3" I64F "u) rd(%3" I64F "u) wr(%3" I64F "u)", option, + times.item(0), times.item(4), times.item(8), times.item(12), times.item(16), times.item(20), times.item(24), times.item(26), times.item(31), times.item(35)); + DBGLOG("%11s 5x: cs(%3" I64F "u) mutex (%3" I64F "u) smutex (%3" I64F "u) tmutex (%3" I64F "u) spin(%3" I64F "u) atomic(%3" I64F "u) ratomic(%3" I64F "u) cas(%3" I64F "u) rd(%3" I64F "u) wr(%3" I64F "u)", "", + times.item(2), times.item(6), times.item(10), times.item(14), times.item(18), times.item(22), times.item(25), times.item(27), times.item(33), times.item(37)); } private: From d79a8d2f9833ae6fe3d1246c9c6508ae37b1cb5a Mon Sep 17 00:00:00 2001 From: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> Date: Wed, 19 Jun 2024 09:28:28 -0400 Subject: [PATCH 077/151] HPCC-32077 ECL Watch v9 correct label in Delete Queries dialog change the label in the Delete Queries dialog to show "Delete Selected Queries" instead of "Delete Selected Workunits" Signed-off-by: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> --- esp/src/src-react/components/Queries.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/esp/src/src-react/components/Queries.tsx b/esp/src/src-react/components/Queries.tsx index a0da7144844..e11938a5734 100644 --- a/esp/src/src-react/components/Queries.tsx +++ b/esp/src/src-react/components/Queries.tsx @@ -180,7 +180,7 @@ export const Queries: React.FunctionComponent = ({ const [DeleteConfirm, setShowDeleteConfirm] = useConfirm({ title: nlsHPCC.Delete, - message: nlsHPCC.DeleteSelectedWorkunits, + message: 
nlsHPCC.DeleteSelectedQueries, items: selection.map(s => s.Id), onSubmit: React.useCallback(() => { WsWorkunits.WUQuerysetQueryAction(selection, "Delete").then(() => refreshTable.call(true)); From 98c533a8a313ae0ec54a4aaebdc0025d0ecedb14 Mon Sep 17 00:00:00 2001 From: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> Date: Tue, 18 Jun 2024 16:08:04 -0400 Subject: [PATCH 078/151] HPCC-27635 ECL Watch v9 add syntax check to Playground added a syntax check button to the ECL Playground page Signed-off-by: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> --- .../src-react/components/ECLPlayground.tsx | 129 +++++++++++++----- esp/src/src-react/components/InfoGrid.tsx | 19 ++- esp/src/src/nls/hpcc.ts | 1 + 3 files changed, 111 insertions(+), 38 deletions(-) diff --git a/esp/src/src-react/components/ECLPlayground.tsx b/esp/src/src-react/components/ECLPlayground.tsx index ecee18566cd..665e0c699ad 100644 --- a/esp/src/src-react/components/ECLPlayground.tsx +++ b/esp/src/src-react/components/ECLPlayground.tsx @@ -1,14 +1,17 @@ import * as React from "react"; import { ReflexContainer, ReflexElement, ReflexSplitter } from "../layouts/react-reflex"; -import { PrimaryButton, IconButton, IIconProps, Link, Dropdown, IDropdownOption, TextField, useTheme } from "@fluentui/react"; +import { IconButton, IIconProps, Link, Dropdown, IDropdownOption, TextField, useTheme } from "@fluentui/react"; +import { Button } from "@fluentui/react-components"; +import { CheckmarkCircleRegular, DismissCircleRegular, QuestionCircleRegular } from "@fluentui/react-icons"; import { scopedLogger } from "@hpcc-js/util"; import { useOnEvent } from "@fluentui/react-hooks"; import { mergeStyleSets } from "@fluentui/style-utilities"; import { ECLEditor, IPosition } from "@hpcc-js/codemirror"; -import { Workunit, WUUpdate } from "@hpcc-js/comms"; +import { Workunit, WUUpdate, WorkunitsService } from "@hpcc-js/comms"; import { HolyGrail } from "../layouts/HolyGrail"; import { DojoAdapter } from "../layouts/DojoAdapter"; import { pushUrl } from "../util/history"; +import { debounce } from "../util/throttle"; import { darkTheme } from "../themes"; import { InfoGrid } from "./InfoGrid"; import { TabbedResults } from "./Results"; @@ -77,6 +80,9 @@ const playgroundStyles = mergeStyleSets({ borderRight: borderStyle } }, + ".fui-Button": { + height: "min-content" + }, ".ms-Label": { marginRight: "12px" }, @@ -155,43 +161,58 @@ const warningIcon: IIconProps = { title: nlsHPCC.ErrorWarnings, ariaLabel: nlsHP const resultsIcon: IIconProps = { title: nlsHPCC.Outputs, ariaLabel: nlsHPCC.Outputs, iconName: "Table" }; const graphIcon: IIconProps = { title: nlsHPCC.Visualizations, ariaLabel: nlsHPCC.Visualizations, iconName: "BarChartVerticalFill" }; -const displayErrors = (wu, editor) => { +const displayErrors = async (wu = null, editor, errors = []) => { if (!editor) return; - wu.fetchECLExceptions().then(errors => { - errors.forEach(err => { - const lineError = err.LineNo; - const lineErrorNum = lineError > 0 ? lineError - 1 : 0; - const startPos: IPosition = { - ch: (err.Column > 0) ? 
err.Column - 1 : 0, - line: lineErrorNum - }; - const endPos: IPosition = { - ch: editor.getLineLength(lineErrorNum), - line: lineErrorNum - }; - - switch (err.Severity) { - case "Info": - editor.highlightInfo(startPos, endPos); - break; - case "Warning": - editor.highlightWarning(startPos, endPos); - break; - case "Error": - default: - editor.highlightError(startPos, endPos); - break; - } - }); + if (wu) { + errors = await wu.fetchECLExceptions(); + } + if (!errors.length) { + editor.removeAllHighlight(); + } + errors.forEach(err => { + const lineError = err.LineNo; + const lineErrorNum = lineError > 0 ? lineError - 1 : 0; + const startPos: IPosition = { + ch: (err.Column > 0) ? err.Column - 1 : 0, + line: lineErrorNum + }; + const endPos: IPosition = { + ch: editor.getLineLength(lineErrorNum), + line: lineErrorNum + }; + + switch (err.Severity) { + case "Info": + editor.highlightInfo(startPos, endPos); + break; + case "Warning": + editor.highlightWarning(startPos, endPos); + break; + case "Error": + default: + editor.highlightError(startPos, endPos); + break; + } }); }; +const service = new WorkunitsService({ baseUrl: "" }); + +enum SyntaxCheckResult { + Unknown, + Failed, + Passed +} + interface ECLEditorToolbarProps { editor: ECLEditor; outputMode: OutputMode; setOutputMode: (_: OutputMode) => void; workunit: Workunit; setWorkunit: (_: Workunit) => void; + setSyntaxErrors: (_: any) => void; + syntaxStatusIcon: number; + setSyntaxStatusIcon: (_: number) => void; } const ECLEditorToolbar: React.FunctionComponent = ({ @@ -199,7 +220,10 @@ const ECLEditorToolbar: React.FunctionComponent = ({ outputMode, setOutputMode, workunit, - setWorkunit + setWorkunit, + setSyntaxErrors, + syntaxStatusIcon, + setSyntaxStatusIcon }) => { const [cluster, setCluster] = React.useState(""); @@ -258,6 +282,24 @@ const ECLEditorToolbar: React.FunctionComponent = ({ } }, [cluster, editor, playgroundResults, queryName, setQueryNameErrorMsg]); + const checkSyntax = React.useCallback(() => { + service.WUSyntaxCheckECL({ + ECL: editor.ecl(), + Cluster: cluster + }).then(response => { + if (response.Errors) { + setSyntaxStatusIcon(SyntaxCheckResult.Failed); + setSyntaxErrors(response.Errors.ECLException); + displayErrors(null, editor, response.Errors.ECLException); + setOutputMode(OutputMode.ERRORS); + } else { + setSyntaxStatusIcon(SyntaxCheckResult.Passed); + setSyntaxErrors([]); + displayErrors(null, editor, []); + } + }); + }, [cluster, editor, setOutputMode, setSyntaxErrors, setSyntaxStatusIcon]); + const handleKeyUp = React.useCallback((evt) => { switch (evt.key) { case "Enter": @@ -282,10 +324,19 @@ const ECLEditorToolbar: React.FunctionComponent = ({ return
{showSubmitBtn ? ( - + ) : ( - + )} + = (props const [query, setQuery] = React.useState(""); const [selectedEclSample, setSelectedEclSample] = React.useState(""); const [eclContent, setEclContent] = React.useState(""); + const [syntaxErrors, setSyntaxErrors] = React.useState([]); + const [syntaxStatusIcon, setSyntaxStatusIcon] = React.useState(SyntaxCheckResult.Unknown); const [eclSamples, setEclSamples] = React.useState([]); React.useEffect(() => { @@ -417,6 +470,13 @@ export const ECLPlayground: React.FunctionComponent = (props }, [editor]); useOnEvent(document, "eclwatch-theme-toggle", handleThemeToggle); + const handleEclChange = React.useMemo(() => debounce((evt) => { + if (editor.hasFocus()) { + setSyntaxStatusIcon(SyntaxCheckResult.Unknown); + } + }, 300), [editor]); + useOnEvent(window, "keyup", handleEclChange); + return

{nlsHPCC.title_ECLPlayground}

@@ -437,7 +497,8 @@ export const ECLPlayground: React.FunctionComponent = (props main={} footer={ @@ -453,7 +514,7 @@ export const ECLPlayground: React.FunctionComponent = (props {outputMode === OutputMode.ERRORS ? ( - + ) : outputMode === OutputMode.RESULTS ? ( diff --git a/esp/src/src-react/components/InfoGrid.tsx b/esp/src/src-react/components/InfoGrid.tsx index 2fe63499487..0e13e1d0e7b 100644 --- a/esp/src/src-react/components/InfoGrid.tsx +++ b/esp/src/src-react/components/InfoGrid.tsx @@ -32,11 +32,13 @@ interface FilterCounts { } interface InfoGridProps { - wuid: string; + wuid?: string; + syntaxErrors?: any[]; } export const InfoGrid: React.FunctionComponent = ({ - wuid + wuid = null, + syntaxErrors = [] }) => { const [costChecked, setCostChecked] = React.useState(true); @@ -46,6 +48,7 @@ export const InfoGrid: React.FunctionComponent = ({ const [otherChecked, setOtherChecked] = React.useState(true); const [filterCounts, setFilterCounts] = React.useState({ cost: 0, penalty: 0, error: 0, warning: 0, info: 0, other: 0 }); const [exceptions] = useWorkunitExceptions(wuid); + const [errors, setErrors] = React.useState([]); const [data, setData] = React.useState([]); const { selection, setSelection, @@ -61,6 +64,14 @@ export const InfoGrid: React.FunctionComponent = ({ { key: "others", onRender: () => setOtherChecked(value)} styles={{ root: { paddingTop: 8, paddingRight: 8 } }} /> } ], [filterCounts.cost, filterCounts.error, filterCounts.info, filterCounts.other, filterCounts.warning]); + React.useEffect(() => { + if (syntaxErrors.length) { + setErrors(syntaxErrors); + } else { + setErrors(exceptions); + } + }, [syntaxErrors, exceptions]); + // Grid --- const columns = React.useMemo((): FluentColumns => { return { @@ -137,7 +148,7 @@ export const InfoGrid: React.FunctionComponent = ({ info: 0, other: 0 }; - const filteredExceptions = exceptions.map((row, idx) => { + const filteredExceptions = errors?.map((row, idx) => { if (row.Source === "Cost Optimizer") { row.Severity = "Cost"; } @@ -199,7 +210,7 @@ export const InfoGrid: React.FunctionComponent = ({ }); setData(filteredExceptions); setFilterCounts(filterCounts); - }, [costChecked, errorChecked, exceptions, infoChecked, otherChecked, warningChecked]); + }, [costChecked, errorChecked, errors, infoChecked, otherChecked, warningChecked]); React.useEffect(() => { if (data.length) { diff --git a/esp/src/src/nls/hpcc.ts b/esp/src/src/nls/hpcc.ts index ff03308e6fd..8588cf18c54 100644 --- a/esp/src/src/nls/hpcc.ts +++ b/esp/src/src/nls/hpcc.ts @@ -922,6 +922,7 @@ export = { Statistics: "Statistics", SVGSource: "SVG Source", SyncSelection: "Sync To Selection", + Syntax: "Syntax", SystemServers: "System Servers", tag: "tag", Target: "Target", From 34fa63d0d8dcc1839e263de31f219545dde78cc8 Mon Sep 17 00:00:00 2001 From: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> Date: Wed, 5 Jun 2024 15:37:17 -0400 Subject: [PATCH 079/151] HPCC-31841 ECL Watch v9 redirect to intended url after login Captures the hash portion of the ECL Watch v9 url if present, eg "#/workunits/W20240514-150949/metrics/sg974", redirecting to this intended page after login. 
Signed-off-by: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> --- esp/src/Login.html | 2 ++ esp/src/src/Session.ts | 7 ++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/esp/src/Login.html b/esp/src/Login.html index 42208eceae2..fcac7c377db 100644 --- a/esp/src/Login.html +++ b/esp/src/Login.html @@ -112,6 +112,8 @@ function (ready, nlsHPCCMod) { var nlsHPCC = nlsHPCCMod.default; ready(function () { + window.localStorage.setItem("redirectAfterLogin", window.location.hash); + var loginStr = document.getElementById("loginStr"); var error = document.getElementById("hidden_msg"); var disabled = document.getElementById('disabled_msg'); diff --git a/esp/src/src/Session.ts b/esp/src/src/Session.ts index 281742e4b13..f7a43044e3d 100644 --- a/esp/src/src/Session.ts +++ b/esp/src/src/Session.ts @@ -6,7 +6,7 @@ import { SMCService } from "@hpcc-js/comms"; import { scopedLogger } from "@hpcc-js/util"; import { cookieKeyValStore, sessionKeyValStore, userKeyValStore } from "src/KeyValStore"; import { singletonDebounce } from "../src-react/util/throttle"; -import { parseSearch } from "../src-react/util/history"; +import { parseSearch, replaceUrl } from "../src-react/util/history"; import { ModernMode } from "./BuildInfo"; import * as ESPUtil from "./ESPUtil"; @@ -135,6 +135,11 @@ export function formatCost(value): string { export function initSession() { if (sessionIsActive > -1) { + const redirectUrl = window.localStorage.getItem("redirectAfterLogin") ?? ""; + if (redirectUrl) { + window.localStorage.removeItem("redirectAfterLogin"); + replaceUrl(redirectUrl); + } idleWatcher.on("active", function () { resetESPTime(); }); From 02b1fcc2e11d05bec6b22997fb61d26070aab51d Mon Sep 17 00:00:00 2001 From: Valdir Fumene Junior Date: Wed, 12 Jun 2024 14:36:20 -0300 Subject: [PATCH 080/151] HPCC-32050 -HPCC Portuguese language Update 9.6 Signed-off-by: Valdir Fumene Junior HPCC-32050 -HPCC Portuguese language Update 9.6 tSigned-off-by: Valdir Fumene Junior --- .../PT_BR/ConfiguringHPCC/ConfiguringHPCC.xml | 3116 +++++++++-------- .../ContainerizedHPCCSystemsPlatform.xml | 56 +- .../ContainerizedMods/ConfigureValues.xml | 685 +++- .../ContainerizedMods/ContainerLogging.xml | 910 +++-- .../ContainerizedMods/CustomConfig.xml | 793 ++++- .../ContainerizedMods/LocalDeployment.xml | 19 +- .../SLR-Mods/Contains.xml | 2 +- .../SLR-Mods/Copy.xml | 17 +- .../SLR-Mods/CreateExternalDirectory.xml | 25 +- .../SLR-Mods/DeleteExternalFile.xml | 17 +- .../SLR-Mods/Find.xml | 2 + .../SLR-Mods/MoveExternalFile.xml | 29 +- .../SLR-Mods/RemoteDirectory.xml | 36 +- .../SLR-Mods/TimestampToString.xml | 88 + .../SLR-Mods/getElapsedMs.xml | 55 + .../SLR-includer.xml | 18 +- .../CT_Mods/CT_Comm_Line_DFU.xml | 8 + .../HPCCClientTools/CT_Mods/CT_ECL_CLI.xml | 66 + .../HPCCClientTools/CT_Mods/CT_Overview.xml | 61 +- .../CT_Mods/CT_Overview_withoutIDE.xml | 61 +- docs/PT_BR/HPCCClientTools/CT_Mods/ECLCC.xml | 215 +- .../HPCCSystemAdmin/SA-Mods/SecMgrMod.xml | 10 +- .../SA-Mods/SecMgrModConfDeploy.xml | 3 +- docs/PT_BR/HPCCSystemAdmin/SA-Mods/WUTool.xml | 236 +- .../Inst-Mods/UserSecurityMaint.xml | 25 + .../Inst-Mods/hpcc_ldap.xml | 21 + 26 files changed, 4492 insertions(+), 2082 deletions(-) create mode 100644 docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/TimestampToString.xml create mode 100644 docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/getElapsedMs.xml diff --git a/docs/PT_BR/ConfiguringHPCC/ConfiguringHPCC.xml b/docs/PT_BR/ConfiguringHPCC/ConfiguringHPCC.xml index f9ba3b272d4..45e356df861 100644 --- 
a/docs/PT_BR/ConfiguringHPCC/ConfiguringHPCC.xml +++ b/docs/PT_BR/ConfiguringHPCC/ConfiguringHPCC.xml @@ -799,8 +799,7 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi de Dali - - + Selecione Dali Server no painel Navigator no lado esquerdo. @@ -823,8 +822,7 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi fileref="images/GS-img01c.png"/> ícone do disco para salvar os atributos do - - + @@ -832,19 +830,16 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi Esta seção descreve os atributos do DaliServer. - - - + - - - + @@ -855,19 +850,16 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi Esta seção descreve os atributos que configuram como o Dali manipula o armazenamento de dados do sistema. - - - + - - - + @@ -877,17 +869,14 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi Esta seção descreve a aba DaliServer LDAP. - - - + - - - + @@ -911,17 +900,14 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi Esta seção descreve os atributos do DaliServerPlugin. - - - + - - - + @@ -934,9 +920,8 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi Sistema para obter mais detalhes sobre como configurar um servidor Cassandra como um datastore do sistema. - - + @@ -976,15 +961,13 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi prefixsize - - Um valor inteiro especificando o número mínimo de - caracteres que devem ser fornecidos ao procurar curingas - no repositório. Valores maiores serão mais eficientes , - mas também mais restritivos para os usuários. O padrão é - 2. Assim como o partitions, este valor só entra em vigor - quando um novo repositório da workunit do Cassandra é - criado. - + Um valor inteiro especificando o número + mínimo de caracteres que devem ser fornecidos ao procurar + curingas no repositório. Valores maiores serão mais + eficientes , mas também mais restritivos para os usuários. + O padrão é 2. Assim como o partitions, este valor só entra + em vigor quando um novo repositório da workunit do + Cassandra é criado. @@ -1011,8 +994,7 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi - - + @@ -1046,9 +1028,8 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi Select All e depois pressione o botão OK . - - - + @@ -1065,17 +1046,14 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi Esta seção descreve os atributos do Dafilesrv. - - - + - - - + @@ -1113,17 +1091,14 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi Esta seção descreve os atributos do DfuServer. - - - + - - - + @@ -1131,17 +1106,14 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi Esta seção descreve as opções DfuServer SSH. - - - + - - - + @@ -1161,9 +1133,8 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi componentes com objetivo de determinar os diretórios que eles usarão para várias funções. 
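[Editor's illustration] A worked example of the substitution used by the directory entries in the table that follows (the instance name is illustrative): in the default log path /var/log/[NAME]/[INST], [NAME] typically expands to the environment's directory name (HPCCSystems by default) and [INST] to the component instance name, so a Dali instance named mydali would normally write its logs to /var/log/HPCCSystems/mydali.
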
- - - + @@ -1171,7 +1142,7 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi - + Nome @@ -1179,11 +1150,11 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi Descrição + + - - log - + log /var/log/[NAME]/[INST] @@ -1191,9 +1162,7 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi - - temp - + temp /var/lib/[NAME]/[INST]/temp @@ -1201,9 +1170,7 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi - - data - + data @@ -1211,9 +1178,7 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi - - data2 - + data2 @@ -1222,9 +1187,7 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi - - data3 - + data3 @@ -1232,9 +1195,7 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi - - mirror - + mirror @@ -1242,9 +1203,7 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi - - query - + query @@ -1270,8 +1229,7 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi Adicionar uma Drop Zone: - - + Clique com o botão direito do mouse no painel Navigator, no lado esquerdo, e escolha New @@ -1282,8 +1240,7 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi Selecione Drop Zone - - + Atributos da Drop Zone @@ -1295,8 +1252,7 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi Para alterar os atributos da Drop Zone: - - + Na abaAttributes, selecione o atributo a ser modificado. @@ -1315,18 +1271,14 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi Clique no ícone de disco para salvar. - - + - - - + - - - + - - - + @@ -1455,1665 +1405,1755 @@ sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/envi Esta seção descreve a aba EclAgent Options. - - - + - - - - + + Programas PIPE permitidos + + Na versão 9.2.0 e superior, os comandos usados numa ação + PIPE são restritos por padrão. No entanto, por razões de legado, + o comportamento padrão de estoque é diferente em implantações + bare-metal (em ambiente físico) e containerizadas. Em ambos os + tipos de sistemas, se allowedPipePrograms não estiver definido, + então todos os programas, exceto os "built-in", são restritos (O + único programa incorporado atualmente é o 'roxiepipe'). + + Em bare-metal, o environment.xml padrão inclui um valor de + configuração de "*" para allowedPipePrograms. Significa, por + padrão, que qualquer programa PIPE pode ser utilizado. - - Notas sobre EclAgent Process + + Em um sistema seguro, isso deve ser + removido ou editado para evitar que programas arbitrários, + incluindo programas de sistema, de serem + executados. + - Esta aba permite que você adicione notas pertinentes à - configuração do componente. Isso pode ser útil para manter um - registro de alterações e para comunicar essas informações aos - parceiros. - - + + + Notas sobre EclAgent Process + + Esta aba permite que você adicione notas pertinentes à + configuração do componente. Isso pode ser útil para manter um + registro de alterações e para comunicar essas informações aos + parceiros. + + ECL CC Server Process + + + Instâncias do Ecl CC Server + + + + + Selecione Ecl CC Server - myeclccserver no painel + Navigator no lado esquerdo. + + + + Selecione a aba Instances. 
+ + + + Na coluna do computador, escolha um nó da lista + suspensa, conforme mostrado abaixo: + + + + Clique no ícone do disco para salvar + + + + + + + a aba Atributos do Ecl CC Server + + Esta seção descreve a aba Ecl CC Server + Attributes. + + + + + + + + + + + + + + Opções do EclCC Server Process + + Para adicionar uma opção personalizada, clique com o + botão direito e selecione adicionar. Essas opções são passadas + para o compilador eclcc. + + Veja o capítulo Compilador ECL no manual de Ferramentas do Cliente para + detalhes. + + + + + + + + Notas sobre EclCC Server Process + + Esta aba permite que você adicione notas pertinentes à + configuração do componente. Isso pode ser útil para manter um + registro de alterações e para comunicar essas informações aos + parceiros. + + + ECL Scheduler + + + Instâncias + + + + Selecione ECL + Scheduler no painel Navigator ao lado + esquerdo. + - - ECL CC Server Process + + Selecione a aba Instances. + - - Instâncias do Ecl CC Server + + Na coluna do computador, escolha um nó da lista + suspensa, conforme mostrado abaixo: + - - - - Selecione Ecl CC Server - myeclccserver no painel - Navigator no lado esquerdo. - + + Clique no ícone do disco para salvar a + aba + + + - - Selecione a aba Instances. - + + EclScheduler Attributes. - - Na coluna do computador, escolha um nó da lista - suspensa, conforme mostrado abaixo: - + Esta seção descreve a aba EclScheduler + Attributes. - - Clique no ícone do disco para salvar - - - - + + + - - a aba Atributos do Ecl CC Server + - Esta seção descreve a aba Ecl CC Server Attributes. + + + + - - - + + Notas sobre EclScheduler - + Esta aba permite que você adicione notas pertinentes à + configuração do componente. Isso pode ser útil para manter um + registro de alterações e para comunicar essas informações aos + parceiros. + + + ESP Server - - - - + + Instâncias do Esp Process - - Opções do EclCC Server Process + + + Selecione ESP - + myespno painel Navigator do lado + esquerdo. + - Para adicionar uma opção personalizada, clique com o botão - direito e selecione adicionar. Essas opções são passadas para o - compilador eclcc. + + Selecione a aba Instances. + - Veja o capítulo Compilador ECL no manual de Ferramentas do Cliente para - detalhes. + + Na coluna do computador, escolha um nó da lista + suspensa, conforme mostrado abaixo: + - + + Clique no ícone do disco para + salvar + + + - - + - - Notas sobre EclCC Server Process + - Esta aba permite que você adicione notas pertinentes à - configuração do componente. Isso pode ser útil para manter um - registro de alterações e para comunicar essas informações aos - parceiros. - - + + Esp - myesp Attributes - - ECL Scheduler + Esta seção descreve a aba Esp - myesp Attributes. - - Instâncias + + + - - - Selecione ECL Scheduler - no painel Navigator ao lado esquerdo. - + - - Selecione a aba Instances. - + + + - - Na coluna do computador, escolha um nó da lista suspensa, - conforme mostrado abaixo: - + - - Clique no ícone do disco para salvar a aba - - - + As conexões HTTP persistentes podem causar problemas de + compatibilidade com versões anteriores com clientes HTTP não + padrão. Você pode desativar esse recurso definindo + maxPersistentRequests como 0. + - - EclScheduler Attributes. + + Esp - myesp Service Bindings - Esta seção descreve a aba EclScheduler Attributes. + Esta seção descreve a aba Esp - myesp Service Bindings. + É necessários etapas adicionais para configurar as conexões do + serviço. 
- - - + + + - + Você deve primeiro adicionar as ligações de serviço na + primeira tabela (botão direito do mouse, Add). Em seguida, + você configuraria os atributos nas outras tabelas nessa aba. A + tabela a seguir descreve a tabela URL + Authentication - - - - + - - Notas sobre EclScheduler + + + - Esta aba permite que você adicione notas pertinentes à - configuração do componente. Isso pode ser útil para manter um - registro de alterações e para comunicar essas informações aos - parceiros. - - + - - ESP Server + As tabelas a seguir descrevem as ESPProcess Service + Bindings e funcionalidades de + autenticação. - - Instâncias do Esp Process + - - - Selecione ESP - myespno - painel Navigator do lado esquerdo. - + + + - - Selecione a aba Instances. - + + - - Na coluna do computador, escolha um nó da lista suspensa, - conforme mostrado abaixo: - + + - - Clique no - ícone do disco para salvar - - - + + - + - + + Para adicionar flags de acesso a + recursos a uma configuração preexistente do ECL + Watch: + - - Esp - myesp Attributes + Se você atualizar a plataforma, mas estiver usando uma + configuração preexistente, poderá encontrar uma situação em que + as flags de acesso de nível de recurso não sejam criadas + automaticamente. Flags ausentes podem negar acesso a usuários + que tentam acessar recursos no sistema. - Esta seção descreve a aba Esp - myesp Attributes. + + + No Configuration Manager, abra sua cópia do arquivo + environment.xml e habilite Write + Access. + - - - + + À esquerda, selecione o ESP que tá hosteando o serviço + ECL Watch. + - + + À direita, selecione a aba ESP Service + Bindings. - - - + Manualmente adicione a flad de acesso ao novo serviço + na tabela Feature Authentication. + - + + Clique com botão direito no tabela Feature + Authentication e selecione Add - As conexões HTTP persistentes podem causar problemas de - compatibilidade com versões anteriores com clientes HTTP não padrão. - Você pode desativar esse recurso definindo - maxPersistentRequests como 0. - + + + + - - Esp - myesp Service Bindings + + Providencie os valores para path e resource + (normalmente o mesmo valor, por exemplo, + WsStoreAccess). + - Esta seção descreve a aba Esp - myesp Service Bindings. É - necessários etapas adicionais para configurar as conexões do - serviço. + + Clique no ícone para salvar + - - - + + Copie o arquivo para cada nó, em seguida reinice o + sistema. - Você deve primeiro adicionar as ligações de serviço na - primeira tabela (botão direito do mouse, Add). Em seguida, você - configuraria os atributos nas outras tabelas nessa aba. A tabela a - seguir descreve a tabela URL - Authentication + As OUs devem ser criadas automaticamente no LDAP após + o reinicio. + + - + + Esp - myesp Authentication - - - + Esta seção descreve a aba Esp - myesp Service + Authentication - + + + - As tabelas a seguir descrevem as ESPProcess Service Bindings e - funcionalidades de - autenticação. + - + + + - - - + - - + Informações adicionais sobre os métodos de autenticação + disponíveis: - - + + + + - - + - + + + none - - Para adicionar flags de acesso a recursos a - uma configuração preexistente do ECL Watch: - + não utiliza autenticação + - Se você atualizar a plataforma, mas estiver usando uma - configuração preexistente, poderá encontrar uma situação em que as - flags de acesso de nível de recurso não sejam criadas automaticamente. - Flags ausentes podem negar acesso a usuários que tentam acessar - recursos no sistema. 
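[Editor's illustration] An illustrative symptom, using the service name from the example further below: after an upgrade with a preexisting configuration, a user opening a WsStore-backed page in ECL Watch may be denied access because no WsStoreAccess feature flag was ever created in LDAP. Adding the flag manually through the Feature Authentication table, as described in the steps below, should restore access once the configuration is copied to each node and the system restarted.
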
+ + ldap - - - No Configuration Manager, abra sua cópia do arquivo - environment.xml e habilite Write - Access. - + usa o protocolo Lightweight Directory Access + para autenticação + - - À esquerda, selecione o ESP que tá hosteando o serviço ECL - Watch. - + + ldaps - - À direita, selecione a aba ESP Service - Bindings. + semelhante ao LDAP, mas usa um protocolo mais + seguro (TLS) + - Manualmente adicione a flad de acesso ao novo serviço na - tabela Feature Authentication. - + + secmgrPlugin - - Clique com botão direito no tabela Feature Authentication e - selecione Add + utilizado para configurar o Security Manager + Plugin + + + + + + - - - - + + Esp - AuthDomain - - Providencie os valores para path e resource (normalmente o - mesmo valor, por exemplo, WsStoreAccess). - + - - Clique no ícone para salvar - + O atributo AuthDomain + permite definir as configurações usadas para o gerenciamento + de sessões. - - Copie o arquivo para cada nó, em seguida reinice o - sistema. + - As OUs devem ser criadas automaticamente no LDAP após o - reinicio. - - + + + + + Esp - myesp HTTPS + + Esta seção descreve os atributos do Aba Esp - myesp + HTTPS. + + + + + + O atributo cipherList + permite que você defina a lista ordenada de cifras disponíveis + para uso pelo openssl. Veja a documentação em openssl.org para + mais informações sobre cifras. + + + + + + + + + + Notas sobre EspProcess + + Esta aba permite que você adicione notas pertinentes à + configuração do componente. Isso pode ser útil para manter um + registro de alterações e para comunicar essas informações aos + parceiros. + + + ESP Services + + O ESP Services fornecem um meio para adicionar + funcionalidade a um ESP Server. + + + ECL Watch Service + + O Ecl Watch permite que você configure opções para o + utilitário ECL Watch. + + + + + + + Definições do atributo ECL + Watch. + + + + + + + - - Esp - myesp Authentication + + Atributos do ECL Watch + Monitoring + + + - Esta seção descreve a aba Esp - myesp Service - Authentication + + + + + + Configurando a integração do ELK Log + Visualization + - - - + O HPCC Systems fornece um mecanismo para integrar + visualizações baseadas em ELK (ElasticSearch, Logstash e + Kibana) no ECL Watch. Esse recurso pode ser configurado e + ativado pelo gerenciador do HPCC Configuration Manager + + Para configurar o componente ELK Log Visualization, + clique e expanda o link ESP + Service no lado esquerdo e selecione o + link EclWatch . Em seguida, + selecione a aba Elk Log + Visualization no lado direito. + + A aba ELK Log Visualization no ECL Watch: + + + + + + Para configurar a integração de visualização ELK, + forneça as seguintes informações: + + No campo kibanaAddress, + forneça o endereço IP do seu componente Kibana. Por + exemplo: http://123.123.123.123 + + + + No campo kibanaPort, forneça + o número da porta Kibana. Por exemplo: 5601 + + + + No campo kibanaEntryPointURI, + insira o URI do Kibana apontando para o painel do Kibana + compartilhado. + + Por exemplo: /app/kibana#/dashboard/etc. (obtido + na aba “share" em Kibana) + + + + + + + + WsECL Service + + O serviço WsECL permite configurar opções para o + utilitário WsECL. + + + + + + Os atributos de configuração do Ws ECL. + + + + + + + + + + Opções dos atributos Ws ECL VIPS. + + + + + + + + Tabela de Restrições do Ws ECL Target + + + + + + + + + + Ws_Store + + Uma instância do ws_store está incluída no ECL Watch, + mas você pode optar por adicionar outro serviço ESP ws_store à + sua plataforma HPCC Systems. + + Para adicionar o serviço Ws_Store. 
+ + + + Clique com o botão direito do mouse no componente + Software no painel + Navegador (no lado + direito), escolha New ESP Services + e, em seguida, ws_store na lista + suspensa. + + + + Configure os atributos ws_store conforme + necessário. + + + + Clique no ícone de disco para + salvar. + + Os seguintes valores são os atributos + configuráveis para ws_store: + + + + + + + + + + Ws_SQL + + Você pode optar por adicionar o serviço ESP ws_sql à sua + plataforma HPCC Systems. + + Para adicionar o serviço ws_sql. + + + + + Clique com o botão direito do mouse no componente + Software no painel + Navigator (no lado + direito), escolha New ESP Services + e, em seguida, ws_sql na lista suspensa. + + + + Configure o ws_sql fornecendo o nome do + serviço. + + + + Clique no ícone do disco para + salvar o + + + + + + FTSlave Process. + + FTSlave é um processo auxiliar que todo nó precisa. Esta + seção descreve uma instalação do FTSlave. + + + Instâncias + + + + Selecione ESP - MyEsp no painel do navegador ao lado + esquerdo. + - + + Selecione a aba Instances. + - - - + + - + Clique com o botão direito em uma máquina na coluna + do computador e selecione Add Instance. - Informações adicionais sobre os métodos de autenticação - disponíveis: + + + + - - - - + + - + Selecione todos os computadores da lista, depois + pressione o botão OK + . - - - none + + + + - não utiliza autenticação - + + Clique no ícone do disco para + salvar o + + + + + + FTSlave Process. + + Esta seção descreve a aba de atributos + FTSlaveProcess. + + + + + + + + + + Notas sobre FtSlave Process + + Esta aba permite que você adicione notas pertinentes à + configuração do componente. Isso pode ser útil para manter um + registro de alterações e para comunicar essas informações aos + parceiros. + + + LDAP Server Process + + Esta seção descreve os atributos de configuração de uma + instalação do LDAPServer no ConfigManager. Para obter uma + descrição completa de como adicionar autenticação LDAP, consulte + a seção Usando autenticação LDAP no + documento Instalando e executando a + plataforma HPCC Systems. - - ldap + + + - usa o protocolo Lightweight Directory Access para - autenticação - + - - ldaps + - semelhante ao LDAP, mas usa um protocolo mais - seguro (TLS) - + + LDAP Server Process Instances - - secmgrPlugin + Essa aba permite adicionar instâncias à sua configuração + LDAP. Para adicionar instâncias, você teria adicionado + anteriormente os computadores LDAP na seção Hardware. Para + obter uma descrição completa de como adicionar autenticação + LDAP, consulte a seção Usando autenticação LDAP no + documento Instalando e + executando a plataforma HPCC Systems. - utilizado para configurar o Security Manager - Plugin - - - - - - + + + Na aba Instances, + clique com o botão direito na tabela à direita e selecione + Add Instances... + - - Esp - AuthDomain + + Selecione o computador a ser usado clicando na caixa + ao lado dele. - + Este é o computador que foi adicionado anteriormente + na área Hardware / + Add New Computers. + + + + + + A conta do administrador do HPCC Systems + + Você pode configurar uma conta de administrador do HPCC + Systems com direitos administrativos limitados. O + administrador do HPCC Systems não precisa ser uma conta de + administrador LDAP. Isso permite que o administrador do HPCC + System possa configurar usuários, grupos e definir permissões + no HPCC Systems sem ter direitos para executar outras funções + administrativas do LDAP. 
Para usar este recurso: + + Crie um grupo LDAP para conter todos os usuários + do administrador do HPCC. Por exemplo: + "HPCCAdminGroup" + + + + No gerenciador de configuração HPCC Systems, + navegue até esta página (LDAP Server Process) e insira o + nome do grupo Administrador HPCC Systems como o valor no + campo adminGroupName. + + + + Adicione usuários (Administrador do HPCC Systems) + a este novo grupo. + + + + Salve e implemente o novo arquivo de configuração + e reinicie o ESP para aplicar a nova + configuração. + + + + + + Notas sobre LDAP Server Process + + Esta aba permite que você adicione notas pertinentes à + configuração do componente. Isso pode ser útil para manter um + registro de alterações e para comunicar essas informações aos + parceiros. + + + Sasha Server + + + Instâncias + + + + Selecione Sasha Server no menu do lado + esquerdo. + - O atributo AuthDomain permite - definir as configurações usadas para o gerenciamento de - sessões. + + Selecione a aba Instances. + - + + Na coluna do computador, escolha um nó da lista + suspensa, conforme mostrado abaixo: + + - - + + - - Esp - myesp HTTPS + + Atributos Sasha Server - Esta seção descreve os atributos do Aba Esp - myesp - HTTPS. + - - - + Esta seção descreve os valores da aba + SashaServerProcessAttribute. - O atributo cipherList permite - que você defina a lista ordenada de cifras disponíveis para uso pelo - openssl. Veja a documentação em openssl.org para mais informações - sobre cifras. + - + - - - - + + + + - - Notas sobre EspProcess + - Esta aba permite que você adicione notas pertinentes à - configuração do componente. Isso pode ser útil para manter um - registro de alterações e para comunicar essas informações aos - parceiros. - - + + SashaServer Process Archiver - - ESP Services + Esta seção descreve a aba SashaServer Process + Archiver - O ESP Services fornecem um meio para adicionar funcionalidade a - um ESP Server. + + + - - ECL Watch Service + - O Ecl Watch permite que você configure opções para o - utilitário ECL Watch. + + + - - - + + - - Definições do atributo ECL Watch. - + + SashaServer Process Coalescer - + Esta seção descreve a aba SashaServer Process + Coalescer - - - + + + - - Atributos do ECL Watch Monitoring - + - + + + + - - - + - - Configurando a integração do ELK Log - Visualization - + + SashaServer Process DfuXRef - O HPCC Systems fornece um mecanismo para integrar - visualizações baseadas em ELK (ElasticSearch, Logstash e Kibana) no - ECL Watch. Esse recurso pode ser configurado e ativado pelo - gerenciador do HPCC Configuration Manager + Esta seção descreve a aba SashaServer Process + DfuXref - Para configurar o componente ELK Log Visualization, clique e - expanda o link ESP Service no lado - esquerdo e selecione o link - EclWatch . Em seguida, selecione a aba Elk Log Visualization no lado direito. + + + - A aba ELK Log Visualization no ECL Watch: + - - - + + + + - Para configurar a integração de visualização ELK, forneça as - seguintes informações: - - No campo kibanaAddress, forneça o - endereço IP do seu componente Kibana. Por exemplo: - http://123.123.123.123 - + - - No campo kibanaPort, forneça o - número da porta Kibana. Por exemplo: 5601 - + + SashaServer Process DfuExpiry - - No campo kibanaEntryPointURI, - insira o URI do Kibana apontando para o painel do Kibana - compartilhado. + Esta seção descreve a aba SashaServer Process + DfuExpiry - Por exemplo: /app/kibana#/dashboard/etc. 
(obtido na aba - “share" em Kibana) - - + + + - - + - - WsECL Service + - O serviço WsECL permite configurar opções para o utilitário - WsECL. + + + + - - - + - Os atributos de configuração do Ws ECL. + + SashaServer Process ThorQMon - + Esta seção descreve a aba SashaServer Process + ThorQMon - - - + + + - + - Opções dos atributos Ws ECL VIPS. + + + + - + - - - + + SashaServer Process DaFileSrvMonitor - Tabela de Restrições do Ws ECL Target + Esta seção descreve a aba SashaServer Process + DaFileSrvMonitor - + + + - - - - + - - Ws_Store + + + + - Uma instância do ws_store está incluída no ECL Watch, mas você - pode optar por adicionar outro serviço ESP ws_store à sua plataforma - HPCC Systems. + + Notas sobre SashaServer Process - Para adicionar o serviço Ws_Store. + Esta aba permite que você adicione notas pertinentes à + configuração do componente. Isso pode ser útil para manter um + registro de alterações e para comunicar essas informações aos + parceiros. + + + Thor - - - Clique com o botão direito do mouse no componente - Software no painel Navegador (no lado direito), escolha - New ESP Services e, em - seguida, ws_store na lista - suspensa. - + Esta seção detalha como definir um cluster de refinaria de + dados (Thor). Antes de começar, você deve decidir a largura do + cluster (ou seja, quantos nós filhos você terá). - - Configure os atributos ws_store conforme - necessário. - + + + Selecione Thor Cluster - + mythor no painel Navigator no lado + esquerdo. + - - Clique no ícone de disco para salvar. - - Os seguintes valores são os atributos configuráveis - para ws_store: - - - - - - - - - - Ws_SQL - - Você pode optar por adicionar o serviço ESP ws_sql à sua - plataforma HPCC Systems. - - Para adicionar o serviço ws_sql. - - - - - Clique com o botão direito do mouse no componente - Software no painel Navigator (no lado direito), escolha - New ESP Services e, em - seguida, ws_sql na lista - suspensa. - - - - Configure o ws_sql fornecendo o nome do serviço. - - - - Clique no ícone do disco para salvar - o - - - - - - - - FTSlave Process. - - FTSlave é um processo auxiliar que todo nó precisa. Esta seção - descreve uma instalação do FTSlave. - - - Instâncias - - - - Selecione ESP - MyEsp no painel do navegador ao lado - esquerdo. - - - - Selecione a aba Instances. - - - - - - Clique com o botão direito em uma máquina na coluna do - computador e selecione Add Instance. - - - - - - - - - - Selecione todos os computadores da lista, depois pressione - o botão OK . - - - - - - - - Clique no - ícone do disco para salvar o - - - - - - FTSlave Process. - - Esta seção descreve a aba de atributos FTSlaveProcess. - - - - - - - - - - Notas sobre FtSlave Process - - Esta aba permite que você adicione notas pertinentes à - configuração do componente. Isso pode ser útil para manter um - registro de alterações e para comunicar essas informações aos - parceiros. - - - - - LDAP Server Process - - Esta seção descreve os atributos de configuração de uma - instalação do LDAPServer no ConfigManager. Para obter uma descrição - completa de como adicionar autenticação LDAP, consulte a seção - Usando autenticação LDAP no documento Instalando e executando a - plataforma HPCC Systems. - - - - - - - - - - - LDAP Server Process Instances - - Essa aba permite adicionar instâncias à sua configuração LDAP. - Para adicionar instâncias, você teria adicionado anteriormente os - computadores LDAP na seção Hardware. 
Para obter uma descrição - completa de como adicionar autenticação LDAP, consulte a seção - Usando autenticação LDAP no documento Instalando e executando a plataforma HPCC - Systems. - - - - Na aba Instances, clique - com o botão direito na tabela à direita e selecione Add Instances... - - - - Selecione o computador a ser usado clicando na caixa ao - lado dele. - - Este é o computador que foi adicionado anteriormente na - área Hardware / Add New Computers. - - - - - - A conta do administrador do HPCC Systems - - Você pode configurar uma conta de administrador do HPCC - Systems com direitos administrativos limitados. O administrador do - HPCC Systems não precisa ser uma conta de administrador LDAP. Isso - permite que o administrador do HPCC System possa configurar - usuários, grupos e definir permissões no HPCC Systems sem ter - direitos para executar outras funções administrativas do LDAP. Para - usar este recurso: - - Crie um grupo LDAP para conter todos os usuários do - administrador do HPCC. Por exemplo: "HPCCAdminGroup" - - - - No gerenciador de configuração HPCC Systems, navegue até - esta página (LDAP Server Process) e insira o nome do grupo - Administrador HPCC Systems como o valor no campo adminGroupName. - - - - Adicione usuários (Administrador do HPCC Systems) a este - novo grupo. - - - - Salve e implemente o novo arquivo de configuração e - reinicie o ESP para aplicar a nova configuração. - - - - - - Notas sobre LDAP Server Process - - Esta aba permite que você adicione notas pertinentes à - configuração do componente. Isso pode ser útil para manter um - registro de alterações e para comunicar essas informações aos - parceiros. - - - - - Sasha Server - - - Instâncias - - - - Selecione Sasha Server no menu do lado esquerdo. - - - - Selecione a aba Instances. - - - - Na coluna do computador, escolha um nó da lista suspensa, - conforme mostrado abaixo: - - - - - - - - Atributos Sasha Server - - - - Esta seção descreve os valores da aba - SashaServerProcessAttribute. - - - - - - - - - - - - - - SashaServer Process Archiver - - Esta seção descreve a aba SashaServer Process Archiver - - - - + + Selecione a aba + Topology . + - + + Expanda a Topology, se necessário, clique com o botão + direito do mouse no Master e selecione Delete. + + - - - + Isso exclui o Thor de um nó de amostra. - - + Você irá substituir isso por um cluster de vários + nós. - - SashaServer Process Coalescer + + + - Esta seção descreve a aba SashaServer Process Coalescer + Clique com o botão direito do mouse na Topologia e + selecione Add Master. - - - + + + + - + + - - - - + Selecione um computador na lista e pressione o botão + OK. - + + + + - - SashaServer Process DfuXRef + + - Esta seção descreve a aba SashaServer Process DfuXref + Clique com o botão direito no mestre e selecione Add + Slaves. - - - + + + + - + + - - - - + Selecione os computadores a serem usados como filhos + na lista e pressione o botão OK. Use CTRL+CLICK para + multi-seleção ou SHIFT+CLICK para selecionar um + intervalo. - + Os nós agora são exibidos abaixo do nó + do Thor Master. - - SashaServer Process DfuExpiry + + + + - Esta seção descreve a aba SashaServer Process DfuExpiry + + Selecione Thor Cluster - mythor no painel Navigator no + lado esquerdo. + - - - + + - + Selecione a aba Attributes. 
- + + + + - - - - + + É possível alterar o valor localThor para false + - + + Clique no ícone do disco para + salvar + + - - SashaServer Process ThorQMon + - Esta seção descreve a aba SashaServer Process ThorQMon + + a alteração da topologia do Thor - - - + Se você deseja designar um nó diferente como o mestre + Thor ao configurar um sistema de vários nós, siga estas + etapas. - + + + Selecione Thor Cluster - + mythor no painel do Navigator ao lado + esquerdo. + - - - - + + Selecione a aba + Topology . + - + + Clique com o botão direito do mouse no nó + mestre + - - SashaServer Process DaFileSrvMonitor + + + + Selecione a opção Replace + Master + + + + + + + + + + + + + + + + + + + + + Você só deve usar esse recurso quando + configurar inicialmente seu sistema. Se houver dados + nos nós ao tentar trocar o mestre, você corre o + risco de perder ou corromper alguns dados. + + + + + + + + + + + + + Atributos ThorCluster + + Esta seção descreve a aba Thor Cluster + Attributes. + + + + + + + Configurações de Memória do Thor + + Quando o globalMemorySize é deixado + indefinido, Thor [master] detecta a memória física total e + aloca 75% dela. Se houver múltiplos escravos por nó + (slavesPerNode>1), ele divide o total entre os escravos. + Se globalMemorySize for definido, ele aloca a quantidade de + memória para cada slave. O atributo masterMemorySize aloca + memória para o mestre Thor. Se omitido, o Thor master usa + globalMemorySize ou o padrão 75% de memória. + + Em sistemas com muita memória, o padrão de 75% da + memória física é provavelmente muito conservador e reservar + o total físico menos 2GB (para o SO e outros processos) é + sensato. Você deve então dividir esse número pelo número de + slavesPerNode. + + Se houver vários Thors compartilhando os mesmos nós, o + globalMemorySize deverá ser configurado para levar isso em + conta. + + Por exemplo, se houver 2 Thors, cada um com 2 escravos + por caixa, isso significará que há 4 escravos por nó físico. + Portanto, você deve usar uma fórmula semelhante à seguinte + em seus cálculos ao configurar globalMemorySize: + + globalMemorySize = (total-physical-memory)-2GB / (2*2) + + Sem nenhuma configuração especificada, o Thor assume + que tem acesso exclusivo à memória e, portanto, usaria muito + (porque cada Thor não está ciente da configuração e uso de + memória do outro). + + Se localThor estiver + configurado como true e masterMemorySize e globalMemorySize não forem + especificados, os padrões serão 50% para globalMemorySize (dividido por + slavesPerNode) e 25% para + masterMemorySize. + + Embora uma configuração possa ser definida usando + limites de memória superiores que excedam a memória física + total, o Thor não reservará a memória com antecedência. Isso + somente ocorrerá em casos de problemas de memória quando e + se seus jobs usarem toda a memória. Portanto, por exemplo, + dois Thors configurados para usar toda a memória disponível + podem coexistir pacificamente até que uma consulta em cada + um esteja usando simultaneamente mais memória do que o nó + disponível. + + + + + + + + + + + Programas PIPE permitidos + + Na versão 9.2.0 e superior, os comandos usados numa ação + PIPE são restritos por padrão. No entanto, por razões de + legado, o comportamento padrão de estoque é diferente em + implantações bare-metal (em ambiente físico) e + containerizadas. 
Em ambos os tipos de sistemas, se + allowedPipePrograms não estiver definido, então todos os + programas, exceto os "built-in", são restritos (O único + programa incorporado atualmente é o 'roxiepipe'). + + Em bare-metal, o environment.xml padrão inclui um valor + de configuração de "*" para allowedPipePrograms. Significa, por + padrão, que qualquer programa PIPE pode ser utilizado. + + + Em um sistema seguro, isso deve ser + removido ou editado para evitar que programas arbitrários, + incluindo programas de sistema, de serem + executados. + + + + - Esta seção descreve a aba SashaServer Process - DaFileSrvMonitor + - - - + + Opções do SSH ThorCluster - + Esta seção descreve a aba ThorCluster SSH + Options. - - - - + + + - - Notas sobre SashaServer Process + - Esta aba permite que você adicione notas pertinentes à - configuração do componente. Isso pode ser útil para manter um - registro de alterações e para comunicar essas informações aos - parceiros. - - + + + + - - Thor + + Depuração do ThorCluster - Esta seção detalha como definir um cluster de refinaria de dados - (Thor). Antes de começar, você deve decidir a largura do cluster (ou - seja, quantos nós filhos você terá). + A aba de depuração é somente para uso interno. + - - - Selecione Thor Cluster - - mythor no painel Navigator no lado esquerdo. - + - - Selecione a aba Topology - . - + + Nó Swap ThorCluster - - Expanda a Topology, se necessário, clique com o botão - direito do mouse no Master e selecione Delete. - - + Esta seção descreve a aba ThorCluster Swap Node - Isso exclui o Thor de um nó de amostra. + + + - Você irá substituir isso por um cluster de vários nós. + - - - + + + + - Clique com o botão direito do mouse na Topologia e selecione - Add Master. + - - - - + + Notas sobre ThorCluster - - + Esta aba permite que você adicione notas pertinentes à + configuração do componente. Isso pode ser útil para manter um + registro de alterações e para comunicar essas informações aos + parceiros. + + + Roxie - Selecione um computador na lista e pressione o botão - OK. + Esta seção detalha como definir um cluster do Mecanismo de + Entrega Rápida de Dados (Rapid Data Delivery Engine - Roxie). + Antes de começar, você deve decidir a largura do cluster (ou + seja, quantos nós de agente você terá). - - - - + - - + + + Selecione Roxie + Cluster no painel do navegador ao lado + esquerdo. + + Observação: Se você + não especificou um valor no campo Number of nodes + for Roxie cluster (Número de nós para o cluster Roxie) + ao configurar seu ambiente pela primeira vez, + você não terá um Roxie Cluster. Para adicionar um componente + Roxie Cluster: Clique com o botão direito do mouse no + componente Software no + Painel Navigator, selecione New + Components e depois roxie nas listas suspensas. + - Clique com o botão direito no mestre e selecione Add - Slaves. + + Selecione a aba Servidores . + - - - - + + - - + Clique com o botão direito do mouse nos Servidores + Roxie e selecione Reconfigure + Servers. - Selecione os computadores a serem usados como filhos na - lista e pressione o botão OK. Use CTRL+CLICK para multi-seleção ou - SHIFT+CLICK para selecionar um intervalo. + + + + - Os nós agora são exibidos abaixo do nó do - Thor Master. + + - - - - + Selecione os computadores a serem usados como + servidores na lista e pressione o botão OK. - - Selecione Thor Cluster - mythor no painel Navigator no lado - esquerdo. - + + + + - - + + Selecione a aba + Redundancy . + - Selecione a aba Attributes. 
+ + - - - - + Selecione o esquema de redundância a ser usado. + Normalmente, isso é redundância cíclica, conforme mostrado + abaixo. - - É possível alterar o valor localThor para false - + + + + - - Clique no - ícone do disco para salvar - - + + Clique no ícone do disco para + salvar + - + + Feche o Gerenciador de Configuração, pressionando + ctrl+C na janela de comando em que está sendo + executado. + + + + + Atributos de Configuração do Roxie + + O Roxie possui muitos atributos configuráveis que podem + ser usados para personalizar e ajustar suas necessidades + específicas. A seção a seguir se expande em cada uma das guias + Roxie e nos atributos disponíveis. Há informações adicionais + sobre a configuração do Roxie na seção imediatamente após + essas tabelas. + + + + + + + <sect3> + <title>Itens adicionais de Configuração Roxie + + + + + Conexões Persistentes para o + Roxie + + + As conexões persistentes podem causar problemas de + compatibilidade com versões anteriores com clientes HTTP não + padrão. Você pode desativar esse recurso definindo + maxHttpConnectionRequests como 0 ou + 1. + + Além disso, conexões persistentes podem afetar o + balanceamento de carga do Roxie. Se isso for uma preocupação, + você poderá ajustar os valores + maxHttpConnectionRequests e + maxHttpKeepAliveWait para otimizar por + quanto tempo todas as transações de uma conexão persistente + específica vão para um único nó, em vez de serem distribuídas + pelo cluster. + + + Adicionar servidores ao Roxie + Farm + + + Para adicionar servidores ao Roxie Farm + + + + Selecione o Roxie Cluster - + myroxie (padrão) na janela Navigator no lado + esquerdo. + + + + Selecione a aba Servidores . + + + + Clique com o botão direito do mouse em Roxie Servers. + + + + Selecione Reconfigure + Servers. + + + + Pressione o botão + Add Hardware. + + + + Digite os valores para os novo(s) servidore(s) na + caixa de diálogo e pressione OK. + + Todos os servidores configurados são usados + quando você cria uma porta para escutar. + + + + OBSERVAÇÃO + + + Se estiver trabalhando com um arquivo de ambiente + antigo, esse processo foi alterado. Você não precisa + mais especificar para um servidor usar uma porta + específica. + + + + + + + Redundância + + O Roxie pode ser configurado para utilizar alguns + modelos de redundância diferentes. + + Simple Redundancy - um canal por agente. Mais + comumente usado para um único nó Roxie. + + + + Full Redundancy - mais agente que o número de + canais. Vários agentes hospedam cada canal. + + + + Overloaded Redundancy - Existem vários canais por + agente. + + + + Cyclic Redundancy - Cada nó hospeda vários canais + em rotação. A configuração mais usada. + + + + + Topology + + Esta seção descreve a aba Topology. - - a alteração da topologia do Thor + + - Se você deseja designar um nó diferente como o mestre Thor ao - configurar um sistema de vários nós, siga estas etapas. + + + - - - Selecione Thor Cluster - - mythor no painel do Navigator ao lado - esquerdo. - + - - Selecione a aba Topology - . - + + + Nome do Atributo - - Clique com o botão direito do mouse no nó mestre - + Definição + - - + + + Topology + - Selecione a opção Replace - Master + descreve a topologia do sistema + - - - + + + Cluster - thor + - - - + descreve os clusters Thor + - - + + + Cluster - hthor + - + descreve os clusters de hThor + - - + Cluster - roxie - Você só deve usar esse recurso quando - configurar inicialmente seu sistema. 
Se houver dados - nos nós ao tentar trocar o mestre, você corre o risco - de perder ou corromper alguns dados. + descreve os clusters Roxie - - - - - - - - Atributos ThorCluster - - Esta seção descreve a aba Thor Cluster Attributes. - - - - - - - Configurações de Memória do Thor - - Quando o globalMemorySize é - deixado indefinido, Thor [master] detecta a memória física total e - aloca 75% dela. Se houver múltiplos escravos por nó - (slavesPerNode>1), ele divide o total entre os escravos. Se - globalMemorySize for definido, ele aloca a quantidade de memória - para cada slave. O atributo masterMemorySize aloca memória para o - mestre Thor. Se omitido, o Thor master usa globalMemorySize ou o - padrão 75% de memória. - - Em sistemas com muita memória, o padrão de 75% da memória - física é provavelmente muito conservador e reservar o total físico - menos 2GB (para o SO e outros processos) é sensato. Você deve - então dividir esse número pelo número de slavesPerNode. - - Se houver vários Thors compartilhando os mesmos nós, o - globalMemorySize deverá ser configurado para levar isso em - conta. - - Por exemplo, se houver 2 Thors, cada um com 2 escravos por - caixa, isso significará que há 4 escravos por nó físico. Portanto, - você deve usar uma fórmula semelhante à seguinte em seus cálculos - ao configurar globalMemorySize: - - globalMemorySize = (total-physical-memory)-2GB / (2*2) - - Sem nenhuma configuração especificada, o Thor assume que tem - acesso exclusivo à memória e, portanto, usaria muito (porque cada - Thor não está ciente da configuração e uso de memória do - outro). - - Se localThor estiver - configurado como true e masterMemorySize e globalMemorySize não forem especificados, - os padrões serão 50% para globalMemorySize (dividido por slavesPerNode) e 25% para masterMemorySize. - - Embora uma configuração possa ser definida usando limites de - memória superiores que excedam a memória física total, o Thor não - reservará a memória com antecedência. Isso somente ocorrerá em - casos de problemas de memória quando e se seus jobs usarem toda a - memória. Portanto, por exemplo, dois Thors configurados para usar - toda a memória disponível podem coexistir pacificamente até que - uma consulta em cada um esteja usando simultaneamente mais memória - do que o nó disponível. - - - - - - - - - - - - - - - Opções do SSH ThorCluster - - Esta seção descreve a aba ThorCluster SSH Options. - - - - - - - - - - - - - - Depuração do ThorCluster + + Topo Server - A aba de depuração é somente para uso interno. - - - - - - Nó Swap ThorCluster - - Esta seção descreve a aba ThorCluster Swap Node - - - - - - - - - - - - - - - - Notas sobre ThorCluster - - Esta aba permite que você adicione notas pertinentes à - configuração do componente. Isso pode ser útil para manter um - registro de alterações e para comunicar essas informações aos - parceiros. - - - - - Roxie - - Esta seção detalha como definir um cluster do Mecanismo de - Entrega Rápida de Dados (Rapid Data Delivery Engine - Roxie). Antes de - começar, você deve decidir a largura do cluster (ou seja, quantos nós - de agente você terá). - - - - - - Selecione Roxie Cluster no - painel do navegador ao lado esquerdo. - - Observação: Se você não - especificou um valor no campo Number of nodes for Roxie - cluster (Número de nós para o cluster Roxie) ao - configurar seu ambiente pela primeira vez, você não terá um Roxie - Cluster. 
Para adicionar um componente Roxie Cluster: Clique com o - botão direito do mouse no componente Software no Painel Navigator, selecione - New Components e depois roxie nas listas suspensas. - - - - Selecione a aba Servidores - . - + Esse processo TopoServer é configurado através do servidor + Topo Server - mytoposerver do Configuration + Manager. Você define as instâncias e, em seguida, define os + atributos de configuração. - - - - Clique com o botão direito do mouse nos Servidores Roxie e - selecione Reconfigure Servers. - - - - - - - - - - Selecione os computadores a serem usados como servidores na - lista e pressione o botão OK. - - - - - - - - Selecione a aba Redundancy - . - - - - - - Selecione o esquema de redundância a ser usado. Normalmente, - isso é redundância cíclica, conforme mostrado abaixo. - - - - - - - - Clique no - ícone do disco para salvar - - - - Feche o Gerenciador de Configuração, pressionando ctrl+C na - janela de comando em que está sendo executado. - - - - - Atributos de Configuração do Roxie - - O Roxie possui muitos atributos configuráveis que podem ser - usados para personalizar e ajustar suas necessidades específicas. A - seção a seguir se expande em cada uma das guias Roxie e nos - atributos disponíveis. Há informações adicionais sobre a - configuração do Roxie na seção imediatamente após essas - tabelas. - - - - - - - - - - - - - <sect3> - <title>Itens adicionais de Configuração Roxie - - - - - Conexões Persistentes para o - Roxie - - - As conexões persistentes podem causar problemas de - compatibilidade com versões anteriores com clientes HTTP não padrão. - Você pode desativar esse recurso definindo - maxHttpConnectionRequests como 0 ou 1. - - Além disso, conexões persistentes podem afetar o balanceamento - de carga do Roxie. Se isso for uma preocupação, você poderá ajustar - os valores maxHttpConnectionRequests e - maxHttpKeepAliveWait para otimizar por quanto - tempo todas as transações de uma conexão persistente específica vão - para um único nó, em vez de serem distribuídas pelo cluster. - - - Adicionar servidores ao Roxie - Farm - - - Para adicionar servidores ao Roxie Farm - - - - Selecione o Roxie Cluster - - myroxie (padrão) na janela Navigator no lado - esquerdo. - - - - Selecione a aba Servidores . - - - - Clique com o botão direito do mouse em Roxie Servers. - - - - Selecione Reconfigure - Servers. - - - - Pressione o botão Add - Hardware. - - - - Digite os valores para os novo(s) servidore(s) na caixa - de diálogo e pressione OK. - - Todos os servidores configurados são usados quando - você cria uma porta para escutar. - - - - OBSERVAÇÃO - - - Se estiver trabalhando com um arquivo de ambiente - antigo, esse processo foi alterado. Você não precisa mais - especificar para um servidor usar uma porta específica. - - - - - - - Redundância - - O Roxie pode ser configurado para utilizar alguns modelos de - redundância diferentes. - - Simple Redundancy - um canal por agente. Mais comumente - usado para um único nó Roxie. - - - - Full Redundancy - mais agente que o número de canais. - Vários agentes hospedam cada canal. - - - - Overloaded Redundancy - Existem vários canais por - agente. - - - - Cyclic Redundancy - Cada nó hospeda vários canais em - rotação. A configuração mais usada. - - - - - - - Topology - - Esta seção descreve a aba Topology. 
- - - - - - - - - - - - - Nome do Atributo - - Definição - - - - - Topology - - - descreve a topologia do sistema - - - - - Cluster - thor - - - descreve os clusters Thor - - - - - Cluster - hthor - - - descreve os clusters de hThor - - - - - Cluster - roxie - - - descreve os clusters Roxie - - - - - - - - - Topo Server - - Esse processo TopoServer é configurado através do servidor - Topo Server - mytoposerver do Configuration - Manager. Você define as instâncias e, em seguida, define os atributos - de configuração. - - - - - - - - - - - - Distribuir alterações na configuração para todos os nós + + + - Após ter configurado seu ambiente da forma desejada, é preciso - copiar o arquivo de configuração para os demais nós. + - + + + Distribuir alterações na configuração para todos os + nós - - - Se o sistema estiver em execução, pare o sistema. + Após ter configurado seu ambiente da forma desejada, é + preciso copiar o arquivo de configuração para os demais + nós. - - - + - - - - - - - - - - - - Certifique-se de que o HPCC System não esteja em - execução antes de tentar copiar o arquivo - Environment.xml. - - - - - - + + + Se o sistema estiver em execução, pare o + sistema. + + + + + + + + + + + + + + + + + Certifique-se de que o HPCC System não esteja + em execução antes de tentar copiar o arquivo + Environment.xml. + + + + + + - - Salve o arquivo environment.xml em um backup. + + Salve o arquivo environment.xml em um backup. - # por exemplo + # por exemplo sudo -u hpcc cp /etc/HPCCSystems/environment.xml /etc/HPCCSystems/environment.bak - Observação: o arquivo environment.xml do ambiente em - execução está localizado em seu diretório /etc/HPCCSystems/. O Gerenciador de - Configurações funciona em arquivos no diretório /etc/HPCCSystems/source. É necessário - copiar deste local para criar um arquivo environment.xml - ativo. - + Observação: o arquivo environment.xml do ambiente em + execução está localizado em seu diretório /etc/HPCCSystems/. O Gerenciador de + Configurações funciona em arquivos no diretório /etc/HPCCSystems/source. É necessário + copiar deste local para criar um arquivo environment.xml + ativo. + - - Copie o novo arquivo NewEnvironment.xml do diretório de - origem para /etc/HPCCSystems e renomeie o arquivo para - environment.xml + + Copie o novo arquivo NewEnvironment.xml do diretório + de origem para /etc/HPCCSystems e renomeie o arquivo para + environment.xml - # por exemplo + # por exemplo sudo -u hpcc cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/environment.xml - - - - - Copie o /etc/HPCCSystems/environment.xml para o - /etc/HPCCSystems/ em cada nó. + + - - + + Copie o /etc/HPCCSystems/environment.xml para o + /etc/HPCCSystems/ em cada nó. + - - Inicie a plataforma do HPCC System - - Se preferir, use um script para automatizar essa etapa, - especialmente se você tiver muitos nós. Consulte a seção Scripts - de exemplo na seção Anexos do manual Como instalar e executar a - plataforma do HPCC System. É possível usar os scripts como um - modelo para criar seu próprio script e copiar o arquivo - environment.xml para todos os seus nós. - - + + Inicie a plataforma do HPCC System + + Se preferir, use um script para automatizar essa + etapa, especialmente se você tiver muitos nós. Consulte a + seção Scripts de exemplo na seção Anexos do manual Como + instalar e executar a plataforma do HPCC System. É possível + usar os scripts como um modelo para criar seu próprio script + e copiar o arquivo environment.xml para todos os seus + nós. 
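          A título de ilustração, segue um esboço mínimo de um script
          desse tipo. Ele assume acesso SSH já configurado para cada nó e
          um arquivo hipotético /tmp/node_list.txt com um endereço IP por
          linha; os scripts de exemplo citados no manual permanecem a
          referência completa.

          #!/bin/bash
          # Esboço hipotético: copia o environment.xml ativo para cada nó
          # listado em /tmp/node_list.txt (um IP por linha). Ajuste o
          # usuário, os caminhos e as permissões conforme o seu ambiente.
          for node in $(cat /tmp/node_list.txt); do
            scp /etc/HPCCSystems/environment.xml ${node}:/tmp/environment.xml
            ssh ${node} "sudo -u hpcc cp /tmp/environment.xml /etc/HPCCSystems/environment.xml"
          done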
+ + + + diff --git a/docs/PT_BR/ContainerizedHPCC/ContainerizedHPCCSystemsPlatform.xml b/docs/PT_BR/ContainerizedHPCC/ContainerizedHPCCSystemsPlatform.xml index 546423e35f8..8f31dd9ff65 100644 --- a/docs/PT_BR/ContainerizedHPCC/ContainerizedHPCCSystemsPlatform.xml +++ b/docs/PT_BR/ContainerizedHPCC/ContainerizedHPCCSystemsPlatform.xml @@ -65,10 +65,11 @@ Visão geral do HPCC em contêineres - A partir da versão 8.0, a plataforma HPCC - Systems® focará em deploys em contêineres. Isso - é útil para implantações baseadas em nuvem (grandes ou pequenas) ou - implantações de teste/desenvolvimento locais. + Desde a versão 8.0, a Plataforma HPCC + Systems® começou a focar significativamente em + implantações containerizadas. Isso é útil para implantações baseadas em + nuvem (grandes ou pequenas) ou implantações de teste/desenvolvimento + local. Os contêineres do Docker gerenciados pelo Kubernetes (K8s) são um novo ambiente operacional de destino, juntamente com o suporte contínuo @@ -84,20 +85,19 @@ em como os componentes são configurados, como e quando eles iniciam e onde armazenam seus dados. - Este livro se concentra em implantações em contêineres. A primeira - seção é sobre o uso de contêineres Docker e gráficos Helm localmente. - Docker e Helm fazem muito do trabalho para você. A segunda parte usa as - mesmas técnicas na nuvem. + Este livro se concentra nessas implantações containerizadas. A + primeira seção é sobre o uso de contêineres Docker e gráficos Helm + localmente. Docker e Helm fazem muito do trabalho por você. A segunda + parte usa as mesmas técnicas na nuvem. Para pequenas implantações locais (para desenvolvimento e teste), sugerimos o uso de Docker Desktop e Helm. Isto é útil para aprendizagem, desenvolvimento e teste. - Para implantações em nuvem, você pode usar qualquer tipo de serviço - em nuvem, se for compatível com Docker, Kubernetes e Helm. Este livro, no - entanto, se concentrará no Microsoft Azure para serviços em nuvem. As - versões futuras podem incluir especificações para outros provedores de - nuvem. + Para implantações em nuvem, você pode utilizar qualquer tipo de + serviços de Cloud, desde que suporte Docker, Kubernetes e Helm. Este + livro, no entanto, vai focar nos Serviços de Nuvem da Microsoft + Azure. Se você deseja gerenciar manualmente sua implantação local ou na nuvem, ainda pode usar os instaladores tradicionais e o Configuration @@ -105,33 +105,17 @@ Helm fornecem, como instrumentação, monitoramento, escalonamento e custo ao controle. - Os sistemas HPCC seguem as convenções padrão sobre como as - implantações do Kubernetes são normalmente configuradas e gerenciadas, - portanto, deve ser fácil para alguém familiarizado com o Kubernetes e o - Helm instalar e gerenciar a plataforma HPCC Systems. - - - - Note: - - - A versão tradicional bare-metal da plataforma de sistemas HPCC - está madura e tem sido amplamente usada em aplicativos comerciais - por quase duas décadas e é totalmente destinada para uso em - produção. A versão em contêiner é nova e ainda não está 100% pronta - para produção. Além disso, alguns aspectos dessa versão podem ser - alterados sem aviso prévio. Nós encorajamos você a usá-lo e fornecer - feedback para que possamos tornar esta versão tão robusta quanto uma - instalação bare-metal. 
- - - + O HPCC Systems segue as convenções padrão sobre como as implantações + do Kubernetes são normalmente configuradas e gerenciadas, portanto, deve + ser fácil para alguém familiarizado com o Kubernetes e o Helm instalar e + gerenciar a plataforma HPCC Systems. Bare-metal vs Containers - Se você estiver familiarizado com a plataforma HPCC Systems, há - algumas mudanças fundamentais a serem observadas. + Se você está familiarizado com as implantações tradicionais da + plataforma HPCC Systems em bare-metal, há algumas mudanças fundamentais + a serem observadas. Processoss e pods, não máquinas diff --git a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml index bd6474cd740..157e6ac5528 100644 --- a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml +++ b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml @@ -917,6 +917,42 @@ incluindo a categoria "ecl", são lidas internamente pelos componentes do sistema e não expostas diretamente ao código ECL. + + + Manipulação de Recursos de Origem Cruzada + + O compartilhamento de recursos de origem cruzada (CORS) é um + mecanismo para integrar aplicativos em diferentes domínios. CORS + define como as aplicações web do cliente em um domínio podem interagir + com recursos em outro domínio. Você pode configurar as configurações + de suporte ao CORS na seção ESP do arquivo values.yaml, conforme + ilustrado abaixo: + + esp: +- name: eclwatch + application: eclwatch + auth: ldap + replicas: 1 + # The following 'corsAllowed' section is used to configure CORS support + # origin - the origin to support CORS requests from + # headers - the headers to allow for the given origin via CORS + # methods - the HTTP methods to allow for the given origin via CORS + # + corsAllowed: + # origin starting with https will only allow https CORS + - origin: https://*.example2.com + headers: + - "X-Custom-Header" + methods: + - "GET" + # origin starting with http will allow http or https CORS + - origin: http://www.example.com + headers: + - "*" + methods: + - "GET" + - "POST" + @@ -943,7 +979,7 @@ - Resources + Recursos A maioria dos componentes tem uma seção de recursos que define quantos recursos são atribuídos a esse componente. Nos arquivos de @@ -969,17 +1005,17 @@ manager, worker e eclagent têm requisitos de recursos diferentes. + - - Taints, Tolerations e Placements + + Valores do Ambiente + + Você pode definir variáveis de ambiente em um arquivo YAML. Os + valores do ambiente são definidos na seção + global.env do arquivo values.yaml fornecido + peloHPCC Systems. Esses valores são especificados como uma lista de + pares de nome-valor, conforme ilustrado abaixo. - Esta é uma consideração importante para sistemas em contêineres. - Taints e Tolerations são tipos de restrições de nó do Kubernetes - também referidas por Node Affinity. A - afinidade do nó é uma maneira de restringir os pods aos nós. Apenas - uma "afinidade" pode ser aplicada a um pod. Se um pod corresponder a - várias listas de "pods" de canais, somente a última definição de - "afinidade" será aplicada. Os taints e as tolerations trabalham juntos para garantir que os pods não sejam agendados em nós inadequados. As tolerâncias são @@ -1060,7 +1096,7 @@ posicionamentos para garantir que os pods com requisitos específicos sejam colocados nos nós apropriados. - + Environment Values @@ -1072,19 +1108,628 @@ conforme ilustrado abaixo. 
global:
  env:
  - name: SMTPserver
    value: mysmtpserver

  A seção global.env do arquivo values.yaml fornecido adiciona
  variáveis de ambiente padrão para todos os componentes. Você também pode
  especificar variáveis de ambiente para os componentes individuais.
  Consulte o esquema para definir este valor para componentes
  individuais.

  Para adicionar valores de ambiente, você pode inseri-los em seu
  arquivo YAML de configurações personalizadas ao implantar o HPCC Systems
  conteinerizado.

  Variáveis de Ambiente para Sistemas Containerizados

    Existem várias configurações em environment.conf para sistemas
    bare-metal. Embora muitas configurações de environment.conf não sejam
    válidas para contêineres, algumas podem ser úteis. Em uma implantação
    em nuvem, essas configurações são herdadas de variáveis de ambiente,
    configuráveis usando o arquivo values.yaml, seja globalmente ou no
    nível do componente.

    Algumas dessas variáveis estão disponíveis para implantações
    em contêiner e na nuvem e podem ser definidas usando o chart Helm. Os
    seguintes valores do environment.conf para bare-metal têm equivalentes
    que podem ser definidos para instâncias conteinerizadas.

        Valor environment.conf      Variável de Ambiente Helm

        skipPythonCleanup           SKIP_PYTHON_CLEANUP

        jvmlibpath                  JAVA_LIBRARY_PATH

        jvmoptions                  JVM_OPTIONS

        classpath                   CLASSPATH

    O seguinte exemplo configura uma variável de ambiente para pular
    a limpeza do Python no componente Thor:

    thor:
      env:
      - name: SKIP_PYTHON_CLEANUP
        value: true

  Plano de Construção de Índice

  Defina o valor indexBuildPlane como uma opção
  do chart Helm para permitir que os arquivos de índice sejam gravados, por
  padrão, em um plano de dados diferente. Ao contrário dos arquivos planos,
  os arquivos de índice têm requisitos distintos: eles se
  beneficiam de armazenamento com acesso aleatório rápido. Normalmente,
  arquivos planos e arquivos de índice são gravados nos planos de
  dados padrão definidos. Usando esta opção, você pode determinar que os
  arquivos de índice sejam construídos em um plano de dados separado dos
  demais arquivos. Este valor de chart pode ser fornecido em
  nível de componente ou global.

  Por exemplo, adicionando o valor em nível global sob
  global.storage:

  global:
    storage:
      indexBuildPlane: myindexplane

  Opcionalmente, você pode adicioná-lo no nível do componente, da
  seguinte forma:

  thor:
  - name: thor
    prefix: thor
    numWorkers: 2
    maxJobs: 4
    maxGraphs: 2
    indexBuildPlane: myindexplane

  Quando este valor é definido no nível do componente, ele se sobrepõe
  ao valor definido no nível global.

  Pods e Nós

  Uma das principais características do Kubernetes é sua capacidade de
  agendar pods nos nós do cluster.
Um pod é a menor e mais simples unidade
  no ambiente do Kubernetes que você pode criar ou implantar. Um nó é uma
  máquina "trabalhadora" física ou virtual no Kubernetes.

  A tarefa de agendar pods para nós específicos do cluster é
  gerenciada pelo kube-scheduler. O comportamento padrão desse componente é
  filtrar os nós com base nas solicitações de recursos e limites de cada
  contêiner no pod criado. Os nós viáveis são então pontuados para encontrar o
  melhor candidato para o posicionamento do pod. O agendador também leva em
  conta outros fatores, como afinidade e anti-afinidade de pods, taints e
  tolerations, restrições de distribuição de topologia de pod e os rótulos
  do seletor de nó. O agendador pode ser configurado para usar esses
  diferentes algoritmos e políticas para otimizar o posicionamento do pod de
  acordo com as necessidades do seu cluster.

  Você pode implantar esses valores usando o arquivo values.yaml ou
  pode colocá-los em um arquivo e fazer com que o Kubernetes leia os valores
  do arquivo fornecido. Consulte a seção acima, Técnicas de
  Personalização, para obter mais informações sobre como
  personalizar sua implantação.

  Placements

    O termo "placements" é usado pelo HPCC Systems para aquilo que o
    Kubernetes chama de scheduling (agendamento/distribuição). Para
    evitar confusão com os termos específicos de scheduler do HPCC Systems e
    do ECL, referenciaremos o agendamento do Kubernetes como placements. Os
    placements são um valor na configuração do HPCC Systems que está um
    nível acima de itens como nodeSelector, Tolerations, Affinity e
    Anti-Affinity e TopologySpreadConstraints.

    O placement é responsável por encontrar o melhor nó para um pod.
    Na maioria das vezes, o agendamento é realizado automaticamente pelo
    Kubernetes. Você pode, porém, restringir um pod para que ele possa ser
    executado apenas em um conjunto específico de nós.

    Os placements, então, são usados para garantir que pods ou jobs
    que precisam de nós com características específicas sejam colocados nesses
    nós.

    Por exemplo, um cluster Thor poderia ser direcionado para machine
    learning usando nós com GPU. Outro job pode precisar de nós com boa
    quantidade de memória, e outro, de mais CPU.

    Usando placements, você pode configurar o agendador do Kubernetes
    para usar uma lista de "pods" para aplicar as configurações aos
    pods.

    Por exemplo:

    placements:
      - pods: [list]
        placement:
          <supported configurations>

    Escopo do Placement

      Use padrões de pods para aplicar o escopo aos
      placements.

      A lista pods: [list] pode conter o seguinte:

            Type: <component>

              Cobre todos os pods/jobs sob este tipo de
              componente. Isso é comumente utilizado para os componentes do
              HPCC Systems. Por exemplo, type:thor
              se aplicará a qualquer componente do tipo Thor:
              thoragent, thormanager, thorworker, etc.

            Target: <name>

              O campo "name" de cada componente; o uso típico para
              componentes do HPCC Systems refere-se ao nome do cluster. Por
              exemplo, roxie: - name: roxie terá como
              alvo o cluster "roxie". Você também pode definir vários
              alvos, cada um com um nome único, como "roxie", "roxie2",
              "roxie-web", etc.

            Pod: <name>

              Este é o nome dos metadados de "Implantação" a partir
              do nome do item de array de um tipo.
Por exemplo, "eclwatch-", + "mydali-", "thor-thoragent", o uso de uma expressão regular é + preferido, pois o Kubernetes usará o nome dos metadados como + prefixo e gerará dinamicamente o nome do pod, como, + eclwatch-7f4dd4dd44cb-c0w3x. + + + + Job name: + + O nome do job é geralmente também uma expressão + regular, já que o nome do job é gerado dinamicamente. Por + exemplo, um job de compilação compile-54eB67e567e, pode usar + "compile-" ou "compile-.*" ou "^compile-.*$" + + + + All: + + Aplica para todos os componentes do HPCC Systems. O + padrão de implantação dos placements para pods é [all] + + + + + + Independentemente da ordem em que os placements aparecem na + configuração, eles serão processadas na seguinte ordem: "all", "type", + "target", e então "pod"/"job". + + + Combinações mistas + + NodeSelector, taints e tolerations, e outros valores pode ser + colocado nos mesmos pods: [list] ambos por zona e por nós no Azure + placements: +- pods: ["eclwatch","roxie-workunit","^compile-.*$","mydali"] + placement: + nodeSelector: + name: npone + + + + + + Node Selection + + In a Kubernetes container environment, there are several ways to + schedule your nodes. The recommended approaches all use label selectors + to facilitate the selection. Generally, you may not need to set such + constraints; as the scheduler usually does reasonably acceptable + placement automatically. However, with some deployments you may want + more control over specific pods. + + Kubernetes uses the following methods to choose where to schedule + pods: + + + + nodeSelector field matching against node labels + + + + Affinity and anti-affinity + + + + Taints and Tolerations + + + + nodeName field + + + + Pod topology spread constraints + + + + + Node Labels + + Kubernetes nodes have labels. Kubernetes has a standard set of + labels used for nodes in a cluster. You can also manually attach + labels which is recommended as the value of these labels is + cloud-provider specific and not guaranteed to be reliable. + + Adding labels to nodes allows you to schedule pods to nodes or + groups of nodes. You can then use this functionality to ensure that + specific pods only run on nodes with certain properties. + + + + The nodeSelector + + The nodeSelector is a field in the Pod specification that allows + you to specify a set of node labels that must be present on the target + node for the Pod to be scheduled there. It is the simplest form of + node selection constraint. It selects nodes based on the labels, but + it has some limitations. It only supports one label key and one label + value. If you wanted to match multiple labels or use more complex + expressions, you need to use node Affinity. + + Add the nodeSelector field to your pod specification and specify + the node labels you want the target node to have. You must have the + node labels defined in the job and pod. Then you need to specify each + node group the node label to use. Kubernetes only schedules the pod + onto nodes that have the labels you specify. + + The following example shows the nodeSelector placed in the pods + list. This example schedules "all" HPCC components to use the node + pool with the label group: "hpcc". + + placements: + - pods: ["all"] + placement: + nodeSelector: + group: "hpcc" + + Note: The label group:hpcc + matches the node pool label:hpcc. + + This next example shows how to configure a node pool to prevent + scheduling a Dali component onto this node pool labelled with the key + spot: via the value false. 
As this kind of node is not always
        available and could get revoked, you would not want to use
        the spot node pool for Dali components. This is an example of how to
        configure a specific type (Dali) of HPCC Systems component not to use
        a particular node pool.

        placements:
          - pods: ["type:dali"]
            placement:
              nodeSelector:
                spot: "false"

        Quando se usa nodeSelector, vários nodeSelectors podem ser
        aplicados. Se chaves duplicadas forem definidas, apenas a última
        prevalece.

      Taints e Tolerations

        Taints e tolerations são tipos de restrições de nós do
        Kubernetes, também mencionadas como afinidade de nós. Apenas uma
        afinidade pode ser aplicada a um pod. Se um pod corresponder a várias
        listas de 'pods' de placements, então apenas a última definição de
        afinidade será aplicada.

        Taints e tolerations trabalham juntas para garantir que os pods
        não sejam agendados em nós inadequados. Tolerations são aplicadas aos
        pods e permitem (mas não exigem) que os pods sejam agendados em nós
        com taints correspondentes. Taints são o oposto: elas permitem que um
        nó repila um conjunto de pods. Uma maneira de implantar usando taints
        é configurá-las para repelir todos os pods, exceto os que as toleram;
        esses pods podem então ser agendados nos nós apropriados.

        Por exemplo, os jobs Thor devem estar todos no tipo apropriado
        de VM. Se um job Thor grande chegar, as taints repelem
        todos os pods que tentarem ser agendados em um nó que não atenda aos
        requisitos.

        Para mais informações e exemplos de nossos Taints, Tolerations e
        Placements, por favor, consulte nossa documentação de
        desenvolvedor:

        https://github.com/hpcc-systems/HPCC-Platform/blob/master/helm/hpcc/docs/placements.md

        Exemplos de Taints e Tolerations

          Os exemplos a seguir ilustram como algumas taints e
          tolerations podem ser aplicadas.

          O Kubernetes pode agendar um pod em qualquer pool de nós sem
          uma taint. Nos exemplos a seguir, o Kubernetes só pode agendar os
          dois componentes nos pools de nós com exatamente esses labels,
          group e gpu.

          placements:
            - pods: ["all"]
              placement:
                tolerations:
                - key: "group"
                  operator: "Equal"
                  value: "hpcc"
                  effect: "NoSchedule"

          placements:
            - pods: ["type:thor"]
              placement:
                tolerations:
                - key: "gpu"
                  operator: "Equal"
                  value: "true"
                  effect: "NoSchedule"

          Várias tolerations também podem ser usadas. O exemplo a seguir
          possui duas tolerations, group e gpu.

          #The settings will be applied to all thor pods/jobs and myeclccserver pod and job
          - pods: ["thorworker-", "thor-thoragent", "thormanager-","thor-eclagent","hthor-"]
            placement:
              nodeSelector:
                app: tf-gpu
              tolerations:
              - key: "group"
                operator: "Equal"
                value: "hpcc"
                effect: "NoSchedule"
              - key: "gpu"
                operator: "Equal"
                value: "true"
                effect: "NoSchedule"

          Neste exemplo, o nodeSelector impede que o agendador do
          Kubernetes implante os pods em qualquer outro pool de nós. Sem
          taints, o agendador poderia implantar os pods em qualquer pool de
          nós; com o nodeSelector, a taint força os pods a serem implantados
          apenas nos nós que correspondem a esse label. Há, portanto, duas
          restrições neste exemplo: uma vinda do pool de nós e outra do
          pod.

      Restrições de Espalhamento de Topologia

        Você pode usar as restrições de distribuição de topologia para
        controlar como os pods são distribuídos pelo seu cluster entre
        domínios de falha, como regiões, zonas, nós e outros domínios de
        topologia definidos pelo usuário.
Isso pode ajudar a alcançar alta
        disponibilidade, bem como a utilização eficiente dos recursos. Você
        pode definir restrições no nível do cluster como padrão, ou configurar
        restrições de espalhamento de topologia para cargas de trabalho
        individuais. As restrições de espalhamento de topologia
        (topologySpreadConstraints) requerem Kubernetes
        v1.19 ou superior.

        Para mais informações, veja:

        https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
        e

        https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/

        No exemplo de "topologySpreadConstraints" abaixo, existem dois
        pools de nós com os labels "hpcc=nodepool1" e "hpcc=nodepool2",
        respectivamente. Os pods Roxie serão agendados uniformemente nos dois
        pools de nós.

        Após a implantação, você pode verificar emitindo o seguinte
        comando:

        kubectl get pod -o wide | grep roxie

        O código de placement correspondente:

        - pods: ["type:roxie"]
          placement:
            topologySpreadConstraints:
            - maxSkew: 1
              topologyKey: hpcc
              whenUnsatisfiable: ScheduleAnyway
              labelSelector:
                matchLabels:
                  roxie-cluster: "roxie"

      Afinidade (Affinity) e Anti-Afinidade (Anti-Affinity)

        A afinidade e a anti-afinidade expandem os tipos de restrições
        que você pode definir. As regras de afinidade e anti-afinidade ainda
        são baseadas em labels, mas, além delas, fornecem regras que
        orientam o agendador do Kubernetes sobre onde colocar os pods com base
        em critérios específicos. A linguagem de afinidade/anti-afinidade é
        mais expressiva do que labels simples e oferece mais controle sobre a
        lógica de seleção.

        Há dois tipos principais de afinidade: Afinidade de Nó (Node
        Affinity) e Afinidade de Pod (Pod Affinity).

        Node Affinity

          A afinidade de nó é semelhante ao conceito de seletor de nós:
          ela permite limitar em quais nós o seu pod pode ser agendado, com
          base nas labels desses nós. A afinidade de nó e a anti-afinidade de
          nó só podem ser usadas para estabelecer afinidades positivas, que
          atraem os pods para o nó. Isto é realizado limitando os nós que
          podem receber um pod, por correspondência com as labels desses
          nós.

          Não existe verificação de esquema para o conteúdo da
          afinidade. Apenas uma afinidade pode ser aplicada a um pod ou job.
          Se um pod/job corresponder a várias listas de pods de placements,
          então apenas a última definição de afinidade será aplicada.

          Para mais informações, veja https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/

          Existem dois tipos de afinidade de nó:

          requiredDuringSchedulingIgnoredDuringExecution:
          O scheduler não consegue agendar o pod a menos que esta regra seja
          cumprida. Esta função é semelhante ao nodeSelector, mas com uma
          sintaxe mais expressiva.

          preferredDuringSchedulingIgnoredDuringExecution:
          O scheduler tenta encontrar um nó que cumpra a regra. Se um nó
          correspondente não estiver disponível, o scheduler ainda assim
          agenda o pod.

          Você pode especificar a afinidade de nó usando o campo
          .spec.affinity.nodeAffinity na especificação do
          seu pod.
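          Para ilustrar a variante "preferencial", segue um esboço
          mínimo (hipotético, não retirado da documentação oficial) no
          formato de placements descrito acima; o label disktype=ssd é
          apenas um exemplo de rótulo que você mesmo teria aplicado aos
          seus nós:

          placements:
          - pods: ["type:roxie"]
            placement:
              affinity:
                nodeAffinity:
                  preferredDuringSchedulingIgnoredDuringExecution:
                  # Preferência com peso: o agendador tenta, mas não
                  # exige, nós que possuam este label de exemplo
                  - weight: 1
                    preference:
                      matchExpressions:
                      - key: disktype
                        operator: In
                        values:
                        - ssd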
+ + + + Pod Affinity + + O pod Affinity ou Inter-Pod Affinity é usada para limitar os + nós que podem receber um pod, de acordo com as labels dos pods já em + execução nesses nós. A afinidade de pod e a anti-afinidade de pod + podem ser tanto uma afinidade atraente quanto uma repulsa à + afinidade. + + A Inter-Pod Affinity funciona de maneira muito semelhante à + afinidade de nó, mas com algumas diferenças importantes. Os modos + "hard" e "soft" são indicados usando os mesmos campos + requiredDuringSchedulingIgnoredDuringExecution + e + preferredDuringSchedulingIgnoredDuringExecution. + No entanto, estes deveriam estar aninhados sob os campos + spec.affinity.podAffinity ou + spec.affinity.podAntiAffinity dependendo de se + você deseja aumentar ou diminuir a afinidade do Pod. + + + + Exemplo Affinity + + O código a seguir ilustra o exemplo de affinity: + + - pods: ["thorworker-.*"] + placement: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/e2e-az-name + operator: In + values: + - e2e-az1 + - e2e-az2 + + Na seção 'schedulerName' a seguir, as configurações de + 'afinnity' também podem ser incluídas com este exemplo. + + Nota: O valor "affinity" no + campo "schedulerName" é suportado apenas nas versões beta do + Kubernetes 1.20.0 e posteriores. + + + + + schedulerName + + O campo schedulerName + especifica o nome do agendador responsável por agendar um pod ou uma + job. No Kubernetes, você pode configurar vários agendadores com + diferentes nomes e perfis para rodar simultaneamente no + cluster. + + Apenas um "schedulerName" pode ser aplicado a qualquer + pod/job. + + Um exemplo de schedulerName: + + - pods: ["target:roxie"] + placement: + schedulerName: "my-scheduler" +#The settings will be applied to all thor pods/jobs and myeclccserver pod and job +- pods: ["target:myeclccserver", "type:thor"] + placement: + nodeSelector: + app: "tf-gpu" + tolerations: + - key: "gpu" + operator: "Equal" + value: "true" + effect: "NoSchedule" + @@ -1358,4 +2003,4 @@ global: especificado. - + \ No newline at end of file diff --git a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ContainerLogging.xml b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ContainerLogging.xml index 1491815ceda..3fabee378c6 100644 --- a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ContainerLogging.xml +++ b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ContainerLogging.xml @@ -2,31 +2,31 @@ - Logging em contêiner + Registro em Contêiner - Histórico de Logging + Contexto de Registro - Os logs de componentes do Bare-metal HPCC Systems são gravados em + Os logs de componentes do HPCC Systems Bare-metal são escritos em arquivos persistentes no sistema de arquivos local. Em contraste, os logs - HPCC em contêiner são efêmeros e sua localização nem sempre é bem - definida. Os componentes do HPCC Systems fornecem logs informativos no - nível do aplicativo para fins de depuração de problemas, ações de - auditoria e monitoramento do progresso. - - Seguindo as metodologias em contêiner mais amplamente aceitas, as - informações de log de componentes do HPCC Systems são roteadas para os - fluxos de saída padrão em vez de arquivos locais. Em implantações em - contêiner, não há logs de componentes gravados em arquivos como nas - edições anteriores. - - Esses logs são gravados no fluxo de erro padrão (stderr). No nível - do nó, o conteúdo do erro padrão e dos fluxos de saída são redirecionados - para um local de destino por um mecanismo de contêiner. 
Em um ambiente - Kubernetes, o mecanismo de contêiner do Docker redireciona os fluxos para - um driver de log, que o Kubernetes configura para gravar em um arquivo no - formato JSON. Os logs são expostos pelo Kubernetes por meio do comando - "logs" apropriadamente chamado. + do HPCC containerizados são efêmeros, e sua localização nem sempre é bem + definida. Os componentes do HPCC Systems fornecem logs de aplicação + informativos para o propósito de depuração de problemas, auditoria de + ações e monitoramento de progresso. + + Seguindo as metodologias containerizadas mais amplamente aceitas, as + informações de log dos componentes do HPCC Systems são direcionadas para + os fluxos de saída padrão, em vez de arquivos locais. Em implantações + containerizadas, não existem logs de componentes escritos em arquivos como + nas edições anteriores. + + Esses registros são escritos no fluxo de erro padrão (stderr). No + nível do nó, os conteúdos dos fluxos de erro padrão e saída são + redirecionados para um local alvo por um mecanismo de contêiner. Em um + ambiente Kubernetes, o mecanismo de contêiner Docker redireciona os fluxos + para um driver de log, que o Kubernetes configura para escrever em um + arquivo no formato JSON. Os registros são expostos pelo Kubernetes por + meio do comando apropriadamente chamado "logs" Por exemplo: @@ -35,131 +35,153 @@ >0000CF10 PRG INF 2020-05-12 17:10:34.911 1 10690 "GET /, from 10.240.0.4" >0000CF11 PRG INF 2020-05-12 17:10:34.911 1 10690 “TxSummary[activeReqs=22; rcv=5ms;total=6ms;]" - É importante entender que esses logs são de natureza efêmera e podem - ser perdidos se o pod for despejado, o contêiner travar, o nó morrer etc. - Além disso, devido à natureza das soluções em contêiner, os logs - relacionados provavelmente se originam de vários locais e pode precisar - ser coletado e processado. É altamente recomendável desenvolver uma + É importante entender que esses registros são efêmeros por natureza, + e podem ser perdidos se o pod for despejado, o contêiner travar, o nó + morrer, etc. Devido à natureza dos sistemas containerizados, é provável + que os registros relacionados se originem de vários locais e precisem ser + coletados e processados. É altamente recomendável desenvolver uma estratégia de retenção e processamento com base em suas necessidades. Muitas ferramentas estão disponíveis para ajudar a criar uma solução - apropriada com base em uma abordagem do tipo "faça você mesmo" ou em - recursos gerenciados disponíveis em provedores de nuvem. + apropriada com base em uma abordagem "faça você mesmo", ou recursos + gerenciados disponíveis de provedores de nuvem. Para os ambientes mais simples, pode ser aceitável confiar no processo padrão do Kubernetes que encaminha todo o conteúdo de - stdout/stderr para o arquivo. No entanto, à medida que a complexidade do - cluster aumenta ou a importância de reter o conteúdo dos logs aumenta, uma - arquitetura de log em nível de cluster deve ser empregada. - - O registro em nível de cluster para o cluster do HPCC Systems em - contêiner pode ser realizado incluindo um agente de registro em cada nó. A - tarefa de cada agente é expor os logs ou enviá-los por push para um - back-end de processamento de log. Os agentes de registro geralmente não - são fornecidos prontos para uso, mas há vários disponíveis, como o - Elasticsearch e o Stackdriver Logging. 
Vários provedores de nuvem oferecem - soluções integradas que coletam automaticamente todos os fluxos stdout/err - e fornecem armazenamento dinâmico e ferramentas analíticas poderosas, além + stdout/stderr para o arquivo. No entanto, conforme a complexidade do + cluster cresce ou a importância de reter o conteúdo dos registros cresce, + uma arquitetura de log de nível de cluster deve ser empregada. + + O registro de nível de cluster para o cluster do HPCC Systems em + contêiner pode ser realizado incluindo um agente de log em cada nó. A + tarefa de cada agente é expor os registros ou empurrá-los para um back-end + de processamento de registro. Os agentes de registro geralmente não são + fornecidos prontos, mas há vários disponíveis, como Elasticsearch e + Stackdriver Logging. Vários provedores de nuvem oferecem soluções + integradas que colhem automaticamente todos os fluxos stdout/err e + fornecem armazenamento dinâmico e ferramentas analíticas poderosas, além da capacidade de criar alertas personalizados com base em dados de log. É sua responsabilidade determinar a solução apropriada para - processar os dados de log de streaming. - - - - Soluções de Processamento de Log - - Existem várias soluções de processamento de log disponíveis. Você - pode optar por integrar os dados de registro do HPCC Systems com qualquer - uma de suas soluções de registro existentes ou implementar outra - especificamente para os dados do HPCC Systems. A partir do HPCC Systems - versão 8.4, fornecemos uma solução de processamento de log leve e completa - para sua conveniência. Como afirmado existem várias soluções possíveis, - você deve escolher a opção que melhor atende às suas necessidades. As - seções a seguir examinarão duas soluções possíveis. - - - O chart Elastic4hpcclogs - - O HPCC Systems fornece um chart Helm gerenciado, - elastic4hpcclogs, que utiliza os charts Elastic - Stack Helm para Elastic Search, Filebeats e Kibana. Este gráfico - descreve uma instância local e mínima do Elastic Stack para - processamento de log de componentes do HPCC Systems. Depois de - implantados com êxito, os logs de componentes do HPCC produzidos no - mesmo namespace devem ser indexados automaticamente no ponto de - extremidade do Elastic Search. Os usuários podem consultar esses logs - emitindo consultas de API RESTful do Elastic Search ou por meio da - interface do usuário do Kibana (depois de criar um padrão de índice - simples). - - Pronto para uso, o Filebeat encaminha as entradas de log do - componente HPCC para um índice com nome genérico: 'hpcc-logs' - - <DATE_STAMP> e grava os dados de log em campos prefixados - 'hpcc.log.*'. Ele também agrega k8s, Docker e metadados do sistema para - ajudar o usuário a consultar as entradas de log de seu interesse. - - Um padrão de índice do Kibana é criado automaticamente com base no - layout de índice de batida de arquivo padrão. + processar os dados do log de streaming. + + + Soluções de Processamento de logs + + Existem várias soluções de processamento de logs disponíveis. Você + poderia optar por integrar os dados de log do HPCC Systems com quaisquer + soluções de log existentes, ou implementar outra especificamente para os + dados do HPCC Systems. A partir da versão 8.4 do HPCC Systems, + fornecemos uma solução de processamento de logs leve, mas completa, para + sua conveniência. As próximas seções irão analisar algumas das possíveis + soluções. 
- - Instalando o chart elastic4hpcclogs + + Solução Gerenciada Elastic Stack + + O HPCC Systems fornece um chart Helm gerenciado, + elastic4hpcclogs, que utiliza os chart Helm da + Elastic Stack para Elastic Search, Filebeats e Kibana. Este chart descreve + uma instância local mínima da Elastic Stack para processamento de log de + componentes do HPCC Systems. Uma vez implantado com sucesso, os logs de + componentes do HPCC produzidos no mesmo namespace devem ser + automaticamente indexados no ponto de extremidade do Elastic Search. Os + usuários podem consultar esses logs emitindo consultas da API RESTful do + Elastic Search, ou via interface de usuário do Kibana (após a criação de + um padrão de índice simples). + + Pronto para usar, o Filebeat encaminha as entradas de log do + componente HPCC para um índice de nome genérico: 'hpcc-logs'- + <DATE_STAMP> e escreve os dados do log em campos prefixados com + 'hpcc.log.*'. Ele também agrega metadados k8s, Docker e do sistema para + ajudar o usuário a consultar as entradas de log de seu interesse. + + Um padrão de índice Kibana é criado automaticamente com base no + layout de índice filebeat padrão. + + + Instalando o chart elastic4hpcclogs + + Instalar a solução simples fornecida é, como o nome indica, + simples e uma maneira conveniente de reunir e filtrar dados de log. Ela + é instalada através de nossos charts helm do repositório HPCC Systems. + No diretório HPCC-platform/helm, o gráfico + elastic4hpcclogs é entregue junto com os outros + componentes da plataforma HPCC Systems. As próximas seções mostrarão + como instalar e configurar a solução de log da Elastic Stack para o HPCC + Systems. + + + + + + + + + + + + - Instalar a solução simples fornecida é, como o nome indica, simples - e uma maneira conveniente de coletar e filtrar dados de log. Ele é - instalado por meio de nossos gráficos de leme do repositório HPCC Systems. - No diretório HPCC-platform/helm, o gráfico elastic4hpcclogs é fornecido - junto com os outros componentes da plataforma HPCC Systems. As próximas - seções mostrarão como instalar e configurar a solução Elastic stack - logging para HPCC Systems. + NOTA: O chart + elastic4hpcclogs não habilita nenhuma + segurança. A responsabilidade de determinar a necessidade de + segurança e habilitar a segurança em qualquer instância do + Elastic Stack implantada ou componentes é de sua + responsabilidade e de sua organização. + + + + - - Adicionar o Repositório HPCC Systems + + Adicionando o Repositório HPCC Systems - O chart Elastic for HPCC Systems entregue pode ser encontrado no - repositório HPCC Systems Helm. Para buscar e implantar os gráficos - gerenciados do HPCC Systems, adicione o repositório do HPCC Systems - Helm, caso ainda não tenha feito isso: + O chart Elastic para HPCC Systems fornecido pode ser encontrado + no repositório Helm do HPCC Systems. Para buscar e implantar os + gráficos gerenciados pelo HPCC Systems, adicione o repositório Helm do + HPCC Systems, se ainda não o fez: - helm repo add hpcc https://hpcc-systems.github.io/helm-chart/ + helm repo add hpcc https://hpcc-systems.github.io/helm-chart/ - Depois que esse comando for concluído com êxito, o chart - elastic4hpcclogs estará acessível. + Uma vez que este comando tenha sido concluído com sucesso, o + chart elastic4hpcclogs estará acessível. - Confirme se o chart apropriado foi puxado para baixo. + Confirme se o chart apropriado foi descarregado. 
- helm list
+ helm list

-      A emissão do comando helm list exibirá os gráficos e repositórios
-      do HPCC Systems disponíveis. O gráfico
-      elastic4hpcclogs está entre eles.
+      A emissão do comando helm list exibirá os charts e repositórios
+ do HPCC Systems disponíveis. O chart elastic4hpcclogs está entre
+ eles.

-    
-
+    
+
-    
-      Instalar o chart elastic4hpcc
+    
+      Instalando o chart elastic4hpcclogs

-      Instalar o chart elastic4hpcclogs utilizando
-      o seguinte comando:
+      Instale o chart elastic4hpcclogs utilizando
+ o seguinte comando:

-      helm install <Instance_Name> hpcc/elastic4hpcclogs
+      helm install <Instance_Name> hpcc/elastic4hpcclogs

-      Forneça o nome que você deseja chamar sua instância do Elastic
-      Search para o parâmetro <Instance_Name>. Por exemplo, você poderia
-      chamar sua instância de "myelk" e, nesse caso, emitiria o comando de
-      instalação da seguinte maneira:
+      Forneça o nome que você deseja dar à sua instância do Elastic
+ Search no parâmetro <Instance_Name>. Por exemplo, você
+ poderia chamar sua instância de "myelk", caso em que você emitiria o
+ comando de instalação da seguinte forma:

-      helm install myelk hpcc/elastic4hpcclogs
+      helm install myelk hpcc/elastic4hpcclogs

-      Após a conclusão bem-sucedida, a seguinte mensagem é
-      exibida:
+      Após a execução com sucesso, a seguinte mensagem é
+ exibida:

-      Thank you for installing elastic4hpcclogs.
+      Thank you for installing elastic4hpcclogs.
 A lightweight Elastic Search instance for HPCC component log processing.
 This deployment varies slightly from defaults set by Elastic, please review the effective values.
@@ -168,158 +190,281 @@ PLEASE NOTE: Elastic Search declares PVC(s) which might require explicit manual removal
 when no longer needed.
-      
-        
-          
-            
-              
-            
-          
-
-          
-            
-              IMPORTANTE: O Elastic
-              Search declara PVC(s) que podem exigir remoção manual
-              explícita quando não forem mais necessários. Isso pode ser
-              particularmente importante para alguns provedores de nuvem que
-              podem acumular custos mesmo depois de não usar mais sua
-              instância. Você deve garantir que nenhum componente (como
-              PVCs) persista e continue acumulando custos.
-            
-          
-        
-      
-
-      OBSERVAÇÃO: dependendo da versão do Kubernetes, os usuários podem
-      ser avisados sobre APIs obsoletas nos gráficos elásticos (ClusterRole e
-      ClusterRoleBinding estão obsoletos na v1.17+). As implantações baseadas
-      em Kubernetes < v1.22 não devem ser afetadas.
-
+      
+        
+          
+            
+              
+            
+          
+
+          
+            
+              IMPORTANTE:
+ O Elastic Search declara PVC(s) que podem exigir remoção
+ manual explícita quando não forem mais necessários. Isso
+ pode ser particularmente importante para alguns provedores
+ de nuvem que podem continuar a acumular custos mesmo após
+ não usar mais a sua instância. Você deve garantir que nenhum
+ componente (como PVCs) persista e continue a acumular
+ custos.
+            
+          
+        
+      
+
+      NOTA: Dependendo da versão do Kubernetes, os usuários podem ser
+ alertados sobre APIs obsoletas nos charts Elastic (ClusterRole e
+ ClusterRoleBinding estão obsoletos na v1.17+). Implementações baseadas
+ em Kubernetes < v1.22 não devem ser impactadas.
+

-    
-      Confirmar se seus Pods estão Prontos
+    
+      Confirme se os Pods estão Prontos

-      Confirme se os pods estão prontos. Ás vezes, após instalação, os
-      pods podem levar alguns segundos para aparecerem. Confirme se os pods
-      estão prontos antes de proceder. Para fazer isso, use o seguinte
-      comando:
+      Confirme que os pods Elastic estão prontos. Às vezes, após a
+ instalação, os pods podem demorar alguns segundos para iniciar.
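+
+      Se preferir aguardar de forma automatizada em vez de verificar
+ manualmente, um comando como o seguinte (ilustrativo; ajuste o namespace
+ e o tempo limite conforme necessário) bloqueia até que todos os pods
+ estejam prontos:
+
+      kubectl wait --for=condition=ready pod --all --timeout=300s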
+ Confirmar que os pods estão em um estado de prontidão é uma boa ideia
+ antes de prosseguir. Para fazer isso, use o seguinte comando:

-      kubectl get pods
+      kubectl get pods

-      Este comando retorna a seguinte informação, exibindo o status dos
-      pods.
+      Este comando retorna as seguintes informações, exibindo os
+ status dos pods.

-      elasticsearch-master-0 1/1 Running 0
+      elasticsearch-master-0 1/1 Running 0
 myelk-filebeat-6wd2g 1/1 Running 0
 myelk-kibana-68688b4d4d-d489b 1/1 Running 0
-      
+
-      Quando todos os pods estiverem indicando um estado 'ready' e
-      'Running', incluindo os três componentes para filebeats, Elastic Search
-      e Kibana (destacado acima), você poderá prosseguir.
-    
+      Uma vez que todos os pods estejam indicando um estado 'ready' e
+ 'Running', incluindo os três componentes para filebeats, Elastic
+ Search e Kibana (destacados acima), você pode prosseguir.
+    

-    
-      Confirmar os Serviços Elastic
+    
+      Confirmando os serviços Elastic

-      Para garantir que os serviços Elastic estejam em execução, entre
-      com o seguinte comando:
+      Para confirmar se os serviços Elastic estão em execução, utilize
+ o seguinte comando:

-      $ kubectl get svc
+      $ kubectl get svc

-      Isso exibe as seguintes informações de confirmação:
+      Isto exibe a seguinte confirmação:

-      ...
+      ...
 elasticsearch-master ClusterIP 10.109.50.54 <none> 9200/TCP,9300/TCP 68m
 elasticsearch-master-headless ClusterIP None <none> 9200/TCP,9300/TCP 68m
 myelk-kibana LoadBalancer 10.110.129.199 localhost 5601:31465/TCP 68m
 ...

-      Nota: O serviço myelk-kibana é declarado como LoadBalancer por
-      conveniência.
-    
+      Nota: O serviço myelk-kibana é declarado como LoadBalancer por
+ conveniência.
+    

-    
-      Configurando Componentes do Elastic Stack
+    
+      Configurando os componentes do Elastic Stack

-      Você pode precisar ou querer personalizar os componentes do
-      Elastic Stack. Os valores dos charts do componentes Elastic podem ser
-      substituídos como parte do comando de implantação do HPCC
-      Systems.
+      Você pode precisar ou querer personalizar os componentes da
+ Elastic Stack. Os valores dos charts dos componentes Elastic podem ser
+ substituídos como parte do comando de implantação do HPCC
+ Systems.

-      Por exemplo:
+      Por exemplo:

-      helm install myelk hpcc/elastic4hpcclogs --set elasticsearch.replicas=2
+      helm install myelk hpcc/elastic4hpcclogs --set elasticsearch.replicas=2

-      Consulte o repositório GitHub do Elastic Stack para obter a lista
-      completa de todas as opções do Filebeat, Elastic Search, LogStash e
-      Kibana com descrições.
-    
+      Por favor, consulte o repositório GitHub do Elastic Stack para a
+ lista completa de todas as opções do Filebeat, Elastic Search,
+ LogStash e Kibana com descrições.
+    

+    
+      Utilizando os logs de componentes do HPCC Systems no
+ Kibana
+
+      Uma vez habilitado e em execução, você pode explorar e consultar
+ os logs de componentes do HPCC Systems a partir da interface de
+ usuário do Kibana. Os padrões de índice do Kibana são necessários para
+ explorar dados do Elastic Search a partir da interface de usuário do
+ Kibana. Para mais informações sobre como usar a interface
+ Elastic-Kibana, por favor, consulte a documentação
+ correspondente:

-    
-      Use of HPCC Systems Component Logs in Kibana
-
-      Uma vez ativado e em execução, você pode explorar e consultar os
-      logs de componentes do HPCC Systems na interface do usuário do Kibana. O
-      uso da interface do Kibana é bem suportado e documentado. Os padrões de
-      índice do Kibana são necessários para explorar os dados do Elastic
-      Search na interface do usuário do Kibana.
A Elastic fornece explicações
-      detalhadas das informações necessárias para entender e utilizar
-      efetivamente a interface Elastic-Kibana. A documentação robusta do
-      Kibana deve ser consultada para obter mais informações sobre como usar a
-      interface do Kibana. Por favor, veja: https://www.elastic.co/
-      e
+      https://www.elastic.co/
+
+
+      e
+
-      https://www.elastic.co/elastic-stack/
-      Incluídos na documentação completa também estão vídeos de início
-      rápido e outros recursos úteis.
+      https://www.elastic.co/elastic-stack/
+    
+
+    
+      Configurando o logAccess para o Elastic Stack
+
+      O recurso logAccess permite que o HPCC
+ Systems consulte e empacote logs relevantes para vários recursos, como
+ o relatório ZAP, logs de assistente de WorkUnit, visualizador de logs
+ ECLWatch, etc.
+
+      Uma vez que os logs são migrados ou direcionados para a
+ instância do Elastic Stack, a plataforma HPCC Systems precisa ser
+ capaz de acessar esses logs. A forma como você direciona o HPCC
+ Systems para fazer isso é fornecendo um arquivo de valores que inclui
+ os mapeamentos de logs. Fornecemos um arquivo de valores padrão e um
+ exemplo de linha de comando que insere esse arquivo de
+ valores em seu deployment. Esse arquivo de valores de configuração
+ sugerido para habilitar o acesso ao log pode ser encontrado no
+ repositório GitHub da plataforma HPCC Systems.
+
+      https://github.com/hpcc-systems/HPCC-Platform
+
+      Em seguida, navegue até o arquivo
+ helm/examples/azure/log-analytics/loganalytics-hpcc-logaccess.yaml.
+
+      Você pode usar o chart elastic4hpcclogs fornecido ou
+ adicionar os valores dele ao seu arquivo yaml de valores de configuração
+ personalizada.
+
+      Você pode então instalá-lo usando um comando como:
+
+      helm install mycluster hpcc/hpcc -f elastic4hpcclogs-hpcc-logaccess.yaml
+    

-  
-    Azure AKS Insights
+  
+    Solução de Análise de Logs do Azure

-    O Azure AKS Insights é um recurso opcional projetado para ajudar a
-    monitorar o desempenho e a integridade de clusters baseados em Kubernetes.
-    Uma vez habilitado e associado um determinado AKS a um cluster do HPCC
-    Systems ativo, os logs do componente HPCC são capturados automaticamente
-    pelo Insights. Todos os dados STDERR/STDOUT são capturados e
+    A Análise de Logs do Azure (ALA) nos Serviços Kubernetes do Azure
+ (AKS) é um recurso opcional projetado para ajudar a monitorar o
+ desempenho e a saúde dos clusters baseados em Kubernetes. Uma vez
+ habilitado e associado a um determinado AKS com um cluster do HPCC
+ Systems, os logs de componentes do HPCC são automaticamente capturados
+ pela Análise de Logs. Todos os dados STDERR/STDOUT são capturados e
 disponibilizados para fins de monitoramento e/ou consulta. Como geralmente
-    acontece com os recursos do provedor de nuvem, o custo é uma consideração
-    importante e deve ser bem entendido antes da implementação. O conteúdo do
-    log é gravado no armazenamento de logs associado ao seu espaço de trabalho
-    do Log Analytics.
+    ocorre com os recursos dos provedores de nuvem, o custo é uma consideração
+ significativa e deve ser bem compreendido antes da implementação. O
+ conteúdo do log é escrito no armazenamento de logs associado ao seu
+ workspace de Análise de Logs.

     
-      Habilitar Azure Insights
+      Habilitando Azure Log Analytics
+
+      Habilite o Azure Log Analytics (ALA) no cluster AKS alvo usando
+ uma das seguintes opções: Linha de comando direta, Linha de comando
+ scriptada, ou pelo portal Azure.
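+
+      Antes de habilitar, você pode verificar se o complemento de
+ monitoramento já está ativo no cluster alvo (comando ilustrativo; supõe
+ os nomes de grupo de recursos e de cluster usados nos exemplos a
+ seguir):
+
+      az aks show -g myresourcegroup -n myaks --query addonProfiles.omsagent.enabled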
- A habilitação do Azure's Insights no cluster AKS de destino pode
-      ser feita no portal do Azure ou via CLI. Para obter documentação
-      detalhada do Azure: Habilite insights de contêiner: Enabling Azure's
-      Insights on the target AKS cluster can be done from the Azure portal or
-      via CLI. For detailed Azure documentation: Enable Container
-      insights:
+      Para obter informações mais detalhadas, consulte a documentação do
+ Azure:
       https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-onboard
+
+      
+        Linha de comando
+
+        Para habilitar os insights do Azure Log Analytics a partir da
+ linha de comando:
+
+        Você pode criar manualmente um workspace dedicado à análise de
+ logs, ou pode pular esta etapa e utilizar o workspace padrão.
+
+        Para criar um workspace dedicado, insira este comando:
+
+        az monitor log-analytics workspace create -g myresourcegroup -n myworkspace --query-access Enabled
+
+        Para habilitar o recurso de Análise de Logs em um cluster AKS de
+ destino, faça referência ao id do recurso do workspace criado na etapa
+ anterior:
+
+        az aks enable-addons -g myresourcegroup -n myaks -a monitoring --workspace-resource-id \
+ "/subscriptions/xyz/resourcegroups/myresourcegroup/providers/ \
+ microsoft.operationalinsights/workspaces/myworkspace"
+      
+
+      
+        Linha de Comando Scriptada
+
+        Para conveniência, o HPCC Systems oferece um script para
+ habilitar o ALA (com um workspace dedicado à análise de logs) no
+ cluster AKS alvo.
+
+        O script enable-loganalytics.sh está localizado em:
+
+        https://github.com/hpcc-systems/HPCC-Platform/tree/master/helm/examples/azure/log-analytics
+
+        O script requer o preenchimento dos seguintes valores no arquivo
+ de ambiente env-loganalytics.
+
+        Forneça esses valores no arquivo de ambiente
+ env-loganalytics para criar um novo workspace no
+ Azure Log Analytics, associá-lo a um cluster AKS de destino, e
+ habilitar o processamento de logs:
+
+        
+          
+            LOGANALYTICS_WORKSPACE_NAME
+ O nome desejado para o workspace do Azure Log Analytics a ser
+ associado ao cluster AKS de destino. Um novo workspace é criado se
+ esse valor não existir.
+          
+
+          
+            LOGANALYTICS_RESOURCE_GROUP
+ O grupo de recursos do Azure associado ao cluster AKS de
+ destino.
+          
+
+          
+            AKS_CLUSTER_NAME O nome do
+ cluster AKS de destino para associar a análise de logs.
+          
+
+          
+            TAGS As tags associadas com
+ o novo workspace.
+
+            Por exemplo: "admin=MyName email=my.email@mycompany.com
+ environment=myenv justification=testing"
+          
+
+          
+            AZURE_SUBSCRIPTION
+ [Opcional] Garante que esta assinatura esteja configurada antes de
+ criar o novo workspace.
+          
+        
+
+        Uma vez que esses valores estejam preenchidos, o script
+ enable-loganalytics.sh pode ser executado e ele irá criar o workspace
+ de análise de logs e associá-lo ao cluster AKS de destino.
+      

       
         Portal Azure

-        Para habilitar o insights do Azure no portal:
+        Para habilitar o Azure Log Analytics no portal Azure:

         
           
-            Selecione cluster AKS de Destino
+            Selecione o cluster AKS de destino
           
@@ -331,44 +476,124 @@
           
-            Habilite - escolha ao workspace padrão
+            Habilite - escolha o workspace padrão
           
+        
+      

-      
-        Linha de Comando
+      
+        Configure o logAccess do HPCC para Azure

-        Para habilitar os Azure insights na linha de comando:
+        O recurso logAccess permite que o HPCC
+ Systems consulte e empacote logs relevantes para várias funcionalidades,
+ como o relatório ZAP, logs auxiliares da WorkUnit, visualizador de log
+ do ECLWatch, etc.
- Opcionalmente, crie o espaço de trabalho de análise de log
-        [espaço de trabalho padrão, caso contrário]

-        Entre:

-        az monitor log-analytics workspace create -g myresourcegroup -n myworkspace --query-access Enabled

-        Habilitar no cluster AKS de destino (referência ao ID do recurso
-        do workspace da etapa anterior)

-        az aks enable-addons -g myresourcegroup -n myaks -a monitoring --workspace-resource-id \
-        "/subscriptions/xyz/resourcegroups/myresourcegroup/providers/ \
-        microsoft.operationalinsights/workspaces/myworkspace"
+
+        
+          Obtenha o Service Principal

-      A interface do AKS Insights no Azure fornece visualizações de
-      métricas de integridade em nível de cluster/nó/contêiner centradas em
-      Kubernetes e links diretos para logs de contêiner por meio de
-      interfaces de "análise de log". Os logs podem ser consultados através
-      da linguagem de consulta "Kusto" (KQL). Consulte a documentação do
-      Azure para obter detalhes sobre como consultar os logs.
+          Para conceder acesso à API Log Analytics, o Azure requer uma
+ aplicação registrada no Azure Active Directory (AAD). Obtenha uma
+ aplicação registrada no AAD.

-      Consulte a documentação do Azure para obter detalhes sobre como
-      consultar os logs.
+          Para mais informações sobre o registro de uma aplicação no
+ Azure Active Directory, veja a documentação oficial do Azure:

-      Exemplo de consulta KQL para buscar entradas de registro
-      "Transaction summary" de um contêiner ECLWatch:
+          https://docs.microsoft.com/en-us/power-apps/developer/data-platform/walkthrough-register-app-azure-active-directory

-      let ContainerIdList = KubePodInventory
+          Dependendo da estrutura de sua assinatura Azure, pode ser
+ necessário solicitar isso a um administrador da assinatura.
+        
+
+        
+          Forneça Informações da Aplicação Registrada no AAD
+
+          O logAccess do HPCC Systems requer acesso ao tenant AAD, ao
+ client, ao token e ao ID do workspace alvo por meio de um objeto secreto
+ seguro. Espera-se que o segredo esteja na categoria 'esp', e nomeado
+ 'azure-logaccess'.
+
+          Os seguintes pares de chave-valor são suportados:
+
+          
+            
+              aad-tenant-id
+            
+
+            
+              aad-client-id
+            
+
+            
+              aad-client-secret
+            
+
+            
+              ala-workspace-id
+            
+          
+
+          O script está disponível em
+ 'create-azure-logaccess-secret.sh':
+
+          https://github.com/hpcc-systems/HPCC-Platform/tree/master/helm/examples/azure/log-analytics
+
+          O script pode ser usado para criar o segredo necessário.
+
+          Exemplo de comando para criar o segredo usando o script
+ (supondo que ./secrets-templates contenha um arquivo nomeado exatamente
+ como as chaves acima):
+
+          create-azure-logaccess-secret.sh .HPCC-Platform/helm/examples/azure/log-analytics/secrets-templates/
+
+          Caso contrário, crie o segredo manualmente.
+
+          Exemplo de comando para criação manual de segredo (supondo que
+ ./secrets-templates contenha um arquivo nomeado exatamente como as
+ chaves acima):
+
+          kubectl create secret generic azure-logaccess \
+ --from-file=HPCC-Platform/helm/examples/azure/log-analytics/secrets-templates/
+        
+
+        
+          Configure o logAccess do HPCC
+
+          A implantação do HPCC Systems alvo deve ser configurada para
+ apontar para o workspace do Azure Log Analytics acima,
+ fornecendo os valores de logAccess apropriados (como
+ ./loganalytics-hpcc-logaccess.yaml). O secret azure-logaccess
+ previamente criado deve ser declarado e associado à categoria esp;
+ isso pode ser realizado através do valor yaml dos segredos (como
+ ./loganalytics-logaccess-secrets.yaml).
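+
+          A título de ilustração, esse arquivo de valores de segredos
+ poderia ter a forma abaixo (um esboço, supondo o secret azure-logaccess
+ criado na etapa anterior; consulte o arquivo de exemplo do repositório
+ para a forma exata):
+
+          secrets:
+  esp:
+    azure-logaccess: "azure-logaccess"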
+
+          Exemplo:
+
+          helm install myhpcc hpcc/hpcc \
+ -f HPCC-Platform/helm/examples/azure/log-analytics/loganalytics-hpcc-logaccess.yaml
+        
+      
+    
+
+    
+      Acessando os Logs do HPCC Systems
+
+      A interface AKS Log Analytics no Azure fornece visualizações de
+ métricas de saúde de cluster/nó/contêiner centradas em Kubernetes e
+ links diretos para logs de contêineres por meio de interfaces de
+ "análise de log". Os logs podem ser consultados via a linguagem de
+ consulta "Kusto" (KQL).
+
+      Consulte a documentação do Azure para detalhes sobre como
+ consultar os logs.
+
+      Exemplo de consulta KQL para buscar entradas de log "Transaction
+ summary" de um contêiner ECLWatch:
+
+      let ContainerIdList = KubePodInventory
 | where ContainerName =~ 'xyz/myesp'
 | where ClusterId =~ '/subscriptions/xyz/resourceGroups/xyz/providers/Microsoft.
 ContainerService/managedClusters/aks-clusterxyz'
 | distinct ContainerID;
 ContainerLog
 | where ContainerID in (ContainerIdList)
 | order by TimeGenerated desc
 | render table

-      Amostra de saída
+      Saída de exemplo:

-      
+      

-      Consultas mais complexas podem ser formuladas para buscar
-      informações específicas fornecidas em qualquer uma das colunas de log,
-      incluindo dados não formatados na mensagem de log. A interface do
-      Insights facilita a criação de alertas com base nessas consultas, que
-      podem ser usadas para acionar e-mails, SMS, execução de Logic App e
-      muitas outras ações.
-    
+      Consultas mais complexas podem ser formuladas para buscar
+ informações específicas fornecidas em qualquer uma das colunas de log,
+ incluindo dados não formatados na mensagem do log. A interface de
+ Análise de Log facilita a criação de alertas baseados nessas consultas,
+ que podem ser usados para acionar e-mails, SMS, execução de Logic App, e
+ muitas outras ações.

   
-    Controlando a saída de registro do HPCC Systems
+    Controlando a Saída de Logs do HPCC Systems

     Os logs do HPCC Systems fornecem uma riqueza de informações que
-    podem ser usadas para benchmarking, auditoria, depuração, monitoramento,
-    etc. O tipo de informação fornecida nos logs e seu formato são controlados
-    trivialmente através da configuração padrão do Helm. Tenha em mente que no
-    modo de contêiner, cada linha de saída de log é passível de incorrer em um
-    custo dependendo do provedor e do plano que você possui e a verbosidade
-    deve ser cuidadosamente controlada usando as opções a seguir. Por padrão,
-    os logs de componentes não são filtrados e contêm as seguintes
-    colunas.
-
-    Por padrão, os logs de componentes não são filtrados e contêm as
+    podem ser usadas para benchmarking, auditoria, debugging, monitoramento,
+ etc. O tipo de informação fornecida nos logs e seu formato é trivialmente
+ controlado via configuração padrão do Helm. Lembre-se que, no modo de
+ contêiner, cada linha de saída de log é passível de incorrer em um custo,
+ dependendo do provedor e plano que você possui, e a verbosidade deve ser
+ cuidadosamente controlada usando as seguintes opções.
+
+    Por padrão, os logs do componente não são filtrados, e contêm as
 seguintes colunas:

     MessageID TargetAudience LogEntryClass JobID DateStamp TimeStamp ProcessId ThreadID QuotedLogMessage

     Os logs podem ser filtrados por Público-Alvo, Categoria ou Nível de
+ Detalhe. Além disso, as colunas de saída podem ser configuradas.
As
+ configurações de logging podem ser aplicadas no nível global ou no nível
+ do componente.

     
-      Target Audience Filtering
+      Filtragem do Público-Alvo

       Os públicos-alvo disponíveis incluem operador (OPR), usuário
-      (USR), programador (PRO), auditoria (ADT) ou todos. O filtro é
-      controlado pelo valor <section>.logging.audiences. O valor da
-      string é composto por códigos de 3 letras delimitados pelo operador de
-      agregação (+) ou pelo operador de remoção (-).
+      (USR), programador (PRO), monitor (MON), auditoria (ADT), ou todos. O
+ filtro é controlado pelo valor <section>.logging.audiences. O
+ valor da string é composto por códigos de 3 letras delimitados pelo
+ operador de agregação (+) ou pelo operador de remoção (-).

-      Por exemplo, todas as saídas de log de componentes devem incluir
-      apenas mensagens do programador e do usuário:
+      Por exemplo, para que toda a saída de log do componente inclua
+ apenas mensagens de Programador e Usuário:

       helm install myhpcc ./hpcc --set global.logging.audiences="PRO+USR"
     

     
-      Filtragem de Categoria de Destino
+      Filtragem da Categoria de Destino

-      As categorias de destino disponíveis incluem desastre (DIS), erro
-      (ERR), informações (INF), aviso (WRN), progresso (PRO), métricas (MET).
-      O filtro de categoria (ou classe) é controlado pelo valor
+      As categorias de destino disponíveis incluem disaster (DIS), error
+ (ERR), information (INF), warning (WRN), progress (PRO), event (EVT),
+ metrics (MET). O filtro de categoria (ou classe) é controlado pelo valor
 <section>.logging.classes, composto por códigos de 3 letras
 delimitados pelo operador de agregação (+) ou pelo operador de remoção
 (-).

-      Por exemplo, a saída do log da instância mydali para incluir todas
-      as classes, exceto o progresso:
+      Por exemplo, para que a saída de log da instância mydali inclua
+ todas as classes, exceto progress:

       helm install myhpcc ./hpcc --set dali[0].logging.classes="ALL-PRO" --set dali[0].name="mydali"
     

     
-      Log Detail Level Configuration
+      Configuração do Nível de Detalhe do Log
+
+      A verbosidade da saída do log pode ser ajustada de "apenas
+ mensagens críticas" (1) até "relatar todas as mensagens" (100). O nível
+ de log padrão é bastante alto (80) e deve ser ajustado de acordo.
+
+      Estes são os níveis de log disponíveis:
+
+      
+        
+          CriticalMsgThreshold = 1;
+        
+
+        
+          FatalMsgThreshold = 1;
+        
+
+        
+          ErrMsgThreshold = 10;
+        
+
+        
+          WarnMsgThreshold = 20;
+        
+
+        
+          AudMsgThreshold = 30;
+        
+
+        
+          ProgressMsgThreshold = 50;
+        
+
+        
+          InfoMsgThreshold = 60;
+        
+
+        
+          DebugMsgThreshold = 80;
+        
+
+        
+          ExtraneousMsgThreshold = 90;
+        
+      

-      A verbosidade da saída do log pode ser ajustada de "critical
-      messages only" (1) até "report all messages" (100). O nível de log
-      padrão é bastante alto (80) e deve ser ajustado de acordo.

-      Por exemplo, a verbosidade deve ser média para todos os
-      componentes:
+      Por exemplo, para reportar somente mensagens de progresso e de
+ nível mais baixo (mais críticas):

       helm install myhpcc ./hpcc --set global.logging.detail="50"
     

     
-      Configuração da Coluna de Dados de Registro
-
-      As colunas de dados de log disponíveis incluem messageid(MID),
-      público(AUD), class(CLS), date(DAT), time(TIM), node(NOD),
-      militime(MLT), microtime(MCT), nanotime(NNT) , processid(PID),
-      threadid(TID), job(JOB), use(USE), session(SES), code(COD),
-      component(COM), quotemessage(QUO), prefix(PFX), all(ALL) e padrão (STD).
- A configuração das colunas (ou campos) de dados de log é controlada pelo
-      valor <section>.logging.fields, composto por códigos de 3 letras
-      delimitados pelo operador de agregação (+) ou pelo operador de remoção
-      (-). Por exemplo, todas as saídas de log de componentes devem incluir as
-      colunas padrão, exceto a coluna de ID do job:
+      Configuração da Coluna de Dados de Log
+
+      As colunas de dados de log disponíveis incluem messageid (MID),
+ audience (AUD), class (CLS), date(DAT), time (TIM), node (NOD),
+ millitime (MLT), microtime (MCT), nanotime (NNT), processid (PID),
+ threadid (TID), job (JOB), use(USE), session(SES), code(COD),
+ component(COM), quotedmessage(QUO), prefix (PFX), all (ALL), e
+ standard(STD). A configuração das colunas (ou campos) de dados de log é
+ controlada pelo valor <section>.logging.fields, composto por
+ códigos de 3 letras delimitados pelo operador de agregação (+) ou pelo
+ operador de remoção (-).

-      Por exemplo, todas as saídas de log de componentes devem incluir
-      as colunas padrão, exceto a coluna de ID do job:
+      Por exemplo, para que toda a saída de log do componente inclua as
+ colunas padrão, exceto a coluna do ID do job:

       helm install myhpcc ./hpcc --set global.logging.fields="STD-JOB"

-      O ajuste de valores de registro por componente pode exigir a
-      afirmação de vários valores específicos de componentes, o que pode ser
-      inconveniente de fazer por meio do parâmetro de linha de comando --set.
-      Nesses casos, um arquivo de valores personalizados pode ser usado para
-      definir todos os campos obrigatórios.
+      O ajuste dos valores de log por componente pode exigir a definição
+ de vários valores específicos do componente, o que pode ser
+ inconveniente de fazer via parâmetro de linha de comando --set. Nestes
+ casos, um arquivo de valores personalizados pode ser usado para
+ definir todos os campos requeridos.

       Por exemplo, a instância do componente ESP 'eclwatch' deve gerar
 um log mínimo:
     

     
-      Asychronous logging configuration
+      Configuração de Logging Assíncrono

       Por padrão, as entradas de log serão criadas e registradas de
 forma assíncrona, para não bloquear o cliente que está registrando. As
-      entradas de log serão mantidas em uma fila e geradas em uma thread em
-      segundo plano. Esta fila tem um limite, uma vez atingido, o cliente
-      bloqueará aguardando capacidade. Como alternativa, o comportamento pode
-      ser configurado de forma que, quando esse limite for atingido, as
-      entradas de log sejam descartadas e perdidas para evitar qualquer
-      possível bloqueio.
-
-      By default log entries will be created and logged asynchronously,
-      so as not to block the client that is logging. Log entries will be held
-      in a queue and output on a background thread. This queue has a maximum
-      depth, once hit, the client will block waiting for capacity.
-      Alternatively, the behaviour can be be configured such that when this
-      limit is hit, logging entries are dropped and lost to avoid any
-      potential blocking.
+      entradas de log serão mantidas em uma fila e emitidas em uma thread
+ em segundo plano. Essa fila tem um limite; quando atingido, o cliente
+ ficará bloqueado aguardando capacidade. Alternativamente, o
+ comportamento pode ser configurado para que, quando esse limite for
+ atingido, as entradas de log sejam descartadas e perdidas para evitar
+ qualquer bloqueio potencial.
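+
+      A título de ilustração (valores hipotéticos), os dois valores
+ descritos logo a seguir poderiam ser definidos na implantação:
+
+      helm install myhpcc hpcc/hpcc --set global.logging.queueLen=512 --set global.logging.queueDrop=32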
+
+      NB: normalmente, espera-se que a pilha de registro acompanhe e que
+ o limite de fila padrão seja suficiente para evitar qualquer
 bloqueio.

-      Os padrões podem ser configurados definindo o
+      Os padrões podem ser configurados definindo a
 <section>.logging.queueLen e/ou
 <section>.logging.queueDrop.

-      Ajustar <section>.logging.queueLen para 0 desabilitará o log
-      assíncrono, ou seja, cada log será bloqueado até ser concluído.
+      Definir <section>.logging.queueLen como 0 desativará o
+ registro assíncrono, ou seja, cada registro bloqueará até ser
+ concluído.

-      Ajustar <section>.logging.queueDrop para um valor não-zero
-      (N) fará com que N entradas de log da fila sejam descartadas se o
-      queueLen for atingido.
+      Definir <section>.logging.queueDrop para um valor diferente
+ de zero (N) fará com que N entradas de registro da fila sejam
+ descartadas se a queueLen for atingida.
diff --git a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/CustomConfig.xml b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/CustomConfig.xml
index e2cab9b320d..9a72a0a39e1 100644
--- a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/CustomConfig.xml
+++ b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/CustomConfig.xml
@@ -2,17 +2,17 @@

-  Configurações Customizadas
+  Configurações Personalizadas

   
     Técnicas de Customização

-    Nesta seção, nós vamos abordar a criação de uma configuração
-    customizada do arquivo YAML e do lançamento de uma plataforma HPCC
-    Systems® utilizando os configurações padrão mas
-    as customizações. Depois de entender os conceitos de deste capítulo, você
-    pode consultar o próximo para uma referência a todos as configurações de
-    valores de configuração.
+    Esta seção percorre a criação de um arquivo de configuração YAML
+ personalizado e a implantação de uma plataforma HPCC Systems
+ ® usando a configuração padrão mais as
+ personalizações. Uma vez que você entenda os conceitos neste capítulo,
+ você pode se referir ao próximo capítulo para uma referência a todos os
+ ajustes de valor de configuração.

     Há várias maneiras de personalizar uma implantação da plataforma.
 Nós recomendamos o uso de métodos que permitem que você aproveite melhor o
@@ -220,8 +220,8 @@
           Crie um novo arquivo de texto e nomeie-o twothors.yaml, em seguida abra em um editor
-          de texto.
+          role="bold">twothors.yaml e abra-o em um editor de
+ texto.

           Você pode usar qualquer editor de texto.
@@ -315,6 +315,600 @@
 helm upgrade mycluster hpcc/hpcc -f tworoxies.yaml -f twothors.yaml
+
+    
+      Crie um Chart de Configuração Personalizado para
+ AllowPipePrograms
+
+      Você pode especificar mais de uma configuração personalizada
+ repetindo o parâmetro -f.
+
+      Por exemplo:
+
+      helm install mycluster hpcc/hpcc -f tworoxies.yaml -f thorWithPipe.yaml
+
+      Nesta seção, modificaremos o Thor para permitir alguns Programas
+ PIPE. Na versão 9.2.0 e superior, os comandos usados no PIPE são
+ restritos por padrão em implantações containerizadas, a menos que sejam
+ explicitamente permitidos no Helm chart.
+
+      
+        
+          Se você ainda não adicionou o repositório HPCC Systems à lista
+ de repositórios Helm, faça isso agora.
+
+          helm repo add hpcc https://hpcc-systems.github.io/helm-chart/
+
+          Se você já adicionou, atualize para os charts mais recentes:
+
+          helm repo update
+        
+
+        
+          Crie um novo arquivo de texto e o nomeie de thorWithPipe.yaml e abra-o em um editor de
+ texto.
+
+          Você pode usar qualquer editor de texto.
+        
+
+        
+          Abra o arquivo padrão de valores que você salvou previamente
+ (myvalues.yaml) em um editor de texto.
+        
+
+        
+          Copie toda a seção thor: e
+ cole no novo arquivo thorWithPipe.yaml.
+        
+
+        
+          Adicione um bloco no fim:
+
+          allowedPipePrograms:
+  - sort
+  - grep
+  - echo
+
+          Este exemplo habilita três programas. Você pode listar
+ quantos precisar.
+        
+
+        
+          Salve o arquivo e feche o editor de texto.
+
+          O arquivo thorWithPipe.yaml resultante deve ficar
+ assim
+
+          Nota: Os comentários foram
+ removidos para simplificar o exemplo:
+
+          thor:
+- name: thor
+  prefix: thor
+  numWorkers: 2
+  maxJobs: 4
+  maxGraphs: 2
+  allowedPipePrograms:
+  - sort
+  - grep
+  - echo
+        
+
+        
+          Faça o deploy utilizando o novo chart de
+ configuração customizado.
+        
+
+        
+          Abra uma janela de terminal e navegue até o diretório onde o
+ arquivo thorWithPipe.yaml foi salvo.
+        
+
+        
+          Faça o deploy da sua plataforma HPCC Systems, adicionando o
+ novo arquivo de configuração por meio do comando:
+
+          # If you have previously stopped your cluster
+
+helm install mycluster hpcc/hpcc -f thorWithPipe.yaml
+
+# To upgrade without stopping
+
+helm upgrade mycluster hpcc/hpcc -f thorWithPipe.yaml
+        
+
+        
+          Após confirmar o deploy em execução, execute um job que use
+ uma ação PIPE e especifique um dos programas permitidos.
+
+          Nota: Se o job for muito
+ simples, será executado no hThor ao invés do Thor e este exemplo não
+ habilita programas PIPE no hThor.
+
+          Você pode criar um novo arquivo yaml para permitir Programas
+ PIPE no ECL Agent ou pode utilizar:
+
+          #OPTION('pickBestEngine',FALSE);
+
+          para forçar o job a ser executado no Thor.
+        
+      
+    
+
+    
+      Criar um Chart de Configuração Personalizado sem Thor
+
+      Nesta seção, criaremos um arquivo YAML para especificar uma
+ implementação de plataforma sem Thor.
+
+      
+        
+          Se você ainda não adicionou o repositório do HPCC Systems à
+ lista de repositórios do helm, faça isso agora.
+
+          helm repo add hpcc https://hpcc-systems.github.io/helm-chart/
+
+          Caso já tenha feito, atualize para os charts mais recentes:
+
+          helm repo update
+        
+
+        
+          Crie um novo arquivo de texto e o nomeie para nothor.yaml e abra-o em um editor de
+ texto.
+
+          Você pode usar qualquer editor.
+        
+
+        
+          Edite, conforme o exemplo a seguir, o arquivo para desabilitar
+ o Thor:
+
+          thor: []
+        
+
+        
+          Salve o arquivo e feche o editor de texto.
+        
+
+        
+          Faça o deploy utilizando o novo chart de configuração
+ customizado.
+        
+
+        
+          Abra o terminal e navegue até o diretório onde está salvo o
+ arquivo nothor.yaml.
+        
+
+        
+          Faça o deploy do HPCC Systems, incluindo no seu comando a nova
+ configuração:
+
+          # If you have previously stopped your cluster
+
+helm install mycluster hpcc/hpcc -f nothor.yaml
+
+# To upgrade without stopping
+
+helm upgrade mycluster hpcc/hpcc -f nothor.yaml
+        
+
+        
+          Após confirmar que seu deploy está em execução, abra o ECL
+ Watch.
+
+          Você não irá ver nenhum cluster Thor como destino.
+        
+      
+    
+
+    
+      Crie um Chart de Configuração Personalizado sem Roxie
+
+      Nesta seção, criaremos um arquivo YAML para especificar uma
+ implementação de plataforma sem Roxie. Embora o resultado seja
+ semelhante ao que fizemos na seção anterior para sem Thor, a técnica é
+ diferente.
+
+      
+        
+          Se você ainda não adicionou o repositório do HPCC Systems à
+ lista de repositórios helm, faça isso agora.
+
+          helm repo add hpcc https://hpcc-systems.github.io/helm-chart/
+
+          Caso já tenha feito, atualize para os charts mais recentes:
+
+          helm repo update
+        
+
+        
+          Crie um novo arquivo de texto e o nomeie para noroxie.yaml e abra-o em um editor de
+ texto.
+
+          Você pode usar qualquer editor de texto.
+        
+
+        
+          Salve os valores padrão em um arquivo de texto:
+
+          helm show values hpcc/hpcc > myvalues.yaml
+        
+
+        
+          Abra o arquivo salvo (myvalues.yaml) em um editor de
+ texto.
+        
+
+        
+          Copie a seção roxie: e cole
+ no novo arquivo noroxie.yaml.
+        
+
+        
+          Copie a seção eclagent: e
+ cole no novo arquivo noroxie.yaml.
+        
+
+        
+          No bloco roxie, edite o valor
+ para disabled: e mude para
+ true.
+
+          Você pode remover todo o resto do bloco roxie: exceto o
+ nome.
+        
+
+        
+          No bloco eclagent, delete
+ todo o bloco name:
+ roxie-workunit.
+
+          Isso remove a instância de um Roxie atuando como um Agente
+ ECL.
+        
+
+        
+          Salve e feche o editor de texto.
+
+          O arquivo noroxie.yaml deverá se parecer com este:
+
+          Nota: Os comentários foram
+ removidos para simplificar o exemplo:
+
+          roxie:
+- name: roxie
+  disabled: true
+
+eclagent:
+- name: hthor
+  replicas: 1
+  maxActive: 4
+  prefix: hthor
+  useChildProcesses: false
+  type: hthor
+        
+
+        
+          Faça o deploy usando o novo chart de
+ configuração personalizado.
+        
+
+        
+          Abra uma janela de terminal e navegue até o diretório onde
+ está salvo o arquivo noroxie.yaml.
+        
+
+        
+          Faça o deploy do HPCC Systems, incluindo no seu comando a nova
+ configuração:
+
+          helm install mycluster hpcc/hpcc -f noroxie.yaml
+        
+
+        
+          Após confirmar que seu deploy está em execução, abra o ECL
+ Watch.
+
+          Você não irá mais ver nenhum cluster Roxie disponível como
+ destino.
+        
+      
+    
+
+    
+      Crie um Chart de Configuração Personalizado para Múltiplos Thors
+ Ouvindo uma Fila Comum
+
+      Nesta seção, criaremos três Thors que escutam uma fila comum (além
+ de sua própria fila). Isso fornece a capacidade de definir configurações
+ distintas de cluster Thor, mas permite que eles formem um único alvo
+ atrás de uma única fila. Esses clusters podem ser vinculados a
+ determinados conjuntos de nós em diferentes zonas de disponibilidade, se
+ desejado. Você pode usar este exemplo como um ponto de partida e ajustar
+ o número de clusters Thor que deseja.
+
+      Isso é alcançado definindo filas de destino auxiliares adicionais
+ para cada definição de Thor e usando um nome comum como uma fila
+ auxiliar.
+
+      
+        
+          Se você ainda não adicionou o repositório do HPCC Systems à
+ lista de repositórios helm, faça isso agora.
+
+          helm repo add hpcc https://hpcc-systems.github.io/helm-chart/
+
+          Se já fez isso, atualize para os charts mais recentes:
+
+          helm repo update
+        
+
+        
+          Crie um novo arquivo de texto e o nomeie para threethorsonequeue.yaml e abra-o em um editor
+ de texto.
+
+          Você pode usar qualquer editor de texto.
+        
+
+        
+          Abra o arquivo padrão de valores que você salvou previamente
+ (myvalues.yaml) em um editor de texto.
+        
+
+        
+          Copie a seção thor: e cole no novo
+ arquivo threethorsonequeue.yaml.
+        
+
+        
+          Copie todo o conteúdo do novo arquivo yaml, exceto a
+ primeira linha (thor:), e cole-o duas vezes no final do arquivo.
+
+          Isso cria três seções - name:.
+        
+
+        
+          Edite o arquivo da seguinte maneira:
+
+          
+            
+              Dê um valor de name único para cada Thor.
+
+              Neste exemplo, utilizaremos thor1,
+ thor2, e thor3.
+            
+
+            
+              Adicione a entrada auxQueues: em cada bloco Thor
+ utilizando um nome comum.
+
+              Neste exemplo estamos utilizando:
+
+              auxQueues: [ thorQ ]
+            
+
+            
+              Certifique-se de que o prefix: é o mesmo em cada bloco
+ Thor.
+            
+          
+        
+
+        
+          Salve e feche o editor de texto.
+ + O arquivo threethorsonequeue.yaml deverá ficar assim: + + Note: Os comentários foram + removidos para simplificar o exemplo: + + + thor: +- name: thor1 + auxQueues: [ thorQ ] + maxGraphs: 2 + maxJobs: 2 + numWorkers: 4 + numWorkersPerPod: 2 + prefix: thor +- name: thor2 + maxGraphs: 2 + maxJobs: 2 + numWorkers: 4 + numWorkersPerPod: 2 + prefix: thor + auxQueues: [ thorQ ] +- name: thor3 + maxGraphs: 2 + maxJobs: 2 + numWorkers: 4 + numWorkersPerPod: 2 + prefix: thor + auxQueues: [ thorQ ] + + + + + + Faça o deploy usando o novo chart de + configuração personalizada. + + + + + Abra uma janela de terminal e navegue até o diretório onde + está salvo o arquivo threethorsonequeue.yaml. + + + + Faça o deploy do HPCC Systems, incluindo no seu comando a nova + configuração: + + + # If you have previously stopped your cluster + +helm install mycluster hpcc/hpcc -f threethorsonequeue.yaml + +# To upgrade without stopping + +helm upgrade mycluster hpcc/hpcc -f threethorsonequeue.yaml + + + + + + Após confirmar que seu deploy está em execução, abra o ECL + Watch. + + Você deve ver quatro clusters Thor disponíveis como Alvos - + thor1, thor2, thor3 e uma quarta fila que todos os três Thors ouvem + - thorQ. + + + + + + Crie um Chart de Configuração Personalizado somente para Landing + Zone + + Nessa seção, nós iremos criar uma configuração personalizada que + implementa uma "plataforma" contendo apenas uma Landing Zone. Isso pode + ser útil se tudo o que você precisa é um servidor de landing zone com o + dafilesrv rodando. + + Nota: Isso só pode ser + implementado em um namespace diferente de qualquer outra instância de + plataforma. + + + + Se você ainda não adicionou o respositório do HPCC Systems a + lista de respositórios helm, faça agora. + + + helm repo add hpcc https://hpcc-systems.github.io/helm-chart/ + + + Se já fez isso, atualize os últimos charts: + + + helm repo update + + + + + Criar um novo arquivo de texto e o nomeie para lz.yaml e abra-o em um editor de + texto. + + Você pode usar qualquer editor de texto. + + + + Copie e cole este código no arquivo: + + + dafilesrv: +- name: direct-access + application: directio + service: + servicePort: 7100 + visibility: local + tls: false + resources: + cpu: "2" + memory: "8G" +dali: [] +dfuserver: [] +eclagent: [] +eclccserver: [] +eclscheduler: [] +esp: [] +roxie: [] +sasha: null +thor: [] + + + + + + Salve e feche o editor de texto. + + + + + Faça o deploy usando o novo chart de + configuração personalizada. + + + + + Abra uma janela de terminal e navegue até o diretório onde + está salvo o arquivo lz.yaml file. + + + + Implante esta "plataforma" LZ somente com a nova configuração + adicionada ao seu comando. + + + helm install mylz hpcc/hpcc -f lz.yaml + + + + + + Confirme que está instalado usando este comando: + + + helm list + + + + @@ -448,7 +1042,7 @@ helm upgrade mycluster hpcc/hpcc -f tworoxies.yaml -f twothors.yaml Os custos de armazenamento não podem ser vistos como um valor separado no ECL Watch. Eles só podem ser visualizados como parte de um - campo de custo na página de resumo de um arquivo lógico. + campo de custo na página de resumo de um arquivo lógico. @@ -697,4 +1291,183 @@ helm upgrade mycluster hpcc/hpcc -f tworoxies.yaml -f twothors.yaml + + + Protegendo Credenciais + + Utilizar o HPCC Systems em um ambiente containerizado tem algumas + preocupações de segurança únicas, ao externalizar componentes normalmente + internalizados, como as credenciais dos administradores LDAP. 
+
+  A proteção das credenciais dos administradores LDAP é realizada
+ usando os segredos do Kubernetes ou do Hashicorp Vault. Como
+ pré-requisito, você deve estar familiarizado com a configuração de
+ segredos do Kubernetes e/ou do Hashicorp Vault.
+
+  A conta dos administradores LDAP deve ter direitos de administrador
+ para todos os Base DNs usados pela plataforma HPCC Systems. Em uma
+ implantação na nuvem, essas credenciais podem ser expostas. Portanto, é
+ uma boa prática proteger essas credenciais de administrador
+ usando segredos do Kubernetes ou o HashiCorp Vault.
+
+  
+    Protegendo as credenciais no Kubernetes
+
+    Para criar um secret no Kubernetes para armazenar as credenciais
+ da conta do usuário do administrador do LDAP, use uma interface de linha
+ de comando para o Kubernetes e execute um comando semelhante ao seguinte
+ exemplo:
+
+    kubectl create secret generic admincredssecretname --from-literal=username=hpcc_admin \
+ --from-literal=password=t0pS3cr3tP@ssw0rd
+
+    No exemplo acima, o nome do secret do Kubernetes é
+ "admincredssecretname", e ele contém as chaves/valores "username" e
+ "password" da conta do administrador do LDAP. Isso armazena o nome de
+ usuário e a senha do administrador do LDAP como um segredo do
+ Kubernetes. Quaisquer propriedades adicionais são ignoradas.
+
+    Você pode verificar o secret que acabou de criar executando o
+ seguinte comando no Kubernetes.
+
+    kubectl get secret admincredssecretname
+
+    Para obter mais informações sobre o Kubernetes, consulte a
+ documentação específica para a sua implementação.
+
+    
+      Utilizando secrets do Kubernetes
+
+      Para implementar os secrets do Kubernetes, substitua a seção
+ "secrets:" em HPCC-Platform/helm/hpcc/values.yaml, ou implemente com o
+ seu próprio chart personalizado. Para mais informações sobre como
+ personalizar sua implantação containerizada do HPCC Systems, veja
+ as seções acima sobre técnicas de personalização.
+
+      No seu chart, crie um nome de chave único usado para referenciar
+ o segredo, e defina-o como o nome do secret que você criou na etapa
+ anterior. No exemplo acima, era "admincredssecretname".
+
+      Você pode, opcionalmente, definir segredos adicionais conforme
+ necessário pela configuração de segurança da sua plataforma. Cada um
+ desses segredos seria criado conforme descrito acima e receberia nomes
+ únicos. O exemplo abaixo indica como você pode adicionar quaisquer
+ credenciais ou secrets adicionais ao seu chart Helm, se
+ necessário.
+
+      A chave/valor "admincredsmountname" já existe por padrão no
+ arquivo values.yaml fornecido pelo HPCC Systems. A chave é
+ referenciada no arquivo ldap.yaml do componente. Você pode substituir
+ estes e adicionar chaves/valores adicionais conforme necessário. O
+ seguinte exemplo ilustra a adição de "additionalsecretname" e esse
+ nome deve corresponder ao nome do segredo adicional criado usando as
+ etapas acima.
+
+      secrets:
+  authn:
+    admincredsmountname: "admincredssecretname" #externalize HPCC Admin creds
+    additionalmountname: "additionalsecretname" #alternate HPCC Admin creds
+
+    
+
+    
+      Habilitar autenticação LDAP
+
+      No arquivo ldap.yaml fornecido em
+ HPCC-Platform/esp/applications/common/ldap/, a "ldapAdminSecretKey" já
+ está configurada para o nome da chave de montagem ilustrado no exemplo
+ acima.
Para habilitar a autenticação LDAP e modificar este valor, você
+ ou seu administrador de sistemas podem substituir o componente Helm
+ ESP/ECLWatch localizado no chart values.yaml, conforme ilustrado no
+ exemplo seguinte:
+
+      esp:
+- name: eclwatch
+  application: eclwatch
+  auth: ldap
+  ldap:
+    ldapAddress: "myldapserver"
+    ldapAdminSecretKey: "additionalmountname" # use alternate secrets creds
+
+    
+  
+
+  
+    Protegendo credenciais no HashiCorp Vault
+
+    Para criar e armazenar secrets no HashiCorp Vault, a partir da
+ linha de comando, execute os seguintes comandos Vault. O nome secreto
+ usado no exemplo abaixo é "myvaultadmincreds" e deve ser prefixado com
+ "secret/authn/" conforme ilustrado. As chaves/valores "username" e
+ "password" do administrador LDAP são necessários. Propriedades
+ adicionais são ignoradas.
+
+    vault kv put secret/authn/myvaultadmincreds username=hpcc_admin password=t0pS3cr3tP@ssw0rd
+
+    Onde "secret/authn/myvaultadmincreds" é o nome do secret que
+ contém o nome de usuário e a senha do administrador LDAP.
+
+    Para verificar e confirmar os valores do secret, execute o seguinte
+ comando:
+
+    vault kv get secret/authn/myvaultadmincreds
+
+    Para obter mais informações sobre como criar segredos para o
+ HashiCorp Vault, consulte a documentação apropriada da HashiCorp para
+ sua implementação.
+
+    
+      Deploy do HashiCorp
+ Vault
+
+      Faça o deploy dos secrets do HashiCorp Vault sobrescrevendo a
+ seção "secrets:" em HPCC-Platform/helm/hpcc/values.yaml, ou em seu
+ chart de configuração personalizado. Para mais informações sobre como
+ personalizar sua implantação containerizada do HPCC Systems, veja as
+ seções acima sobre técnicas de personalização.
+
+      O valor do nome do Vault é definido para este exemplo no chart
+ de configuração values-secrets.yaml. Você pode encontrar um
+ exemplo deste chart no repositório HPCC-Platform em
+ /helm/examples/secrets/values-secrets.yaml.
+
+      vaults:
+  authn:
+    - name: my-authn-vault
+      #The data node in the URL is there for use by the REST API
+      #The path inside the vault starts after /data
+      url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/authn/${secret}
+      kind: kv-v2
+
+      Você pode inserir isso em seu próprio chart de personalização
+ onde você fornece à sua implantação o nome do cofre que contém as
+ credenciais.
+    
+
+    
+      Referenciando a Autenticação Armazenada no Vault
+
+      Os nomes de chave "ldapAdminSecretKey" e "ldapAdminVaultId" são
+ usados pelo gerenciador de segurança do HPCC Systems para resolver os
+ segredos, e devem corresponder exatamente quando se utiliza o nome do
+ Vault configurado nas etapas anteriores.
+
+      esp:
+- name: eclwatch
+  application: eclwatch
+  auth: ldap
+  ldap:
+    ldapAddress: "myldapserver"
+    ldapAdminSecretKey: "myvaultadmincreds"
+    ldapAdminVaultId: "my-authn-vault"
+
+    
+  
+
- + - + @@ -426,7 +426,7 @@ OUTPUT(allPeople,,'MyData::allPeople',THOR,OVERWRITE); - + @@ -459,7 +459,7 @@ OUTPUT(allPeople,,'MyData::allPeople',THOR,OVERWRITE); A nova sintaxe é: - ~plane::<storage-plane-name>::<path>::<filename> + '~plane::hpcc-data::tutorial::originalperson' Onde a sintaxe do caminho e do nome do arquivo são as mesmas usadas com a sintaxe file:: . Isso inclui exigir @@ -473,7 +473,8 @@ OUTPUT(allPeople,,'MyData::allPeople',THOR,OVERWRITE); role="bold">C:\hpccdata\hpcc-data\tutorial, poderá fazer referência ao arquivo usando esta sintaxe: - '~plane::data::tutorial::originalperson' + '~plane::data::tutorial::originalperson' + Nota: @@ -485,10 +486,10 @@ OUTPUT(allPeople,,'MyData::allPeople',THOR,OVERWRITE); - + - + - + diff --git a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/Contains.xml b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/Contains.xml index 5960734f750..af00a78b6d0 100644 --- a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/Contains.xml +++ b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/Contains.xml @@ -71,5 +71,5 @@ B:= STD.Str.Contains( 'abcdefghijklmnopqrstuvwxyz', false); //returns FALSE -- 'z' is missing - + Veja também: Find diff --git a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/Copy.xml b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/Copy.xml index 592a674633c..a292ca555f5 100644 --- a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/Copy.xml +++ b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/Copy.xml @@ -28,6 +28,7 @@ [ ,preserveCompression ] [ ,noSplit ] [ ,expireDays ] [ ,ensure ]); dfuwuid := @@ -55,7 +56,8 @@ [ ,preserveCompression ] [ ,noSplit ] [ ,expireDays ]); + role="bold">] [ ,ensure]); @@ -193,9 +195,16 @@ expireDays - Opcional. Um valor inteiro indicando o número de dias -           antes de remover automaticamente o arquivo. Se omitido, o - padrão é           -1 (nunca expira). + Opcional. Um valor inteiro indicando o número de dias antes + de remover automaticamente o arquivo. Se omitido, o padrão é -1 + (nunca expira). + + + + ensure + + Opcional. Copia o arquivo lógico, mas não copia partes do + arquivo se elas já existirem. O padrão é FALSO. diff --git a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/CreateExternalDirectory.xml b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/CreateExternalDirectory.xml index ce3e3da457e..cd85870b6e7 100644 --- a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/CreateExternalDirectory.xml +++ b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/CreateExternalDirectory.xml @@ -10,7 +10,9 @@ File.CreateExternalDirectory CreateExternalDirectory - ( location, path ( location, path [ , planename ] ) @@ -23,8 +25,8 @@ location - Uma string terminada por nulo que contém o endereço IP da - máquina remota. + Uma string terminada em nulo contendo o endereço IP da + máquina remota. Opcional se planename for fornecido. @@ -33,6 +35,15 @@ Uma string terminada por nulo que contém o caminho do diretório a ser criado. + + + planename + + Uma string terminada em nulo contendo o nome do plano de + dados que contém o arquivo. Opcional se + localização for fornecida, mas + planename é preferível. 
+ @@ -44,8 +55,10 @@ Exemplo: - IP := '10.150.254.6'; -path := '/c$/training/import/NewDir'; -STD.File.CreateExternalDirectory(IP,path); + IMPORT STD; +IP := ''; +path := '/var/lib/HPCCSystems/dropzone/advancedtraining/'; +planename := 'mydropzone'; +STD.File.CreateExternalDirectory(IP,path,planename); diff --git a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/DeleteExternalFile.xml b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/DeleteExternalFile.xml index 7e8f29f4458..eaa2ce6d77d 100644 --- a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/DeleteExternalFile.xml +++ b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/DeleteExternalFile.xml @@ -10,7 +10,9 @@ File.DeleteExternalFile DeleteExternalFile - ( location, path ( location, path [ , planename ] ) @@ -23,8 +25,8 @@ location - Uma string terminada por nulo que contém o endereço IP da - máquina remota. + Uma string terminada em nulo contendo o endereço IP da + máquina remota. Opcional se planename for fornecido. @@ -33,6 +35,15 @@ Uma string terminada por nulo que contém o caminho/nome do arquivo a ser removido. + + + planename + + Uma string terminada em nulo contendo o nome do plano de + dados que contém o arquivo. Opcional se + localização for fornecida, mas + planename é preferível. + diff --git a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/Find.xml b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/Find.xml index d0aaca5be4f..93efec74ab1 100644 --- a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/Find.xml +++ b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/Find.xml @@ -99,4 +99,6 @@ D := IF(STD.Str.Find('', 'BD', 1) = 0, 'Success', 'Failure - 4'); //success + + Veja também: Contains diff --git a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/MoveExternalFile.xml b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/MoveExternalFile.xml index 0b84281c8ff..a0151682c30 100644 --- a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/MoveExternalFile.xml +++ b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/MoveExternalFile.xml @@ -10,8 +10,10 @@ File.MoveExternalFile MoveExternalFile - ( location, frompath, topath - ) + ( location, frompath, topath [ , planename ] ) @@ -23,7 +25,9 @@ location - Uma string terminada por nulo que contém o endereço IP da + Uma string terminada em nulo contendo o endereço IP da + máquina remota. Opcional se planename for + fornecido.Uma string terminada por nulo que contém o endereço IP da máquina remota. @@ -40,6 +44,15 @@ Uma string terminada por nulo que contém o caminho/nome do arquivo de destino. + + + planeName + + Uma string terminada em nulo contendo o nome do plano de + dados que contém o arquivo. Opcional se + machineIP for fornecido, mas + planeName é preferível. 
+ @@ -53,8 +66,10 @@ Exemplo: - IP := '10.150.254.6'; -infile := '/c$/training/import/AdvancedECL/people'; -outfile := '/c$/training/import/DFUtest/people'; -STD.File.MoveExternalFile(IP,infile,outfile); + IMPORT STD; +IP := ''; +infile := '/var/lib/HPCCSystems/dropzone/originalperson'; +outfile := '/var/lib/HPCCSystems/dropzone/originalperson_bak'; +planename := 'mydropzone'; +STD.File.MoveExternalFile(IP,infile,outfile,planename); diff --git a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/RemoteDirectory.xml b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/RemoteDirectory.xml index 27e4495ccbb..a1e9d865eba 100644 --- a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/RemoteDirectory.xml +++ b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/RemoteDirectory.xml @@ -4,17 +4,19 @@ RemoteDirectory - STD.File.RemoteDirectory + STD.File.RemoteDirectory( + machineIP, dir [ , + mask ][ , recurse + ][ , planeName ] ) + STD.File.RemoteDirectory File.RemoteDirectory RemoteDirectory - ( machineIP, dir [ , mask ][ , recurse ] ) + @@ -26,7 +28,9 @@ machineIP - Uma string terminada por nulo que contém o endereço IP da + Uma string terminada em nulo contendo o endereço IP da + máquina remota. Opcional se planeName for + fornecido.Uma string terminada por nulo que contém o endereço IP da máquina remota. @@ -55,6 +59,15 @@ FALSE. + + planeName + + Uma string terminada em nulo contendo o nome do plano de + dados que contém o arquivo. Opcional se + machineIP for fornecido, mas + planeName é preferível. + + Return: @@ -78,7 +91,10 @@ END; Exemplo: - OUTPUT(STD.File.RemoteDirectory('edata12','\in','*.d00')); -OUTPUT(STD.File.RemoteDirectory('10.150.254.6', - '/var/lib/HPCCSystems/hpcc-data/thor/','acc*',TRUE)); + IMPORT STD; +machineIP := ''; +dir := '/var/lib/HPCCSystems/dropzone/training'; +recurse:= FALSE; +planeName := 'mydropzone'; +OUTPUT(STD.File.RemoteDirectory(machineIP,dir,'*.csv',recurse,planeName)); diff --git a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/TimestampToString.xml b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/TimestampToString.xml new file mode 100644 index 00000000000..69d560cb3e5 --- /dev/null +++ b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/TimestampToString.xml @@ -0,0 +1,88 @@ + + + + TimestampToString + + STD.Date.TimestampToString + (timestamp, format) + + + + + + + + + + timestamp + + Um INTEGER8 contendo o número de microssegundos desde a época + (1 de janeiro de 1970 UTC) + + + + format + + OPCIONAL. O formato da string a ser retornada. Veja a + documentação strftime para detalhes (http://strftime.org/). Se + omitido, o padrão é '%Y-%m-%dT%H:%M:%S.%@' que é no formato + YYYY-MM-DDTHH:MM:SS.ssssss. + + + + Return: + + O timestamp convertido como uma string + no formato especificado. + + + + + + A função TimestampToString + TimestampToString + converte um valor Timestamp_t contendo o número de + microssegundos desde a época (1 de janeiro de 1970 UTC) em uma string + legível por humanos usando um modelo de formato dos padrões strftime. Dois + especificadores de formato adicionais estão disponíveis para exibir segundos + fracionados: + + + + + + + + + + %@ + + Fração de segundos em microssegundos (6 dígitos) + + + + %# + + Fração de segundos em microssegundos (3 dígitos) + + + + + + As frações de milissegundo são truncadas de microssegundos quando + necessário. + + O comprimento máximo da string resultante é de 255 caracteres. 
+ + Exemplo: + + IMPORT STD; +STD.Date.TimestampToString(1048998120000000, '%A %B %d, %Y T%H:%M:%S.%#'); + // returns Sunday March 30, 2003 T04:22:00.000 + + + + diff --git a/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/getElapsedMs.xml b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/getElapsedMs.xml new file mode 100644 index 00000000000..44296549cf6 --- /dev/null +++ b/docs/PT_BR/ECLStandardLibraryReference/SLR-Mods/getElapsedMs.xml @@ -0,0 +1,55 @@ + + + + getElapsedMs + + result := + STD.System.Log.getElapsedMs + STD.System.Log.getElapsedMs + + System.Log.getElapsedMs + + Log.getElapsedMs + + getElapsedMs + + (); + + + + + + + + + + Return: + + getElapsedMs retorna o tempo decorrido em + milissegundos. + + + + + + A função getElapsedMs retorna o tempo + decorrido atual da consulta (em ms) no Roxie. + + Este é o tempo decorrido quando STD.System.Log.getElapsedMs() é + chamado. Como ECL é uma linguagem declarativa, o código não é + necessariamente executado em sequência. É preciso ter cuidado ao tentar + obter o tempo decorrido para um ponto específico no seu código. Você pode + olhar os gráficos do Workunit para ver o ponto exato em que a atividade é + executada. + + Para uso apenas no Roxie. Um erro é + retornado se você tentar executar no Thor ou hThor. + + Exemplo: + + IMPORT STD; +STD.System.Debug.Sleep (1054); // pause processing for 1054 milliseconds. +OUTPUT(STD.System.Log.getElapsedMs(), NAMED('Elapsed')); //returns total time elapsed + + diff --git a/docs/PT_BR/ECLStandardLibraryReference/SLR-includer.xml b/docs/PT_BR/ECLStandardLibraryReference/SLR-includer.xml index 631f541f9be..dfe37d4b67b 100644 --- a/docs/PT_BR/ECLStandardLibraryReference/SLR-includer.xml +++ b/docs/PT_BR/ECLStandardLibraryReference/SLR-includer.xml @@ -63,9 +63,7 @@ - - <emphasis>Arquivos lógicos</emphasis> - + <emphasis>Arquivos lógicos</emphasis> @@ -401,7 +399,9 @@ xmlns:xi="http://www.w3.org/2001/XInclude"/> - <emphasis>Manipulação de Data e Hora</emphasis> + + <emphasis>Manipulação de Data e Hora</emphasis> + @@ -490,6 +490,9 @@ + + - <emphasis>Logging</emphasis> + + <emphasis>Logging</emphasis> + @@ -603,6 +608,9 @@ + + diff --git a/docs/PT_BR/HPCCClientTools/CT_Mods/CT_Comm_Line_DFU.xml b/docs/PT_BR/HPCCClientTools/CT_Mods/CT_Comm_Line_DFU.xml index e886ed47f0d..363b13d6246 100644 --- a/docs/PT_BR/HPCCClientTools/CT_Mods/CT_Comm_Line_DFU.xml +++ b/docs/PT_BR/HPCCClientTools/CT_Mods/CT_Comm_Line_DFU.xml @@ -868,6 +868,14 @@ dfuplus action=despray srcname=mytest::test:spraytest se a compactação do arquivo de origem deve ser preservada. Se omitido, o padrão é 1. + + + ensure + + Opcional. Copia o arquivo lógico, mas não copia + partes do arquivo se elas já existirem. O padrão é + FALSE. + diff --git a/docs/PT_BR/HPCCClientTools/CT_Mods/CT_ECL_CLI.xml b/docs/PT_BR/HPCCClientTools/CT_Mods/CT_ECL_CLI.xml index 2feab2c86f5..6120dc370c2 100644 --- a/docs/PT_BR/HPCCClientTools/CT_Mods/CT_ECL_CLI.xml +++ b/docs/PT_BR/HPCCClientTools/CT_Mods/CT_ECL_CLI.xml @@ -165,6 +165,13 @@ retorna uma lista de todos os IDs de usuário-chave que podem ser usados pelo comando sign. + + + url-secret-name + + gera um nome secreto a partir de uma URL para + mapeamento automático de URL + @@ -8561,6 +8568,65 @@ ecl getname -wu W201407* + + ecl url-secret-name + + ecl url-secret-name url + [--username=<username>] + + + + + + + + + + ecl url-secret-name + + Gera um nome secreto a partir de uma URL para + mapeamento automático de URL + + + + url + + URL para converter em um nome secreto. + + + + --username + + Opcional.
O username associado a URL. Isto sobrepõe + qualquer username inserida na URL. + + + + O comando ecl url-secret-name + gera um nome secreto a partir de uma URL que pode ser usado para + suportar o mapeamento automático da URL para segredo no ECL + SOAPCALL/HTTPCALL. + + Um nome de usuário pode ser inserido na URL, como + https://username@example.com, ou passado como um parâmetro usando a + opção --username=username. Se um nome de usuário for passado como + parâmetro, ele substitui um nome de usuário na URL. + + As senhas inseridas na URL não são necessárias e serão + ignoradas. + + Quando o mapeamento secreto de URL do ECL SOAPCALL está ativado, + SOAPCALL converterá a URL fornecida em um nome deste formato. ECL, + então, tentará localizar o segredo e, se encontrado, usará o conteúdo + do segredo, ao invés da URL original. + + Exemplos: + + ecl url-secret-name https://example.com --username jimi +ecl url-secret-name http://example.com --username jimi + + + ecl roxie memlock diff --git a/docs/PT_BR/HPCCClientTools/CT_Mods/CT_Overview.xml b/docs/PT_BR/HPCCClientTools/CT_Mods/CT_Overview.xml index d1fcf97ed58..ec4954ab02c 100644 --- a/docs/PT_BR/HPCCClientTools/CT_Mods/CT_Overview.xml +++ b/docs/PT_BR/HPCCClientTools/CT_Mods/CT_Overview.xml @@ -135,55 +135,40 @@ Instale o software de ferramentas do cliente em sua máquina. + + - - - Nota: - - - Para grandes workunits ECL, o compilador de 32 bits - (eclcc) pode ficar sem memória mais rapidamente do que a - versão de 64 bits. Portanto, na maioria dos casos, você deve - instalar a versão de 64 bits. No entanto, para máquinas com - memória de 4 GB ou menos, você deve usar as ferramentas de - cliente de 32 bits. - - - - - Windows: + Windows: - Execute o arquivo executável, p.ex.: - hpccsystems-clienttools_community-7.X.X-XWindows-i386.exe em sua - máquina. Siga os comandos para concluir a instalação. + Execute o arquivo executável, p.ex.: + hpccsystems-clienttools_community-7.X.X-XWindows-i386.exe em sua + máquina. Siga os comandos para concluir a instalação. - Sistemas baseados em RPM - (CentOS/RedHat): + Sistemas baseados em RPM (CentOS/RedHat): + - Será fornecido um pacote de instalação do RPM. Instale o RPM - com o comutador -Uvh, o “U” ou “atualização” fará uma atualização se - uma versão prévia já tiver sido instalada. sudo rpm -Uvh <rpm file name> + Será fornecido um pacote de instalação do RPM. Instale o RPM com o + comutador -Uvh, o “U” ou “atualização” fará uma atualização se uma + versão prévia já tiver sido instalada. sudo rpm -Uvh <rpm file name> - Sistemas baseados em Debian - (Ubuntu): + Sistemas baseados em Debian + (Ubuntu): - Um pacote Debian é disponibilizado para instalações Ubuntu. - Para instalar o pacote, use: + Um pacote Debian é disponibilizado para instalações Ubuntu. Para + instalar o pacote, use: - sudo dpkg -i <deb filename> + sudo dpkg -i <deb filename> - Após instalar o pacote, execute o comando a seguir para - "arrumar" as dependências: + Após instalar o pacote, execute o comando a seguir para "arrumar" + as dependências: - sudo apt-get install -f + sudo apt-get install -f - Mac OSX: + Mac OSX: - Execute o arquivo de instalação, p.ex.: - hpccsystems-clienttools_community-7.X.X-XDarwin-x86_64.dmg. Siga os - comandos para concluir a instalação. - - + Execute o arquivo de instalação, p.ex.: + hpccsystems-clienttools_community-7.X.X-XDarwin-x86_64.dmg. Siga os + comandos para concluir a instalação. 
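The ecl url-secret-name description above pairs with ECL's SOAPCALL URL-to-secret mapping; a hedged sketch of the calling side (the 'EchoService' service name, the InData/OutData fields, and the URL are illustrative assumptions, not part of this patch):

// With URL-to-secret mapping enabled, SOAPCALL derives a secret name from
// this URL (as 'ecl url-secret-name https://jimi@example.com' would) and,
// if that secret exists, uses its contents in place of the literal URL.
OUTPUT(SOAPCALL('https://jimi@example.com/ws', 'EchoService',
                {STRING500 InData := 'ping'},
                DATASET({STRING500 OutData})));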
diff --git a/docs/PT_BR/HPCCClientTools/CT_Mods/CT_Overview_withoutIDE.xml b/docs/PT_BR/HPCCClientTools/CT_Mods/CT_Overview_withoutIDE.xml index dc76b2f9915..0e8c4e2300e 100644 --- a/docs/PT_BR/HPCCClientTools/CT_Mods/CT_Overview_withoutIDE.xml +++ b/docs/PT_BR/HPCCClientTools/CT_Mods/CT_Overview_withoutIDE.xml @@ -126,55 +126,40 @@ Instale o software de ferramentas do cliente em sua máquina. + + - - - Nota: - - - Para grandes workunits ECL, o compilador de 32 bits - (eclcc) pode ficar sem memória mais rapidamente do que a - versão de 64 bits. Portanto, na maioria dos casos, você deve - instalar a versão de 64 bits. No entanto, para máquinas com - memória de 4 GB ou menos, você deve usar as ferramentas de - cliente de 32 bits. - - - - - Windows: + Windows: - Execute o arquivo executável, p.ex.: - hpccsystems-clienttools_community-7.X.X-XWindows-i386.exe em sua - máquina. Siga os comandos para concluir a instalação. + Execute o arquivo executável, p.ex.: + hpccsystems-clienttools_community-7.X.X-XWindows-i386.exe em sua + máquina. Siga os comandos para concluir a instalação. - Sistemas baseados em RPM - (CentOS/RedHat): + Sistemas baseados em RPM (CentOS/RedHat): + - Será fornecido um pacote de instalação do RPM. Instale o RPM - com o comutador -Uvh, o “U” ou “atualização” fará uma atualização se - uma versão prévia já tiver sido instalada. sudo rpm -Uvh <rpm file name> + Será fornecido um pacote de instalação do RPM. Instale o RPM com o + comutador -Uvh, o “U” ou “atualização” fará uma atualização se uma + versão prévia já tiver sido instalada. sudo rpm -Uvh <rpm file name> - Sistemas baseados em Debian - (Ubuntu): + Sistemas baseados em Debian + (Ubuntu): - Um pacote Debian é disponibilizado para instalações Ubuntu. - Para instalar o pacote, use: + Um pacote Debian é disponibilizado para instalações Ubuntu. Para + instalar o pacote, use: - sudo dpkg -i <deb filename> + sudo dpkg -i <deb filename> - Após instalar o pacote, execute o comando a seguir para - "arrumar" as dependências: + Após instalar o pacote, execute o comando a seguir para "arrumar" + as dependências: - sudo apt-get install -f + sudo apt-get install -f - Mac OSX: + Mac OSX: - Abra o arquivo de imagem de disco da Apple (.dmg) e execute o - pacote de instalação (.pkg). Siga os comandos para concluir a - instalação. - - + Abra o arquivo de imagem de disco da Apple (.dmg) e execute o + pacote de instalação (.pkg). Siga os comandos para concluir a + instalação. diff --git a/docs/PT_BR/HPCCClientTools/CT_Mods/ECLCC.xml b/docs/PT_BR/HPCCClientTools/CT_Mods/ECLCC.xml index bbfcd6021b5..de59480df0c 100644 --- a/docs/PT_BR/HPCCClientTools/CT_Mods/ECLCC.xml +++ b/docs/PT_BR/HPCCClientTools/CT_Mods/ECLCC.xml @@ -180,15 +180,6 @@ listagem do arquivo a serem adicionados - - -checkDirty - - Faz com que o eclcc gere um aviso para qualquer - atributo que tenha sido modificado (conforme o resultado do - status git) O uso dessa função exige que o git esteja - instalado e disponível no caminho. 
- - -foption[=value] @@ -346,13 +337,6 @@ Passa a opção xx para o compilador c++ - - -Dname=value - - Substitui a definição do “nome” global do - atributo - - -Wl,xx @@ -378,6 +362,12 @@ -g) + + -save-temps + + Não exclui arquivos intermediários + + -shared @@ -390,6 +380,50 @@ + Opções de resolução de arquivos: + + + + + + + + + + + + -dfs=ip + + Use IP especificado para a resolução do nome do arquivo + DFS + + + + -scope=prefix + + Use o prefixo de escopo especificado na resolução do + nome do arquivo DFS + + + + -user=id + + Use o nome de usuário especificado na resolução do nome + do arquivo + + + + -password=xxx + + Use a senha especificada na resolução do nome do + arquivo DFS (em branco para solicitar) + + + + + + + Outras Opções: @@ -401,6 +435,12 @@ + + -aoption[=value] + + Configura opções da aplicação + + --allow=str @@ -414,6 +454,20 @@ recursos + + -allowsigned + + Permite somente acesso a funcionalidade com código + assinado + + + + -fisComplexCompile + + Impede tentativas de compilação como um processo filho + quando uma consulta é complexa + + -b @@ -429,6 +483,28 @@ arquivos + + -checkDirty + + Faz com que o eclcc gere um aviso para qualquer + atributo que tenha sido modificado (conforme o resultado do + status git) O uso dessa função exige que o git esteja + instalado e disponível no caminho. + + + + --component + + Defina o nome do componente em nome do qual isso está + sendo executado + + + + -Dname=value + + Sobrepõem a definição global do atributo 'name' + + --deny=all @@ -464,12 +540,28 @@ Executa testes internos + + + + + + + Other options (continued): + + + + + + + + + - --legacy + --leakcheck - Usa semântica de importação antiga - (descontinuada) + Limpe a memória, pois está verificando se há vazamentos + de memória @@ -479,6 +571,20 @@ stdout (formato XML) + + -legacyimport + + Utiliza semântica legado de importação + (descontinuado) + + + + -legacywhen + + Use semântica de legado quando/há efeitos colaterais + (descontinuado) + + --logfile <file> @@ -491,6 +597,67 @@ Define o nível de detalhe no arquivo de log + + --maxErrors=<n> + + Número limite de erros, abortando no nth (padrão = + 5) + + + + --metacache=x + + Especifique o diretório para armazenar as informações + meta distribuídas do indexador eclcc. Para desabilitar o + indexador, defina um valor vazio usando '--metacache='. Se + omitido, o local padrão é .eclcc/metacache. + + + + --nologfile + + Não registra nenhum log + + + + --nogpg + + Não execute o gpg para verificar assinaturas em códigos + assinados + + + + --nosourcepath + + Compila como se a fonte fosse de stdin + + + + --nostdinc + + Não inclui o diretório atual em -I + + + + -pch + + Generate precompiled header for eclinclude4.hpp + + + + -P <path> + + Especifica o caminho para os arquivos de saída (somente + com a opção -b) + + + + -showpaths + + Exibe informações sobre os caminhos de busca eclcc + utilizando + + -specs <file> @@ -512,6 +679,16 @@ da compilação + + -wxxxx=level + + Configura a severidade para um warning code em + particular ou categoria. 
Todas as opções para os níveis + são: all | ignore | log | warning | error | fail + -wall aplica a severidade + padrão para todos os alertas + + + + --version diff --git a/docs/PT_BR/HPCCSystemAdmin/SA-Mods/SecMgrMod.xml b/docs/PT_BR/HPCCSystemAdmin/SA-Mods/SecMgrMod.xml index 3f9a709baf6..30e0e42555e 100644 --- a/docs/PT_BR/HPCCSystemAdmin/SA-Mods/SecMgrMod.xml +++ b/docs/PT_BR/HPCCSystemAdmin/SA-Mods/SecMgrMod.xml @@ -9,7 +9,7 @@ Plugin) oferece um mecanismo de criação e implementação de Security Manager Plugins customizados. - + Desenvolvimento do Plugin Um Security Manager Plugin customizado consiste de um arquivo de @@ -47,9 +47,9 @@ Definição de Buildset O plugin - declara a si mesmo como um componente do Security Manager Plugin do HPCC , - assim como declara a localização dos arquivos do plugin e o esquema de - definição de configuração. + declara a si mesmo como um componente do Security Manager Plugin do HPCC + Systems, assim como declara a localização dos arquivos do plugin e o + esquema de definição de configuração. EXEMPLO: @@ -160,8 +160,6 @@ </xs:element> </xs:schema> - - genenvrules.conf – (opcional) Este arquivo permite que o plugin adicione a si mesmo na lista "do_not(automatically)_generate". Embora seja um arquivo opcional, ele é diff --git a/docs/PT_BR/HPCCSystemAdmin/SA-Mods/SecMgrModConfDeploy.xml b/docs/PT_BR/HPCCSystemAdmin/SA-Mods/SecMgrModConfDeploy.xml index 9f65d004678..f8a5d09de83 100644 --- a/docs/PT_BR/HPCCSystemAdmin/SA-Mods/SecMgrModConfDeploy.xml +++ b/docs/PT_BR/HPCCSystemAdmin/SA-Mods/SecMgrModConfDeploy.xml @@ -8,7 +8,8 @@ As seções a seguir detalham o processo de configuração de seu HPCC Systems para usar o Plugin do Gerenciador de Segurança. - + Como Configurar um Plugin do Gerenciador de Segurança Após ter sido instalado, o plugin pode ser configurado na plataforma diff --git a/docs/PT_BR/HPCCSystemAdmin/SA-Mods/WUTool.xml b/docs/PT_BR/HPCCSystemAdmin/SA-Mods/WUTool.xml index 0ffc40b1f86..95d78fcdf5d 100644 --- a/docs/PT_BR/HPCCSystemAdmin/SA-Mods/WUTool.xml +++ b/docs/PT_BR/HPCCSystemAdmin/SA-Mods/WUTool.xml @@ -17,13 +17,13 @@ - + - Actions + Actions @@ -53,6 +53,21 @@ Dump de resultados de uma workunit específica. + + info <workunits> <filter> + + Este comando fornece acesso filtrado a estatísticas e outras + informações de uma workunit. Consulte a tabela seguinte para + obter informações adicionais sobre o parâmetro info. + + + + analyze <workunit> + + Analisa a workunit para destacar possíveis economias de + custos. + + archive <workunits> @@ -84,7 +99,7 @@ validate Verifique o conteúdo do repositório da workunit quanto a - erros. [FIX=1] will try to repair any issues found. + erros. [FIX=1] irá reparar qualquer ocorrência encontrada.
@@ -99,6 +114,221 @@ Inicializa o respositório de uma nova workunit + + + graph <wu> + + Gera uma representação alternativa do graph com detalhes da + execução + + + + activity <wu> + + Quais atividades estão em execução em um determinado + intervalo de tempo (em ordem cronológica)<wu> + [">scope|mintime"] ["<scope|maxtime"] + [threshold=n%] + + + + hotspot <wu> [<activity>] + + Localiza hotspots para workunit (ou uma atividade em + particular) + + + + critical <wu> <activity> + + Quais atividades estão em execução em ordem de + execução + + + + depend <wu> <activity> +<activity> + + Localiza padrões entre duas atividades + + + + depend <wu> ?<activity>:startTime + + Quais dependências consomem um grande % no início da + atividade + + + + help <command> + + Mais ajuda sobre um comando + + + + + + A tabela a seguir fornece mais informações para a utilidade wutool + emitida com o parâmetro action=info: + + + + + + + + + + info + parâmetros + + + + + + info <workunits> <filter> + + Este comando fornece acesso filtrado às estatísticas e outras + informações de uma workunit. O filtro pode incluir os seguintes + elementos (aqueles indicados por * podem ser repetidos): + + + + + + + Quais escopos + correspondem: + + + + + scope[<scope-id>]* + + escopo para correspondência + + + + + stype[<scope-type>]* + + tipo de escopo para correspondência + + + + id[<id>]* + + o id do escopo para correspondência + + + + + + NOTA: scope, stype e id não + podem ser especificados no mesmo filtro + + + + depth[n | + low..high] + + intervalo de profundidades para buscar uma + correspondência. + + + + source[global|stats|graph|all]* + + quais fontes dentro da workunit a pesquisar. O padrão são as + melhores fontes para o resto do filtro. + + + + where[<statistickind> | <statistickind> +(=|<|<=|>|>=) value | +<statistickind>=low..high] + + filtrar pela existência de estatística ou intervalo de + valores. + + + + + + Quais escopos estão inclusos nos + resultados: + + + + matched[true|false] + + os escopos correspondentes são retornados? + + + + nested[<depth>|all] + + qual aninhamento de escopos dentro de um escopo + correspondente está nos resultados (o padrão é '0' se + matched[true] e 'all' se matched[false]). + + + + includetype[<scope-type>]* + + quais tipos de escopo devem ser incluídos? + + + + + + Qual informação sobre o escopo é + reportada: + + + + properties[statistics|hints| +attributes|scope|all]* + + + + + + statistic[<statistic-kind>|none|all]* + + + + + + attribute[<attribute-name>|none|all]* + + + + + + hint[<hint-name>]* + + + + + + property[<statistic-kind>| +<attribute-name>|<hint-name>]* + + incluir propriedade (categoria é deduzida) + + + + measure[<measure>] + + todas as estatísticas com medidas específicas.
+ + + version[<version>] + + versão mínima para retornar + diff --git a/docs/PT_BR/Installing_and_RunningTheHPCCPlatform/Inst-Mods/UserSecurityMaint.xml b/docs/PT_BR/Installing_and_RunningTheHPCCPlatform/Inst-Mods/UserSecurityMaint.xml index 87b13636cd0..90defe664ac 100644 --- a/docs/PT_BR/Installing_and_RunningTheHPCCPlatform/Inst-Mods/UserSecurityMaint.xml +++ b/docs/PT_BR/Installing_and_RunningTheHPCCPlatform/Inst-Mods/UserSecurityMaint.xml @@ -1622,6 +1622,31 @@ Leitura + + + SashaAccess + + Acesso ao serviço WsSasha + + Access + + + + + + Listar Workunits + + Read + + + + + + Arquivar Workunits, restaurar Workunits + arquivadas + + Full + diff --git a/docs/PT_BR/Installing_and_RunningTheHPCCPlatform/Inst-Mods/hpcc_ldap.xml b/docs/PT_BR/Installing_and_RunningTheHPCCPlatform/Inst-Mods/hpcc_ldap.xml index d4cb1a63b8b..f4724a7e8fe 100644 --- a/docs/PT_BR/Installing_and_RunningTheHPCCPlatform/Inst-Mods/hpcc_ldap.xml +++ b/docs/PT_BR/Installing_and_RunningTheHPCCPlatform/Inst-Mods/hpcc_ldap.xml @@ -293,6 +293,27 @@ + + Gerenciador de Segurança de Usuário Único + + O gerenciador de segurança de usuário único é um gerenciador de + segurança especializado que permite que uma combinação de nome de + usuário/senha seja especificada na linha de comando de inicialização do + ESP. Em tempo de execução, quando você tenta acessar qualquer recurso do + ESP que exija autenticação, como o ECL Watch, deve especificar uma + combinação de nome de usuário/senha. + + Um gerenciador de segurança de usuário único pode ser útil para uma + implantação personalizada onde você não deseja configurar um servidor LDAP + inteiro ou criar um arquivo HTPASSWD do Linux, como um ambiente de sala de + aula ou uma Máquina Virtual personalizada do HPCC Systems. + + Veja o documento Security + Manager Plugin Framework para maiores informações sobre como + configurar e implantar os plugins Security Manager.
+ + Utilizando Autenticação LDAP From a94651f378bf7be5da461168317ddb7f8bfbbce9 Mon Sep 17 00:00:00 2001 From: Ken Rowland Date: Wed, 19 Jun 2024 11:12:24 -0400 Subject: [PATCH 081/151] HPCC-32095 Set LDAP default file scope caching option to new caching Set the default flag to true Signed-Off-By: Kenneth Rowland kenneth.rowland@lexisnexisrisk.com --- system/security/LdapSecurity/ldapsecurity.ipp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/security/LdapSecurity/ldapsecurity.ipp b/system/security/LdapSecurity/ldapsecurity.ipp index b7d4b35ae6b..47d3e4cfb81 100644 --- a/system/security/LdapSecurity/ldapsecurity.ipp +++ b/system/security/LdapSecurity/ldapsecurity.ipp @@ -323,7 +323,7 @@ private: static const SecFeatureSet s_safeFeatures = SMF_ALL_FEATURES; static const SecFeatureSet s_implementedFeatures = s_safeFeatures & ~(SMF_RetrieveUserData | SMF_RemoveResources); StringBuffer m_hpccInternalScope; - bool m_useLegacyDefaultFileScopePermissionCaching = true; + bool m_useLegacyDefaultFileScopePermissionCaching = false; public: IMPLEMENT_IINTERFACE From 7fd4dbeb264a64039ba723f0437ae593e004c323 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Wed, 19 Jun 2024 11:27:46 +0100 Subject: [PATCH 082/151] HPCC-32094 Bump vcpkg versions for 9.8.x Switch vcpkg host triplet to match the target triplet (reduces the number of buils items) Signed-off-by: Gordon Smith --- cmake_modules/vcpkg.cmake | 3 + .../configmgrlib/XSDComponentParser.cpp | 12 +- .../configmgrlib/XSDSchemaParser.cpp | 18 +- .../plugins/jwtSecurity/jwtSecurity.cpp | 32 +- vcpkg | 2 +- vcpkg-configuration.json | 2 +- vcpkg_overlays/pcre2/fix-cmake.patch | 323 ------------------ vcpkg_overlays/pcre2/no-static-suffix.patch | 33 -- .../pcre2/pcre2-10.35_fix-uwp.patch | 10 - vcpkg_overlays/pcre2/portfile.cmake | 77 ----- vcpkg_overlays/pcre2/usage | 6 - vcpkg_overlays/pcre2/vcpkg.json | 38 --- 12 files changed, 41 insertions(+), 515 deletions(-) delete mode 100644 vcpkg_overlays/pcre2/fix-cmake.patch delete mode 100644 vcpkg_overlays/pcre2/no-static-suffix.patch delete mode 100644 vcpkg_overlays/pcre2/pcre2-10.35_fix-uwp.patch delete mode 100644 vcpkg_overlays/pcre2/portfile.cmake delete mode 100644 vcpkg_overlays/pcre2/usage delete mode 100644 vcpkg_overlays/pcre2/vcpkg.json diff --git a/cmake_modules/vcpkg.cmake b/cmake_modules/vcpkg.cmake index 1ed1d9988cb..d79e28dff18 100644 --- a/cmake_modules/vcpkg.cmake +++ b/cmake_modules/vcpkg.cmake @@ -9,10 +9,13 @@ set(VCPKG_INSTALL_OPTIONS "--x-abi-tools-use-exact-versions;--downloads-root=${V set(VCPKG_VERBOSE OFF) if(WIN32) + set(VCPKG_HOST_TRIPLET "x64-windows" CACHE STRING "host triplet") set(VCPKG_TARGET_TRIPLET "x64-windows" CACHE STRING "target triplet") elseif(APPLE) + set(VCPKG_HOST_TRIPLET "x64-osx" CACHE STRING "host triplet") set(VCPKG_TARGET_TRIPLET "x64-osx" CACHE STRING "target triplet") elseif(UNIX) + set(VCPKG_HOST_TRIPLET "x64-linux-dynamic" CACHE STRING "host triplet") set(VCPKG_TARGET_TRIPLET "x64-linux-dynamic" CACHE STRING "target triplet") endif() diff --git a/configuration/configmgr/configmgrlib/XSDComponentParser.cpp b/configuration/configmgr/configmgrlib/XSDComponentParser.cpp index 0448eaa980a..7e072d7c304 100644 --- a/configuration/configmgr/configmgrlib/XSDComponentParser.cpp +++ b/configuration/configmgr/configmgrlib/XSDComponentParser.cpp @@ -27,7 +27,8 @@ void XSDComponentParser::parseXSD(const pt::ptree &compTree) { bool foundComponentDef = false; - pt::ptree tree = compTree.get_child("", pt::ptree()); + pt::ptree treeDefault; + 
pt::ptree tree = compTree.get_child("", treeDefault); // // First time through look for attributeGroups that can be defined and for existence of a sequence element that actually defines the component @@ -49,7 +50,8 @@ void XSDComponentParser::parseXSD(const pt::ptree &compTree) if (foundComponentDef) { - pt::ptree elemTree = tree.get_child("xs:sequence.xs:element", pt::ptree()); + pt::ptree elemTreeDefault; + pt::ptree elemTree = tree.get_child("xs:sequence.xs:element", elemTreeDefault); if (!elemTree.empty()) { std::string elementName = getXSDAttributeValue(elemTree, ".name"); @@ -72,7 +74,8 @@ void XSDComponentParser::parseXSD(const pt::ptree &compTree) // // Parse any attributes, these are located in the xs:complexType section - pt::ptree attributeTree = elemTree.get_child("xs:complexType", pt::ptree()); + pt::ptree attributeTreeDefault; + pt::ptree attributeTree = elemTree.get_child("xs:complexType", attributeTreeDefault); for (auto attrIt = attributeTree.begin(); attrIt != attributeTree.end(); ++attrIt) { // @@ -90,7 +93,8 @@ void XSDComponentParser::parseXSD(const pt::ptree &compTree) // // Now parse the sequence section (these are sub keys for the component) - XSDSchemaParser::parseXSD(elemTree.get_child("xs:complexType.xs:sequence", pt::ptree())); + pt::ptree seqDefault; + XSDSchemaParser::parseXSD(elemTree.get_child("xs:complexType.xs:sequence", seqDefault)); // // If there were other keys that we needed to support, this is where a loop would be added diff --git a/configuration/configmgr/configmgrlib/XSDSchemaParser.cpp b/configuration/configmgr/configmgrlib/XSDSchemaParser.cpp index d1e49fe3271..c9f1ebca4f4 100644 --- a/configuration/configmgr/configmgrlib/XSDSchemaParser.cpp +++ b/configuration/configmgr/configmgrlib/XSDSchemaParser.cpp @@ -171,7 +171,8 @@ void XSDSchemaParser::parseXSD(const pt::ptree &keys) } else if (elemType == "xs:sequence") { - parseXSD(it->second.get_child("", pt::ptree())); + pt::ptree emptyTree; + parseXSD(it->second.get_child("", emptyTree)); } else if (elemType == "xs:element") { @@ -233,7 +234,8 @@ void XSDSchemaParser::parseAttributeGroup(const pt::ptree &attributeTree) std::shared_ptr pXSDValueSetParaser = std::make_shared(pValueSet); std::string groupByName = getXSDAttributeValue(attributeTree, ".hpcc:groupByName", false, ""); pXSDValueSetParaser->setGroupByName(groupByName); - pXSDValueSetParaser->parseXSD(attributeTree.get_child("", pt::ptree())); + pt::ptree emptyTree; + pXSDValueSetParaser->parseXSD(attributeTree.get_child("", emptyTree)); m_pSchemaItem->addSchemaType(pValueSet, groupName); m_pSchemaItem->setProperty("attribute_group_default_overrides", getXSDAttributeValue(attributeTree, ".hpcc:presetValue", false, "")); } @@ -300,7 +302,8 @@ void XSDSchemaParser::parseComplexType(const pt::ptree &typeTree) std::shared_ptr pComplexType = std::make_shared(complexTypeName, "component", m_pSchemaItem); pComplexType->setProperty("itemType", complexTypeName); - pt::ptree childTree = typeTree.get_child("", pt::ptree()); + pt::ptree childTreeDefault; + pt::ptree childTree = typeTree.get_child("", childTreeDefault); if (!childTree.empty()) { std::shared_ptr pXSDParaser = std::make_shared(pComplexType); @@ -317,7 +320,8 @@ void XSDSchemaParser::parseComplexType(const pt::ptree &typeTree) // Just a complexType delimiter, ignore and parse the children else { - parseXSD(typeTree.get_child("", pt::ptree())); + pt::ptree emptyTree; + parseXSD(typeTree.get_child("", emptyTree)); } } @@ -709,7 +713,8 @@ std::shared_ptr XSDSchemaParser::getType(const 
pt::ptree &typeTree, if (!restriction->second.empty()) { - pt::ptree restrictTree = restriction->second.get_child("", pt::ptree()); + pt::ptree restrictTreeDefault; + pt::ptree restrictTree = restriction->second.get_child("", restrictTreeDefault); if (std::dynamic_pointer_cast(pLimits) != nullptr) { std::shared_ptr pBaseIntLimits = std::dynamic_pointer_cast(pLimits); @@ -905,7 +910,8 @@ std::shared_ptr XSDSchemaParser::getSchemaValue(const pt::ptree &at } else { - std::shared_ptr pType = getType(attr.get_child("xs:simpleType", pt::ptree()), false); + pt::ptree simpleTypeDefault; + std::shared_ptr pType = getType(attr.get_child("xs:simpleType", simpleTypeDefault), false); if (!pType->isValid()) { throw(ParseException("Attribute " + m_pSchemaItem->getProperty("name") + "[@" + attrName + "] does not have a valid type")); diff --git a/system/security/plugins/jwtSecurity/jwtSecurity.cpp b/system/security/plugins/jwtSecurity/jwtSecurity.cpp index 5fcc9464715..7ca7578407f 100644 --- a/system/security/plugins/jwtSecurity/jwtSecurity.cpp +++ b/system/security/plugins/jwtSecurity/jwtSecurity.cpp @@ -376,7 +376,7 @@ class CJwtSecurityManager : implements IDaliLdapConnection, public CBaseSecurity userInfo->setExpirationTime(std::chrono::system_clock::to_time_t(decodedToken.get_expires_at())); userInfo->setRefreshToken(refreshToken); userInfo->setJWTToken(token); - for (auto& e : decodedToken.get_payload_claims()) + for (auto& e : decodedToken.get_payload_json()) { std::string key(e.first); @@ -398,50 +398,50 @@ class CJwtSecurityManager : implements IDaliLdapConnection, public CBaseSecurity if (isPrefixString("AllowWorkunitScope", key) || isPrefixString("DenyWorkunitScope", key)) { // Collect permissions for later batch processing - if (e.second.get_type() == jwt::json::type::string) + if (e.second.is()) { - wuScopePerms.push_back(ScopePermission(key, e.second.as_string())); + wuScopePerms.push_back(ScopePermission(key, e.second.get())); } else { - jwt::claim::set_t valueSet = e.second.as_set(); + auto valueSet = e.second.get(); - for (jwt::claim::set_t::const_iterator x = valueSet.begin(); x != valueSet.end(); x++) + for (auto x = valueSet.begin(); x != valueSet.end(); x++) { - wuScopePerms.push_back(ScopePermission(key, *x)); + wuScopePerms.push_back(ScopePermission(key, x->get())); } } } else if (isPrefixString("AllowFileScope", key) || isPrefixString("DenyFileScope", key)) { // Collect permissions for later batch processing - if (e.second.get_type() == jwt::json::type::string) + if (e.second.is()) { - fileScopePerms.push_back(ScopePermission(key, e.second.as_string())); + fileScopePerms.push_back(ScopePermission(key, e.second.get())); } else { - jwt::claim::set_t valueSet = e.second.as_set(); + auto valueSet = e.second.get(); - for (jwt::claim::set_t::const_iterator x = valueSet.begin(); x != valueSet.end(); x++) + for (auto x = valueSet.begin(); x != valueSet.end(); x++) { - fileScopePerms.push_back(ScopePermission(key, *x)); + fileScopePerms.push_back(ScopePermission(key, x->get())); } } } - else if (e.second.get_type() == jwt::json::type::string) + else if (e.second.is()) { // Feature permission where value is a single string - userInfo->mergeFeaturePerm(key, e.second.as_string()); + userInfo->mergeFeaturePerm(key, e.second.get()); } else { // Feature permission where value is an array of strings - jwt::claim::set_t valueSet = e.second.as_set(); + auto valueSet = e.second.get(); - for (jwt::claim::set_t::const_iterator x = valueSet.begin(); x != valueSet.end(); x++) + for (auto x = 
valueSet.begin(); x != valueSet.end(); x++) { - userInfo->mergeFeaturePerm(key, *x); + userInfo->mergeFeaturePerm(key, x->get()); } } } diff --git a/vcpkg b/vcpkg index ddf0c6df807..2415a95badf 160000 --- a/vcpkg +++ b/vcpkg @@ -1 +1 @@ -Subproject commit ddf0c6df807a454c74f33d7ba75037314bf9a79c +Subproject commit 2415a95badf265e13fdbdb2a709929bf56b0aaa2 diff --git a/vcpkg-configuration.json b/vcpkg-configuration.json index 4c5c1289b8b..7ed29e6c79b 100644 --- a/vcpkg-configuration.json +++ b/vcpkg-configuration.json @@ -2,7 +2,7 @@ "default-registry": { "kind": "git", "repository": "https://github.com/microsoft/vcpkg", - "baseline": "fba75d09065fcc76a25dcf386b1d00d33f5175af" + "baseline": "f7423ee180c4b7f40d43402c2feb3859161ef625" }, "registries": [], "overlay-ports": [ diff --git a/vcpkg_overlays/pcre2/fix-cmake.patch b/vcpkg_overlays/pcre2/fix-cmake.patch deleted file mode 100644 index 088be8425a6..00000000000 --- a/vcpkg_overlays/pcre2/fix-cmake.patch +++ /dev/null @@ -1,323 +0,0 @@ -diff --git a/CMakeLists.txt b/CMakeLists.txt -index 3c915d9..d5963f8 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -143,6 +143,7 @@ INCLUDE(CheckFunctionExists) - INCLUDE(CheckSymbolExists) - INCLUDE(CheckIncludeFile) - INCLUDE(CheckTypeSize) -+INCLUDE(CMakePackageConfigHelpers) - INCLUDE(GNUInstallDirs) # for CMAKE_INSTALL_LIBDIR - - CHECK_INCLUDE_FILE(dirent.h HAVE_DIRENT_H) -@@ -696,7 +697,9 @@ IF(PCRE2_BUILD_PCRE2_8) - VERSION ${LIBPCRE2_8_VERSION} - SOVERSION ${LIBPCRE2_8_SOVERSION}) - TARGET_COMPILE_DEFINITIONS(pcre2-8-static PUBLIC PCRE2_STATIC) -- TARGET_INCLUDE_DIRECTORIES(pcre2-8-static PUBLIC ${PROJECT_BINARY_DIR}) -+ TARGET_INCLUDE_DIRECTORIES(pcre2-8-static PUBLIC -+ $ -+ $) - IF(REQUIRE_PTHREAD) - TARGET_LINK_LIBRARIES(pcre2-8-static Threads::Threads) - ENDIF(REQUIRE_PTHREAD) -@@ -709,7 +712,9 @@ IF(PCRE2_BUILD_PCRE2_8) - VERSION ${LIBPCRE2_POSIX_VERSION} - SOVERSION ${LIBPCRE2_POSIX_SOVERSION}) - TARGET_LINK_LIBRARIES(pcre2-posix-static pcre2-8-static) -- TARGET_INCLUDE_DIRECTORIES(pcre2-posix-static PUBLIC ${PROJECT_SOURCE_DIR}/src) -+ TARGET_INCLUDE_DIRECTORIES(pcre2-posix-static PUBLIC -+ $ -+ $) - set(targets ${targets} pcre2-posix-static) - - IF(MSVC) -@@ -726,7 +731,9 @@ IF(PCRE2_BUILD_PCRE2_8) - - IF(BUILD_SHARED_LIBS) - ADD_LIBRARY(pcre2-8-shared SHARED ${PCRE2_HEADERS} ${PCRE2_SOURCES} ${PROJECT_BINARY_DIR}/config.h) -- TARGET_INCLUDE_DIRECTORIES(pcre2-8-shared PUBLIC ${PROJECT_BINARY_DIR}) -+ TARGET_INCLUDE_DIRECTORIES(pcre2-8-shared PUBLIC -+ $ -+ $) - SET_TARGET_PROPERTIES(pcre2-8-shared PROPERTIES - COMPILE_DEFINITIONS PCRE2_CODE_UNIT_WIDTH=8 - MACHO_COMPATIBILITY_VERSION "${LIBPCRE2_8_MACHO_COMPATIBILITY_VERSION}" -@@ -740,7 +747,9 @@ IF(PCRE2_BUILD_PCRE2_8) - set(targets ${targets} pcre2-8-shared) - - ADD_LIBRARY(pcre2-posix-shared SHARED ${PCRE2POSIX_HEADERS} ${PCRE2POSIX_SOURCES}) -- TARGET_INCLUDE_DIRECTORIES(pcre2-posix-shared PUBLIC ${PROJECT_SOURCE_DIR}/src) -+ TARGET_INCLUDE_DIRECTORIES(pcre2-posix-shared PUBLIC -+ $ -+ $) - SET_TARGET_PROPERTIES(pcre2-posix-shared PROPERTIES - COMPILE_DEFINITIONS PCRE2_CODE_UNIT_WIDTH=8 - MACHO_COMPATIBILITY_VERSION "${LIBPCRE2_POSIX_MACHO_COMPATIBILITY_VERSION}" -@@ -779,7 +788,9 @@ ENDIF(PCRE2_BUILD_PCRE2_8) - IF(PCRE2_BUILD_PCRE2_16) - IF(BUILD_STATIC_LIBS) - ADD_LIBRARY(pcre2-16-static STATIC ${PCRE2_HEADERS} ${PCRE2_SOURCES} ${PROJECT_BINARY_DIR}/config.h) -- TARGET_INCLUDE_DIRECTORIES(pcre2-16-static PUBLIC ${PROJECT_BINARY_DIR}) -+ TARGET_INCLUDE_DIRECTORIES(pcre2-16-static PUBLIC -+ $ -+ $) - 
SET_TARGET_PROPERTIES(pcre2-16-static PROPERTIES UNITY_BUILD OFF - COMPILE_DEFINITIONS PCRE2_CODE_UNIT_WIDTH=16 - MACHO_COMPATIBILITY_VERSION "${LIBPCRE2_32_MACHO_COMPATIBILITY_VERSION}" -@@ -804,7 +815,9 @@ IF(PCRE2_BUILD_PCRE2_16) - - IF(BUILD_SHARED_LIBS) - ADD_LIBRARY(pcre2-16-shared SHARED ${PCRE2_HEADERS} ${PCRE2_SOURCES} ${PROJECT_BINARY_DIR}/config.h) -- TARGET_INCLUDE_DIRECTORIES(pcre2-16-shared PUBLIC ${PROJECT_BINARY_DIR}) -+ TARGET_INCLUDE_DIRECTORIES(pcre2-16-shared PUBLIC -+ $ -+ $) - SET_TARGET_PROPERTIES(pcre2-16-shared PROPERTIES UNITY_BUILD OFF - COMPILE_DEFINITIONS PCRE2_CODE_UNIT_WIDTH=16 - MACHO_COMPATIBILITY_VERSION "${LIBPCRE2_32_MACHO_COMPATIBILITY_VERSION}" -@@ -841,7 +854,9 @@ ENDIF(PCRE2_BUILD_PCRE2_16) - IF(PCRE2_BUILD_PCRE2_32) - IF(BUILD_STATIC_LIBS) - ADD_LIBRARY(pcre2-32-static STATIC ${PCRE2_HEADERS} ${PCRE2_SOURCES} ${PROJECT_BINARY_DIR}/config.h) -- TARGET_INCLUDE_DIRECTORIES(pcre2-32-static PUBLIC ${PROJECT_BINARY_DIR}) -+ TARGET_INCLUDE_DIRECTORIES(pcre2-32-static PUBLIC -+ $ -+ $) - SET_TARGET_PROPERTIES(pcre2-32-static PROPERTIES UNITY_BUILD OFF - COMPILE_DEFINITIONS PCRE2_CODE_UNIT_WIDTH=32 - MACHO_COMPATIBILITY_VERSION "${LIBPCRE2_32_MACHO_COMPATIBILITY_VERSION}" -@@ -866,7 +881,9 @@ IF(PCRE2_BUILD_PCRE2_32) - - IF(BUILD_SHARED_LIBS) - ADD_LIBRARY(pcre2-32-shared SHARED ${PCRE2_HEADERS} ${PCRE2_SOURCES} ${PROJECT_BINARY_DIR}/config.h) -- TARGET_INCLUDE_DIRECTORIES(pcre2-32-shared PUBLIC ${PROJECT_BINARY_DIR}) -+ TARGET_INCLUDE_DIRECTORIES(pcre2-32-shared PUBLIC -+ $ -+ $) - SET_TARGET_PROPERTIES(pcre2-32-shared PROPERTIES UNITY_BUILD OFF - COMPILE_DEFINITIONS PCRE2_CODE_UNIT_WIDTH=32 - MACHO_COMPATIBILITY_VERSION "${LIBPCRE2_32_MACHO_COMPATIBILITY_VERSION}" -@@ -1107,9 +1124,13 @@ ENDIF(PCRE2_BUILD_TESTS) - SET(CMAKE_INSTALL_ALWAYS 1) - - INSTALL(TARGETS ${targets} -- RUNTIME DESTINATION bin -+ EXPORT pcre2-targets -+ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) -+INSTALL(EXPORT pcre2-targets -+ DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/pcre2 -+ NAMESPACE pcre2::) - INSTALL(FILES ${pkg_config_files} DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) - INSTALL(FILES "${CMAKE_CURRENT_BINARY_DIR}/pcre2-config" - DESTINATION bin -@@ -1121,11 +1142,12 @@ INSTALL(FILES ${PCRE2_HEADERS} ${PCRE2POSIX_HEADERS} DESTINATION include) - # CMake config files. 
- set(PCRE2_CONFIG_IN ${CMAKE_CURRENT_SOURCE_DIR}/cmake/pcre2-config.cmake.in) - set(PCRE2_CONFIG_OUT ${CMAKE_CURRENT_BINARY_DIR}/cmake/pcre2-config.cmake) --configure_file(${PCRE2_CONFIG_IN} ${PCRE2_CONFIG_OUT} @ONLY) --set(PCRE2_CONFIG_VERSION_IN ${CMAKE_CURRENT_SOURCE_DIR}/cmake/pcre2-config-version.cmake.in) -+configure_package_config_file(${PCRE2_CONFIG_IN} ${PCRE2_CONFIG_OUT} INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/pcre2) - set(PCRE2_CONFIG_VERSION_OUT ${CMAKE_CURRENT_BINARY_DIR}/cmake/pcre2-config-version.cmake) --configure_file(${PCRE2_CONFIG_VERSION_IN} ${PCRE2_CONFIG_VERSION_OUT} @ONLY) --install(FILES ${PCRE2_CONFIG_OUT} ${PCRE2_CONFIG_VERSION_OUT} DESTINATION cmake) -+write_basic_package_version_file(${PCRE2_CONFIG_VERSION_OUT} -+ VERSION ${PCRE2_MAJOR}.${PCRE2_MINOR}.0 -+ COMPATIBILITY SameMajorVersion) -+install(FILES ${PCRE2_CONFIG_OUT} ${PCRE2_CONFIG_VERSION_OUT} DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/pcre2) - - FILE(GLOB html ${PROJECT_SOURCE_DIR}/doc/html/*.html) - FILE(GLOB man1 ${PROJECT_SOURCE_DIR}/doc/*.1) -diff --git a/cmake/pcre2-config-version.cmake.in b/cmake/pcre2-config-version.cmake.in -index dac149e..e69de29 100644 ---- a/cmake/pcre2-config-version.cmake.in -+++ b/cmake/pcre2-config-version.cmake.in -@@ -1,15 +0,0 @@ --set(PACKAGE_VERSION_MAJOR @PCRE2_MAJOR@) --set(PACKAGE_VERSION_MINOR @PCRE2_MINOR@) --set(PACKAGE_VERSION_PATCH 0) --set(PACKAGE_VERSION @PCRE2_MAJOR@.@PCRE2_MINOR@.0) -- --# Check whether the requested PACKAGE_FIND_VERSION is compatible --if(PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION OR -- PACKAGE_VERSION_MAJOR GREATER PACKAGE_FIND_VERSION_MAJOR) -- set(PACKAGE_VERSION_COMPATIBLE FALSE) --else() -- set(PACKAGE_VERSION_COMPATIBLE TRUE) -- if(PACKAGE_VERSION VERSION_EQUAL PACKAGE_FIND_VERSION) -- set(PACKAGE_VERSION_EXACT TRUE) -- endif() --endif() -diff --git a/cmake/pcre2-config.cmake.in b/cmake/pcre2-config.cmake.in -index 12f3a35..159669b 100644 ---- a/cmake/pcre2-config.cmake.in -+++ b/cmake/pcre2-config.cmake.in -@@ -5,11 +5,17 @@ - # - # Static vs. shared - # ----------------- --# To make use of the static library instead of the shared one, one needs -+# To force using the static library instead of the shared one, one needs - # to set the variable PCRE2_USE_STATIC_LIBS to ON before calling find_package. -+# If the variable is not set, the static library will be used if only that has -+# been built, otherwise the shared library will be used. -+# -+# The following components are supported: 8BIT, 16BIT, 32BIT and POSIX. -+# They used to be required but not anymore; all available targets will -+# be defined regardless of the requested components. - # Example: - # set(PCRE2_USE_STATIC_LIBS ON) --# find_package(PCRE2 CONFIG COMPONENTS 8BIT) -+# find_package(PCRE2 CONFIG) - # - # This will define the following variables: - # -@@ -23,70 +29,42 @@ - # PCRE2::32BIT - The 32 bit PCRE2 library. - # PCRE2::POSIX - The POSIX PCRE2 library. 
- --set(PCRE2_NON_STANDARD_LIB_PREFIX @NON_STANDARD_LIB_PREFIX@) --set(PCRE2_NON_STANDARD_LIB_SUFFIX @NON_STANDARD_LIB_SUFFIX@) --set(PCRE2_8BIT_NAME pcre2-8) --set(PCRE2_16BIT_NAME pcre2-16) --set(PCRE2_32BIT_NAME pcre2-32) --set(PCRE2_POSIX_NAME pcre2-posix) --find_path(PCRE2_INCLUDE_DIR NAMES pcre2.h DOC "PCRE2 include directory") --if (PCRE2_USE_STATIC_LIBS) -- if (MSVC) -- set(PCRE2_8BIT_NAME pcre2-8-static) -- set(PCRE2_16BIT_NAME pcre2-16-static) -- set(PCRE2_32BIT_NAME pcre2-32-static) -- set(PCRE2_POSIX_NAME pcre2-posix-static) -- endif () -+@PACKAGE_INIT@ - -- set(PCRE2_PREFIX ${CMAKE_STATIC_LIBRARY_PREFIX}) -- set(PCRE2_SUFFIX ${CMAKE_STATIC_LIBRARY_SUFFIX}) --else () -- set(PCRE2_PREFIX ${CMAKE_SHARED_LIBRARY_PREFIX}) -- if (MINGW AND PCRE2_NON_STANDARD_LIB_PREFIX) -- set(PCRE2_PREFIX "") -- endif () -+include(CMakeFindDependencyMacro) -+if("@REQUIRE_PTHREAD@") # REQUIRE_PTHREAD -+ find_dependency(Threads) -+endif() - -- set(PCRE2_SUFFIX ${CMAKE_SHARED_LIBRARY_SUFFIX}) -- if (MINGW AND PCRE2_NON_STANDARD_LIB_SUFFIX) -- set(PCRE2_SUFFIX "-0.dll") -- endif () --endif () --find_library(PCRE2_8BIT_LIBRARY NAMES ${PCRE2_PREFIX}${PCRE2_8BIT_NAME}${PCRE2_SUFFIX} ${PCRE2_PREFIX}${PCRE2_8BIT_NAME}d${PCRE2_SUFFIX} DOC "8 bit PCRE2 library") --find_library(PCRE2_16BIT_LIBRARY NAMES ${PCRE2_PREFIX}${PCRE2_16BIT_NAME}${PCRE2_SUFFIX} ${PCRE2_PREFIX}${PCRE2_16BIT_NAME}d${PCRE2_SUFFIX} DOC "16 bit PCRE2 library") --find_library(PCRE2_32BIT_LIBRARY NAMES ${PCRE2_PREFIX}${PCRE2_32BIT_NAME}${PCRE2_SUFFIX} ${PCRE2_PREFIX}${PCRE2_32BIT_NAME}d${PCRE2_SUFFIX} DOC "32 bit PCRE2 library") --find_library(PCRE2_POSIX_LIBRARY NAMES ${PCRE2_PREFIX}${PCRE2_POSIX_NAME}${PCRE2_SUFFIX} ${PCRE2_PREFIX}${PCRE2_POSIX_NAME}d${PCRE2_SUFFIX} DOC "8 bit POSIX PCRE2 library") --unset(PCRE2_NON_STANDARD_LIB_PREFIX) --unset(PCRE2_NON_STANDARD_LIB_SUFFIX) --unset(PCRE2_8BIT_NAME) --unset(PCRE2_16BIT_NAME) --unset(PCRE2_32BIT_NAME) --unset(PCRE2_POSIX_NAME) -+include("${CMAKE_CURRENT_LIST_DIR}/pcre2-targets.cmake") - - # Set version --if (PCRE2_INCLUDE_DIR) -- set(PCRE2_VERSION "@PCRE2_MAJOR@.@PCRE2_MINOR@.0") --endif () -+set(PCRE2_VERSION "@PCRE2_MAJOR@.@PCRE2_MINOR@.0") - --# Which components have been found. --if (PCRE2_8BIT_LIBRARY) -- set(PCRE2_8BIT_FOUND TRUE) --endif () --if (PCRE2_16BIT_LIBRARY) -- set(PCRE2_16BIT_FOUND TRUE) --endif () --if (PCRE2_32BIT_LIBRARY) -- set(PCRE2_32BIT_FOUND TRUE) --endif () --if (PCRE2_POSIX_LIBRARY) -- set(PCRE2_POSIX_FOUND TRUE) --endif () -- --# Check if at least one component has been specified. --list(LENGTH PCRE2_FIND_COMPONENTS PCRE2_NCOMPONENTS) --if (PCRE2_NCOMPONENTS LESS 1) -- message(FATAL_ERROR "No components have been specified. This is not allowed. Please, specify at least one component.") --endif () --unset(PCRE2_NCOMPONENTS) -+# Chooses the linkage of the library to expose in the -+# unsuffixed edition of the target. -+macro(_pcre2_add_component_target component target) -+ # If the static library exists and either PCRE2_USE_STATIC_LIBS -+ # is defined, or the dynamic library does not exist, use the static library. -+ if(NOT TARGET PCRE2::${component}) -+ if(TARGET pcre2::pcre2-${target}-static AND (PCRE2_USE_STATIC_LIBS OR NOT TARGET pcre2::pcre2-${target}-shared)) -+ add_library(PCRE2::${component} ALIAS pcre2::pcre2-${target}-static) -+ set(PCRE2_${component}_FOUND TRUE) -+ # Otherwise use the dynamic library if it exists. 
-+ elseif(TARGET pcre2::pcre2-${target}-shared AND NOT PCRE2_USE_STATIC_LIBS) -+ add_library(PCRE2::${component} ALIAS pcre2::pcre2-${target}-shared) -+ set(PCRE2_${component}_FOUND TRUE) -+ endif() -+ if(PCRE2_${component}_FOUND) -+ get_target_property(PCRE2_${component}_LIBRARY PCRE2::${component} IMPORTED_LOCATION) -+ set(PCRE2_LIBRARIES ${PCRE2_LIBRARIES} ${PCRE2_${component}_LIBRARY}) -+ endif() -+ endif() -+endmacro() -+_pcre2_add_component_target(8BIT 8) -+_pcre2_add_component_target(16BIT 16) -+_pcre2_add_component_target(32BIT 32) -+_pcre2_add_component_target(POSIX posix) - - # When POSIX component has been specified make sure that also 8BIT component is specified. - set(PCRE2_8BIT_COMPONENT FALSE) -@@ -105,42 +83,5 @@ endif() - unset(PCRE2_8BIT_COMPONENT) - unset(PCRE2_POSIX_COMPONENT) - --include(FindPackageHandleStandardArgs) --set(${CMAKE_FIND_PACKAGE_NAME}_CONFIG "${CMAKE_CURRENT_LIST_FILE}") --find_package_handle_standard_args(PCRE2 -- FOUND_VAR PCRE2_FOUND -- REQUIRED_VARS PCRE2_INCLUDE_DIR -- HANDLE_COMPONENTS -- VERSION_VAR PCRE2_VERSION -- CONFIG_MODE --) -- --set(PCRE2_LIBRARIES) --if (PCRE2_FOUND) -- foreach(component ${PCRE2_FIND_COMPONENTS}) -- if (PCRE2_USE_STATIC_LIBS) -- add_library(PCRE2::${component} STATIC IMPORTED) -- target_compile_definitions(PCRE2::${component} INTERFACE PCRE2_STATIC) -- else () -- add_library(PCRE2::${component} SHARED IMPORTED) -- endif () -- set_target_properties(PCRE2::${component} PROPERTIES -- IMPORTED_LOCATION "${PCRE2_${component}_LIBRARY}" -- IMPORTED_IMPLIB "${PCRE2_${component}_LIBRARY}" -- INTERFACE_INCLUDE_DIRECTORIES "${PCRE2_INCLUDE_DIR}" -- ) -- if (component STREQUAL "POSIX") -- set_target_properties(PCRE2::${component} PROPERTIES -- INTERFACE_LINK_LIBRARIES "PCRE2::8BIT" -- LINK_LIBRARIES "PCRE2::8BIT" -- ) -- endif () -- -- set(PCRE2_LIBRARIES ${PCRE2_LIBRARIES} ${PCRE2_${component}_LIBRARY}) -- mark_as_advanced(PCRE2_${component}_LIBRARY) -- endforeach() --endif () -- --mark_as_advanced( -- PCRE2_INCLUDE_DIR --) -+# Check for required components. 
-+check_required_components("PCRE2") diff --git a/vcpkg_overlays/pcre2/no-static-suffix.patch b/vcpkg_overlays/pcre2/no-static-suffix.patch deleted file mode 100644 index 23cbc1ca224..00000000000 --- a/vcpkg_overlays/pcre2/no-static-suffix.patch +++ /dev/null @@ -1,33 +0,0 @@ -diff --git a/CMakeLists.txt b/CMakeLists.txt -index 2815dbb..3c915d9 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -713,8 +713,8 @@ IF(PCRE2_BUILD_PCRE2_8) - set(targets ${targets} pcre2-posix-static) - - IF(MSVC) -- SET_TARGET_PROPERTIES(pcre2-8-static PROPERTIES OUTPUT_NAME pcre2-8-static) -- SET_TARGET_PROPERTIES(pcre2-posix-static PROPERTIES OUTPUT_NAME pcre2-posix-static) -+ SET_TARGET_PROPERTIES(pcre2-8-static PROPERTIES OUTPUT_NAME pcre2-8) -+ SET_TARGET_PROPERTIES(pcre2-posix-static PROPERTIES OUTPUT_NAME pcre2-posix) - ELSE(MSVC) - SET_TARGET_PROPERTIES(pcre2-8-static PROPERTIES OUTPUT_NAME pcre2-8) - SET_TARGET_PROPERTIES(pcre2-posix-static PROPERTIES OUTPUT_NAME pcre2-posix) -@@ -793,7 +793,7 @@ IF(PCRE2_BUILD_PCRE2_16) - set(targets ${targets} pcre2-16-static) - - IF(MSVC) -- SET_TARGET_PROPERTIES(pcre2-16-static PROPERTIES OUTPUT_NAME pcre2-16-static) -+ SET_TARGET_PROPERTIES(pcre2-16-static PROPERTIES OUTPUT_NAME pcre2-16) - ELSE(MSVC) - SET_TARGET_PROPERTIES(pcre2-16-static PROPERTIES OUTPUT_NAME pcre2-16) - ENDIF(MSVC) -@@ -855,7 +855,7 @@ IF(PCRE2_BUILD_PCRE2_32) - set(targets ${targets} pcre2-32-static) - - IF(MSVC) -- SET_TARGET_PROPERTIES(pcre2-32-static PROPERTIES OUTPUT_NAME pcre2-32-static) -+ SET_TARGET_PROPERTIES(pcre2-32-static PROPERTIES OUTPUT_NAME pcre2-32) - ELSE(MSVC) - SET_TARGET_PROPERTIES(pcre2-32-static PROPERTIES OUTPUT_NAME pcre2-32) - ENDIF(MSVC) diff --git a/vcpkg_overlays/pcre2/pcre2-10.35_fix-uwp.patch b/vcpkg_overlays/pcre2/pcre2-10.35_fix-uwp.patch deleted file mode 100644 index 476dde0f6a4..00000000000 --- a/vcpkg_overlays/pcre2/pcre2-10.35_fix-uwp.patch +++ /dev/null @@ -1,10 +0,0 @@ ---- a/CMakeLists.txt 2020-05-09 16:43:10.000000000 +0200 -+++ b/CMakeLists.txt 2020-06-03 20:57:17.026182500 +0200 -@@ -619,6 +619,7 @@ - - IF(MSVC) - ADD_DEFINITIONS(-D_CRT_SECURE_NO_DEPRECATE -D_CRT_SECURE_NO_WARNINGS) -+ add_compile_options(/wd4146) - ENDIF(MSVC) - - SET(CMAKE_INCLUDE_CURRENT_DIR 1) diff --git a/vcpkg_overlays/pcre2/portfile.cmake b/vcpkg_overlays/pcre2/portfile.cmake deleted file mode 100644 index 9d2f32ca14a..00000000000 --- a/vcpkg_overlays/pcre2/portfile.cmake +++ /dev/null @@ -1,77 +0,0 @@ -vcpkg_from_github( - OUT_SOURCE_PATH SOURCE_PATH - REPO PCRE2Project/pcre2 - REF "pcre2-${VERSION}" - SHA512 50f3b8b10faf432e0ffe87eca84fdebdb10869b092e4dc66c33bd6e2657638d4433698669af2c5ad9f691d27789663682a7235943761f716f5f2e0637deafc97 - HEAD_REF master - PATCHES - pcre2-10.35_fix-uwp.patch - no-static-suffix.patch - fix-cmake.patch -) - -string(COMPARE EQUAL "${VCPKG_LIBRARY_LINKAGE}" "static" BUILD_STATIC) -string(COMPARE EQUAL "${VCPKG_LIBRARY_LINKAGE}" "dynamic" INSTALL_PDB) -string(COMPARE EQUAL "${VCPKG_CRT_LINKAGE}" "static" BUILD_STATIC_CRT) - -vcpkg_check_features( - OUT_FEATURE_OPTIONS FEATURE_OPTIONS - FEATURES - jit PCRE2_SUPPORT_JIT -) - -vcpkg_cmake_configure( - SOURCE_PATH "${SOURCE_PATH}" - OPTIONS - ${FEATURE_OPTIONS} - -DBUILD_STATIC_LIBS=${BUILD_STATIC} - -DPCRE2_STATIC_RUNTIME=${BUILD_STATIC_CRT} - -DPCRE2_BUILD_PCRE2_8=ON - -DPCRE2_BUILD_PCRE2_16=ON - -DPCRE2_BUILD_PCRE2_32=ON - -DPCRE2_SUPPORT_UNICODE=ON - -DPCRE2_BUILD_TESTS=OFF - -DPCRE2_BUILD_PCRE2GREP=OFF - -DCMAKE_DISABLE_FIND_PACKAGE_BZip2=ON - -DCMAKE_DISABLE_FIND_PACKAGE_ZLIB=ON - 
-DCMAKE_DISABLE_FIND_PACKAGE_Readline=ON - -DCMAKE_DISABLE_FIND_PACKAGE_Editline=ON - -DINSTALL_MSVC_PDB=${INSTALL_PDB} - ) - -vcpkg_cmake_install() -vcpkg_copy_pdbs() - -file(READ "${CURRENT_PACKAGES_DIR}/include/pcre2.h" PCRE2_H) -if(BUILD_STATIC) - string(REPLACE "defined(PCRE2_STATIC)" "1" PCRE2_H "${PCRE2_H}") -else() - string(REPLACE "defined(PCRE2_STATIC)" "0" PCRE2_H "${PCRE2_H}") -endif() -file(WRITE "${CURRENT_PACKAGES_DIR}/include/pcre2.h" "${PCRE2_H}") - -vcpkg_fixup_pkgconfig() -vcpkg_cmake_config_fixup(CONFIG_PATH lib/cmake/${PORT}) - -file(REMOVE_RECURSE - "${CURRENT_PACKAGES_DIR}/man" - "${CURRENT_PACKAGES_DIR}/share/doc" - "${CURRENT_PACKAGES_DIR}/debug/include" - "${CURRENT_PACKAGES_DIR}/debug/man" - "${CURRENT_PACKAGES_DIR}/debug/share") - -file(MAKE_DIRECTORY "${CURRENT_PACKAGES_DIR}/tools/pcre2") -file(RENAME "${CURRENT_PACKAGES_DIR}/bin/pcre2-config" "${CURRENT_PACKAGES_DIR}/tools/pcre2/pcre2-config") -vcpkg_replace_string("${CURRENT_PACKAGES_DIR}/tools/pcre2/pcre2-config" "${CURRENT_PACKAGES_DIR}" [[$(cd "$(dirname "$0")/../.."; pwd -P)]]) -if(NOT VCPKG_BUILD_TYPE) - file(MAKE_DIRECTORY "${CURRENT_PACKAGES_DIR}/tools/pcre2/debug") - file(RENAME "${CURRENT_PACKAGES_DIR}/debug/bin/pcre2-config" "${CURRENT_PACKAGES_DIR}/tools/pcre2/debug/pcre2-config") - vcpkg_replace_string("${CURRENT_PACKAGES_DIR}/tools/pcre2/debug/pcre2-config" "${CURRENT_PACKAGES_DIR}/debug" [[$(cd "$(dirname "$0")/../../../debug"; pwd -P)]]) - vcpkg_replace_string("${CURRENT_PACKAGES_DIR}/tools/pcre2/debug/pcre2-config" [[${prefix}/include]] [[${prefix}/../include]]) -endif() -if(VCPKG_LIBRARY_LINKAGE STREQUAL "static") - file(REMOVE_RECURSE "${CURRENT_PACKAGES_DIR}/debug/bin" "${CURRENT_PACKAGES_DIR}/bin") -endif() - -file(INSTALL "${CMAKE_CURRENT_LIST_DIR}/usage" DESTINATION "${CURRENT_PACKAGES_DIR}/share/${PORT}") -vcpkg_install_copyright(FILE_LIST "${SOURCE_PATH}/COPYING") diff --git a/vcpkg_overlays/pcre2/usage b/vcpkg_overlays/pcre2/usage deleted file mode 100644 index 1ef36ecab71..00000000000 --- a/vcpkg_overlays/pcre2/usage +++ /dev/null @@ -1,6 +0,0 @@ -The package pcre2 is compatible with built-in CMake targets: - - # Each component imports a target: - # TARGETS: PCRE2::8BIT PCRE2::16BIT PCRE2::32BIT PCRE2::POSIX - find_package(pcre2 CONFIG REQUIRED) - target_link_libraries(main PRIVATE PCRE2::8BIT PCRE2::16BIT PCRE2::32BIT PCRE2::POSIX) diff --git a/vcpkg_overlays/pcre2/vcpkg.json b/vcpkg_overlays/pcre2/vcpkg.json deleted file mode 100644 index af255b21b51..00000000000 --- a/vcpkg_overlays/pcre2/vcpkg.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "name": "pcre2", - "version": "10.43", - "description": "Regular Expression pattern matching using the same syntax and semantics as Perl 5.", - "homepage": "https://github.com/PCRE2Project/pcre2", - "license": "BSD-3-Clause", - "dependencies": [ - { - "name": "vcpkg-cmake", - "host": true - }, - { - "name": "vcpkg-cmake-config", - "host": true - } - ], - "default-features": [ - "platform-default-features" - ], - "features": { - "jit": { - "description": "Enable support for Just-In-Time compiling regex matchers", - "supports": "!emscripten & !ios" - }, - "platform-default-features": { - "description": "Enable default features", - "dependencies": [ - { - "name": "pcre2", - "features": [ - "jit" - ], - "platform": "!emscripten & !ios" - } - ] - } - } -} From a1d1e7577645c94ea9d4e63bc9e616fe5ea98633 Mon Sep 17 00:00:00 2001 From: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> Date: Wed, 19 Jun 2024 11:57:42 -0400 Subject: [PATCH 083/151] 
HPCC-31896 ECL Watch v9 fix Log viewer filter by audience pulls in a change from the @hpcc-js/comms library which fixes an issue with the k8s Logs list, where attempting to filter by "Audience" would not set the request parameters correctly. Signed-off-by: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> --- esp/src/package-lock.json | 8 ++++---- esp/src/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/esp/src/package-lock.json b/esp/src/package-lock.json index bc4b85a6248..1a1d9ac0fbf 100644 --- a/esp/src/package-lock.json +++ b/esp/src/package-lock.json @@ -18,7 +18,7 @@ "@hpcc-js/chart": "2.83.3", "@hpcc-js/codemirror": "2.61.4", "@hpcc-js/common": "2.71.17", - "@hpcc-js/comms": "2.92.1", + "@hpcc-js/comms": "2.92.2", "@hpcc-js/dataflow": "8.1.6", "@hpcc-js/eclwatch": "2.74.3", "@hpcc-js/graph": "2.85.15", @@ -1855,9 +1855,9 @@ } }, "node_modules/@hpcc-js/comms": { - "version": "2.92.1", - "resolved": "https://registry.npmjs.org/@hpcc-js/comms/-/comms-2.92.1.tgz", - "integrity": "sha512-nx4JJUSpU1m/Yd53PsbEL26DQFx+3UF0Pelk5O0BY2eFGpPVaQ9jHeWvuMJCHHpHcbxUtWN5nlwZlZgE9CFptA==", + "version": "2.92.2", + "resolved": "https://registry.npmjs.org/@hpcc-js/comms/-/comms-2.92.2.tgz", + "integrity": "sha512-9AbPnCYuTF6OhbSiG5QMDA2vuF457YL88h2ltuxPOjsOxp9Dp5VFlTkh88vW1W3Yph/+faGhiqUSvLMgFIwXEA==", "dependencies": { "@hpcc-js/ddl-shim": "^2.20.6", "@hpcc-js/util": "^2.51.0", diff --git a/esp/src/package.json b/esp/src/package.json index f823572534d..b104c55aed4 100644 --- a/esp/src/package.json +++ b/esp/src/package.json @@ -44,7 +44,7 @@ "@hpcc-js/chart": "2.83.3", "@hpcc-js/codemirror": "2.61.4", "@hpcc-js/common": "2.71.17", - "@hpcc-js/comms": "2.92.1", + "@hpcc-js/comms": "2.92.2", "@hpcc-js/dataflow": "8.1.6", "@hpcc-js/eclwatch": "2.74.3", "@hpcc-js/graph": "2.85.15", From d76308621ee70672dfb666aaccdc03348470eab1 Mon Sep 17 00:00:00 2001 From: Jim DeFabia Date: Wed, 19 Jun 2024 14:38:39 -0400 Subject: [PATCH 084/151] HPCC-32098 Modify incorrect file name in Containerized book Signed-off-by: Jim DeFabia --- .../ContainerizedMods/ConfigureValues.xml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml b/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml index f3b52e78f51..d424a8d8a54 100644 --- a/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml +++ b/docs/EN_US/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml @@ -22,7 +22,7 @@ The entire HPCC Systems configuration in the container space, is governed by a single file, a values.yaml file, and - its associated schema (values-schema.json) + its associated schema (values.schema.json) file. @@ -509,7 +509,7 @@ components - + @@ -845,7 +845,7 @@ Preferred Storage The preferredReadPlanes option is available - for each type of cluster--hThor, Thor, and Roxie. + for each type of cluster--hThor, Thor, and Roxie. This option is only significant for logical files which reside on multiple storage planes. When specified, the HPCC Systems platform @@ -1332,9 +1332,9 @@ thor: - + - + From 7997a0a75fc4a7404fb1d5a41c8ef97ba46cb7eb Mon Sep 17 00:00:00 2001 From: "Dan S. 
Camper" Date: Thu, 30 May 2024 14:57:10 -0500 Subject: [PATCH 085/151] HPCC-31921 Add caching of regex compiled search patterns --- ecl/hql/hqlfold.cpp | 2 +- rtl/eclrtl/eclregex.cpp | 443 ++++++++++++++---- rtl/eclrtl/eclrtl.cpp | 13 + rtl/eclrtl/eclrtl.hpp | 1 + system/jlib/jhash.hpp | 100 ++++ .../regress/ecl/key/regex_cache_string.xml | 3 + .../regress/ecl/key/regex_cache_unicode.xml | 3 + testing/regress/ecl/key/regex_cache_utf8.xml | 3 + testing/regress/ecl/regex_cache_string.ecl | 70 +++ testing/regress/ecl/regex_cache_unicode.ecl | 70 +++ testing/regress/ecl/regex_cache_utf8.ecl | 70 +++ 11 files changed, 677 insertions(+), 101 deletions(-) create mode 100644 testing/regress/ecl/key/regex_cache_string.xml create mode 100644 testing/regress/ecl/key/regex_cache_unicode.xml create mode 100644 testing/regress/ecl/key/regex_cache_utf8.xml create mode 100644 testing/regress/ecl/regex_cache_string.ecl create mode 100644 testing/regress/ecl/regex_cache_unicode.ecl create mode 100644 testing/regress/ecl/regex_cache_utf8.ecl diff --git a/ecl/hql/hqlfold.cpp b/ecl/hql/hqlfold.cpp index 7e1dcf9eda0..4a0226cbcfd 100644 --- a/ecl/hql/hqlfold.cpp +++ b/ecl/hql/hqlfold.cpp @@ -2796,7 +2796,7 @@ IHqlExpression * foldConstantOperator(IHqlExpression * expr, unsigned foldOption StringBuffer pattern, search; v0->getUTF8Value(pattern); v1->getUTF8Value(search); - ICompiledStrRegExpr * compiled = rtlCreateCompiledU8StrRegExpr(pattern, !expr->hasAttribute(noCaseAtom)); + ICompiledStrRegExpr * compiled = rtlCreateCompiledU8StrRegExpr(pattern.lengthUtf8(), pattern, !expr->hasAttribute(noCaseAtom)); compiled->getMatchSet(isAllResult, resultBytes, matchResults.refdata(), search.lengthUtf8(), search.str()); rtlDestroyCompiledU8StrRegExpr(compiled); } diff --git a/rtl/eclrtl/eclregex.cpp b/rtl/eclrtl/eclregex.cpp index c86a4a3e79c..275aa19cd4d 100644 --- a/rtl/eclrtl/eclregex.cpp +++ b/rtl/eclrtl/eclregex.cpp @@ -28,7 +28,12 @@ #include "platform.h" #include "eclrtl.hpp" #include "eclrtl_imp.hpp" +#include "jhash.hpp" #include "jlib.hpp" +#include "jmisc.hpp" +#include "jprop.hpp" + +#include //--------------------------------------------------------------------------- @@ -57,14 +62,16 @@ static void pcre2Free(void * block, void * /*userData*/) rtlFree(block); } -/// @brief Convert a PCRE2 error code to a string and throw an exception -/// @param errCode PCRE2 error code -/// @param msgPrefix Prefix for error message; can be an empty string; -/// include a trailing space if a non-empty message is passed -/// @param regex OPTIONAL; regex pattern that was in play when error occurred -/// @param errOffset OPTIONAL; offset in regex pattern where error occurred; -/// ignored if regex is null or empty -static void failWithPCRE2Error(int errCode, const char * msgPrefix, const char * regex = nullptr, int errOffset = -1) +/** + * @brief Handles failure reporting with a regex and throws an exception with the given error code and message. + * + * @param errCode The error code indicating the type of error that occurred. + * @param msgPrefix The prefix to be added to the error message; can be an empty string; include a trailing space if a non-empty regex is passed. + * @param regex The regular expression pattern; may be an empty string. + * @param regexLength The length (in code points) of the regular expression pattern. + * @param errOffset The offset into regex at which the error occurred. 
+ */
+static void failWithPCRE2Error(int errCode, const std::string & msgPrefix, const std::string & regex, int errOffset)
 {
     const int errBuffSize = 120;
     char errBuff[errBuffSize];
@@ -80,7 +87,7 @@ static void failWithPCRE2Error(int errCode, const char * msgPrefix, const char *
         msg += std::to_string(errCode);
         msg += " (no error message available)";
     }
-    if (regex && regex[0])
+    if (!regex.empty())
     {
         msg += " (regex: '";
         msg += regex;
@@ -95,25 +102,143 @@ static void failWithPCRE2Error(int errCode, const char * msgPrefix, const char *
     rtlFail(0, msg.c_str());
 }
 
-/// @brief Convert a PCRE2 error code to a string and throw an exception
-/// @param errCode PCRE2 error code
-/// @param msgPrefix Prefix for error message; can be an empty string;
-///        include a trailing space if a non-empty message is passed
-/// @param regex OPTIONAL; Unicode regex pattern that was in play when error occurred
-/// @param errOffset OPTIONAL; offset in regex pattern where error occurred;
-///        ignored if regex is null or empty
-static void failWithUPCRE2Error(int errCode, const char * msgPrefix, const UChar * regex = nullptr, int errOffset = -1)
+/**
+ * @brief Handles the failure of a regular expression operation and throws an exception with the given error code and message.
+ *
+ * @param errCode The error code associated with the failure.
+ * @param msg The error message describing the failure.
+ */
+static void failWithPCRE2Error(int errCode, const std::string & msg)
+{
+    failWithPCRE2Error(errCode, msg, "", -1);
+}
+
+/**
+ * @brief Handles failure reporting with a Unicode regex and throws an exception with the given error code and message.
+ *
+ * @param errCode The error code indicating the type of error that occurred.
+ * @param msgPrefix The prefix to be added to the error message; can be an empty string; include a trailing space if a non-empty regex is passed.
+ * @param regex The regular expression pattern in UChar format.
+ * @param regexLength The length (in code points) of the regular expression pattern.
+ * @param errOffset The offset into regex at which the error occurred.
+ */
+static void failWithPCRE2Error(int errCode, const std::string & msgPrefix, const UChar * regex, int regexLength, int errOffset)
 {
     std::string regexPattern;
-    if (regex)
+    if (regex && regex[0])
     {
         char * regexStr = nullptr;
         unsigned regexStrLen;
-        rtlUnicodeToEscapedStrX(regexStrLen, regexStr, rtlUnicodeStrlen(regex), regex);
+        rtlUnicodeToEscapedStrX(regexStrLen, regexStr, regexLength, regex);
         regexPattern = std::string(regexStr, regexStrLen);
         rtlFree(regexStr);
     }
-    failWithPCRE2Error(errCode, msgPrefix, regexPattern.c_str(), errOffset);
+    failWithPCRE2Error(errCode, msgPrefix, regexPattern, errOffset);
+}
+
+//---------------------------------------------------------------------------
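For readers who have not used PCRE2 directly, the helpers above follow a standard idiom: a failed call hands back an integer error code, which pcre2_get_error_message renders as readable text. The sketch below is illustrative only; it is not HPCC code and assumes nothing beyond the public PCRE2 8-bit API. The 120-byte buffer simply mirrors the helper above.

    // Minimal, self-contained demonstration of PCRE2 error reporting
    #define PCRE2_CODE_UNIT_WIDTH 8
    #include <pcre2.h>
    #include <cstdio>

    int main()
    {
        int errCode = 0;
        PCRE2_SIZE errOffset = 0;
        // Unbalanced parenthesis: guaranteed to fail to compile
        pcre2_code * re = pcre2_compile((PCRE2_SPTR)"(abc", PCRE2_ZERO_TERMINATED,
                                        0, &errCode, &errOffset, nullptr);
        if (!re)
        {
            PCRE2_UCHAR errBuff[120];
            // Returns the message length, or a negative value for an unknown code
            int len = pcre2_get_error_message(errCode, errBuff, sizeof(errBuff));
            if (len > 0)
                printf("compile failed at offset %zu: %s\n", (size_t)errOffset, (const char *)errBuff);
            return 1;
        }
        pcre2_code_free(re);
        return 0;
    }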
+
+/**
+ * @brief Parent class of all compiled regular expression pattern classes; used for caching.
+ */
+class RegexCacheEntry
+{
+private:
+    uint32_t savedOptions = 0; // set when the object is cached
+    std::string savedPattern; // used as a blob store; set when the object is cached
+    std::shared_ptr<pcre2_code_8> compiledRegex8 = nullptr;
+    std::shared_ptr<pcre2_code_16> compiledRegex16 = nullptr;
+
+public:
+    RegexCacheEntry() = delete;
+
+    RegexCacheEntry(size32_t _patternSize, const char * _pattern, uint32_t _options, std::shared_ptr<pcre2_code_8> _compiledRegex8)
+        : savedOptions(_options), savedPattern(_pattern, _patternSize), compiledRegex8(_compiledRegex8)
+    {}
+
+    RegexCacheEntry(size32_t _patternSize, const char * _pattern, uint32_t _options, std::shared_ptr<pcre2_code_16> _compiledRegex16)
+        : savedOptions(_options), savedPattern(_pattern, _patternSize), compiledRegex16(_compiledRegex16)
+    {}
+
+    RegexCacheEntry(const RegexCacheEntry & other) = delete;
+
+    static hash64_t hashValue(size32_t patternSize, const char * pattern, uint32_t options)
+    {
+        hash64_t hash = HASH64_INIT;
+        hash = rtlHash64Data(patternSize, pattern, hash);
+        hash = rtlHash64Data(sizeof(options), &options, hash);
+        return hash;
+    }
+
+    bool hasSamePattern(size32_t patternSize, const char * pattern, uint32_t options) const
+    {
+        if ((patternSize == 0) || (patternSize != savedPattern.size()))
+            return false;
+        if (options != savedOptions)
+            return false;
+        return (memcmp(pattern, savedPattern.data(), patternSize) == 0);
+    }
+
+    std::shared_ptr<pcre2_code_8> getCompiledRegex8() const { return compiledRegex8; }
+    std::shared_ptr<pcre2_code_16> getCompiledRegex16() const { return compiledRegex16; }
+};
+
+//---------------------------------------------------------------------------
+
+#define DEFAULT_CACHE_MAX_SIZE 500
+static CLRUCache<hash64_t, std::shared_ptr<RegexCacheEntry>> compiledStrRegExprCache(DEFAULT_CACHE_MAX_SIZE);
+static CriticalSection compiledStrRegExprLock;
+static bool compiledCacheEnabled = true;
+
+/**
+ * @brief Provide an optional override to the maximum cache size for regex patterns.
+ *
+ * The function searches the containerized "expert" section, or the bare-metal
+ * Software/Globals section, for an optional "regex" subsection with a "cacheSize" attribute.
+ * By default, the maximum cache size is set to 500 patterns. Override with 0 to disable caching.
+ */
+static void initMaxCacheSize()
+{
+#ifdef _CONTAINERIZED
+    Owned<IPropertyTree> expert;
+#else
+    Owned<IPropertyTree> envtree;
+    IPropertyTree * expert = nullptr;
+#endif
+
+    try
+    {
+#ifdef _CONTAINERIZED
+        expert.setown(getGlobalConfigSP()->getPropTree("expert"));
+#else
+        envtree.setown(getHPCCEnvironment());
+        if (envtree)
+            expert = envtree->queryPropTree("Software/Globals");
+#endif
+    }
+    catch (IException *e)
+    {
+        e->Release();
+    }
+    catch (...)
+    {
+    }
+
+    size32_t cacheMaxSize = DEFAULT_CACHE_MAX_SIZE;
+
+    if (expert)
+    {
+        IPropertyTree *regexProps = expert->queryPropTree("regex");
+        if (regexProps)
+        {
+            cacheMaxSize = regexProps->getPropInt("@cacheSize", cacheMaxSize);
+        }
+    }
+
+    if (cacheMaxSize > 0)
+        compiledStrRegExprCache.setMaxCacheSize(cacheMaxSize);
+    else
+        compiledCacheEnabled = false;
+}
 
 //---------------------------------------------------------------------------
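The cache keys each entry on a 64-bit hash of the pattern bytes combined with the compile options, and hasSamePattern() re-verifies the stored bytes and options on every hit, so a (rare) hash collision can never hand back the wrong compiled pattern. The following standalone sketch shows that hash-then-verify discipline; Entry and hashPattern are invented for illustration, with FNV-1a and std::unordered_map standing in for rtlHash64Data and the CLRUCache class introduced later in this patch.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <string>
    #include <unordered_map>

    struct Entry
    {
        uint32_t options;
        std::string pattern; // verbatim copy, kept for collision checks
    };

    static uint64_t hashPattern(const std::string & pattern, uint32_t options)
    {
        uint64_t h = 14695981039346656037ULL; // FNV-1a offset basis
        auto mix = [&h](const void * data, size_t len)
        {
            const unsigned char * p = static_cast<const unsigned char *>(data);
            for (size_t i = 0; i < len; i++)
                h = (h ^ p[i]) * 1099511628211ULL; // FNV-1a prime
        };
        mix(pattern.data(), pattern.size());
        mix(&options, sizeof(options)); // options are part of the key
        return h;
    }

    int main()
    {
        std::unordered_map<uint64_t, std::shared_ptr<Entry>> cache;

        cache[hashPattern("[a-z]+", 0)] = std::make_shared<Entry>(Entry{0, "[a-z]+"});

        // The hash alone is not proof of identity: a different pattern could
        // conceivably hash to the same value, so verify the stored bytes too.
        auto it = cache.find(hashPattern("[a-z]+", 0));
        assert(it != cache.end());
        assert(it->second->pattern == "[a-z]+" && it->second->options == 0);
        return 0;
    }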
@@ -122,18 +247,18 @@ class CStrRegExprFindInstance : implements IStrRegExprFindInstance
 {
 private:
     bool matched = false;
-    pcre2_code_8 * compiledRegex = nullptr; // do not free; this will be owned by caller
+    std::shared_ptr<pcre2_code_8> compiledRegex = nullptr;
     pcre2_match_data_8 * matchData = nullptr;
     const char * subject = nullptr; // points to current subject of regex; do not free
     char * sample = nullptr; //only required if findstr/findvstr will be called
 
 public:
-    CStrRegExprFindInstance(pcre2_code_8 * _compiledRegex, const char * _subject, size32_t _from, size32_t _len, bool _keep)
+    CStrRegExprFindInstance(std::shared_ptr<pcre2_code_8> _compiledRegex, const char * _subject, size32_t _from, size32_t _len, bool _keep)
         : compiledRegex(_compiledRegex)
     {
         // See if UTF-8 is enabled on this compiled regex
         uint32_t option_bits;
-        pcre2_pattern_info_8(compiledRegex, PCRE2_INFO_ALLOPTIONS, &option_bits);
+        pcre2_pattern_info_8(compiledRegex.get(), PCRE2_INFO_ALLOPTIONS, &option_bits);
         bool utf8Enabled = (option_bits & PCRE2_UTF) != 0;
         // Make sure the offset and length is in code points (bytes), not characters
         size32_t subjectOffset = (utf8Enabled ? rtlUtf8Size(_from, _subject) : _from);
@@ -152,9 +277,9 @@ class CStrRegExprFindInstance : implements IStrRegExprFindInstance
         }
 
         matched = false;
-        matchData = pcre2_match_data_create_from_pattern_8(compiledRegex, pcre2GeneralContext8);
+        matchData = pcre2_match_data_create_from_pattern_8(compiledRegex.get(), pcre2GeneralContext8);
 
-        int numMatches = pcre2_match_8(compiledRegex, (PCRE2_SPTR8)subject, subjectSize, 0, 0, matchData, pcre2MatchContext8);
+        int numMatches = pcre2_match_8(compiledRegex.get(), (PCRE2_SPTR8)subject, subjectSize, 0, 0, matchData, pcre2MatchContext8);
 
         matched = numMatches > 0;
 
@@ -219,25 +344,10 @@ class CStrRegExprFindInstance : implements IStrRegExprFindInstance
 class CCompiledStrRegExpr : implements ICompiledStrRegExpr
 {
 private:
-    pcre2_code_8 * compiledRegex = nullptr;
+    std::shared_ptr<pcre2_code_8> compiledRegex = nullptr;
     bool isUTF8Enabled = false;
 
 public:
-    CCompiledStrRegExpr(const char * _regex, bool _isCaseSensitive, bool _enableUTF8)
-        : isUTF8Enabled(_enableUTF8)
-    {
-        int errNum = 0;
-        PCRE2_SIZE errOffset;
-        uint32_t options = ((_isCaseSensitive ? 0 : PCRE2_CASELESS) | (_enableUTF8 ? PCRE2_UTF : 0));
-
-        compiledRegex = pcre2_compile_8((PCRE2_SPTR8)_regex, PCRE2_ZERO_TERMINATED, options, &errNum, &errOffset, pcre2CompileContext8);
-
-        if (compiledRegex == nullptr)
-        {
-            failWithPCRE2Error(errNum, "Error in regex pattern: ", _regex, errOffset);
-        }
-    }
-
     CCompiledStrRegExpr(int _regexLength, const char * _regex, bool _isCaseSensitive, bool _enableUTF8)
         : isUTF8Enabled(_enableUTF8)
     {
@@ -246,26 +356,29 @@ class CCompiledStrRegExpr : implements ICompiledStrRegExpr
         uint32_t options = ((_isCaseSensitive ? 0 : PCRE2_CASELESS) | (_enableUTF8 ? PCRE2_UTF : 0));
         size32_t regexSize = (isUTF8Enabled ?
rtlUtf8Size(_regexLength, _regex) : _regexLength); - compiledRegex = pcre2_compile_8((PCRE2_SPTR8)_regex, regexSize, options, &errNum, &errOffset, pcre2CompileContext8); + pcre2_code_8 * newCompiledRegex = pcre2_compile_8((PCRE2_SPTR8)_regex, regexSize, options, &errNum, &errOffset, pcre2CompileContext8); - if (compiledRegex == nullptr) + if (newCompiledRegex == nullptr) { - failWithPCRE2Error(errNum, "Error in regex pattern: ", _regex, errOffset); + failWithPCRE2Error(errNum, "Error in regex pattern: ", std::string(_regex, _regexLength), errOffset); } - } - ~CCompiledStrRegExpr() //CAVEAT non-virtual destructor ! - { - pcre2_code_free_8(compiledRegex); + compiledRegex = std::shared_ptr(newCompiledRegex, pcre2_code_free_8); } + CCompiledStrRegExpr(const RegexCacheEntry& cacheEntry, bool _enableUTF8) + : compiledRegex(cacheEntry.getCompiledRegex8()), isUTF8Enabled(_enableUTF8) + {} + + std::shared_ptr getCompiledRegex() const { return compiledRegex; } + //ICompiledStrRegExpr void replace(size32_t & outlen, char * & out, size32_t slen, char const * str, size32_t rlen, char const * replace) const { PCRE2_SIZE pcreLen = 0; outlen = 0; - pcre2_match_data_8 * matchData = pcre2_match_data_create_from_pattern_8(compiledRegex, pcre2GeneralContext8); + pcre2_match_data_8 * matchData = pcre2_match_data_create_from_pattern_8(compiledRegex.get(), pcre2GeneralContext8); // This method is often called through an ECL interface and the provided lengths // (slen and rlen) are in characters, not bytes; we need to convert these to a @@ -279,7 +392,7 @@ class CCompiledStrRegExpr : implements ICompiledStrRegExpr // Note that pcreLen will include space for a terminating null character; // we have to allocate memory for that byte to avoid a buffer overrun, // but we won't count that terminating byte - int replaceResult = pcre2_substitute_8(compiledRegex, (PCRE2_SPTR8)str, sourceSize, 0, replaceOptions|PCRE2_SUBSTITUTE_OVERFLOW_LENGTH, matchData, pcre2MatchContext8, (PCRE2_SPTR8)replace, replaceSize, nullptr, &pcreLen); + int replaceResult = pcre2_substitute_8(compiledRegex.get(), (PCRE2_SPTR8)str, sourceSize, 0, replaceOptions|PCRE2_SUBSTITUTE_OVERFLOW_LENGTH, matchData, pcre2MatchContext8, (PCRE2_SPTR8)replace, replaceSize, nullptr, &pcreLen); if (replaceResult < 0 && replaceResult != PCRE2_ERROR_NOMEMORY) { @@ -292,7 +405,7 @@ class CCompiledStrRegExpr : implements ICompiledStrRegExpr { out = (char *)rtlMalloc(pcreLen); - replaceResult = pcre2_substitute_8(compiledRegex, (PCRE2_SPTR8)str, sourceSize, 0, replaceOptions, matchData, pcre2MatchContext8, (PCRE2_SPTR8)replace, replaceSize, (PCRE2_UCHAR8 *)out, &pcreLen); + replaceResult = pcre2_substitute_8(compiledRegex.get(), (PCRE2_SPTR8)str, sourceSize, 0, replaceOptions, matchData, pcre2MatchContext8, (PCRE2_SPTR8)replace, replaceSize, (PCRE2_UCHAR8 *)out, &pcreLen); // Note that, weirdly, pcreLen will now contain the number of code points // in the result *excluding* the null terminator, so pcreLen will @@ -323,7 +436,7 @@ class CCompiledStrRegExpr : implements ICompiledStrRegExpr PCRE2_SIZE offset = 0; uint32_t matchOptions = 0; PCRE2_SIZE subjectSize = (isUTF8Enabled ? 
rtlUtf8Size(_subjectLen, _subject) : _subjectLen); - pcre2_match_data_8 * matchData = pcre2_match_data_create_from_pattern_8(compiledRegex, pcre2GeneralContext8); + pcre2_match_data_8 * matchData = pcre2_match_data_create_from_pattern_8(compiledRegex.get(), pcre2GeneralContext8); // Capture groups are ignored when gathering match results into a set, // so we will focus on only the first match (the entire matched string); @@ -332,7 +445,7 @@ class CCompiledStrRegExpr : implements ICompiledStrRegExpr while (offset < subjectSize) { - int numMatches = pcre2_match_8(compiledRegex, (PCRE2_SPTR8)_subject, subjectSize, offset, matchOptions, matchData, pcre2MatchContext8); + int numMatches = pcre2_match_8(compiledRegex.get(), (PCRE2_SPTR8)_subject, subjectSize, offset, matchOptions, matchData, pcre2MatchContext8); if (numMatches < 0) { @@ -389,16 +502,62 @@ class CCompiledStrRegExpr : implements ICompiledStrRegExpr // STRING implementation //--------------------------------------------------------------------------- +/** + * @brief Fetches or creates a compiled string regular expression object. + * + * This function fetches a compiled string regular expression object from the cache if it exists, + * or creates a new one if it doesn't. The regular expression object is created based on the provided + * regex pattern, length, and case sensitivity flag. The created object is then cached for future use. + * + * @param _regexLength The length of the regex pattern. + * @param _regex The regex pattern. + * @param _isCaseSensitive Flag indicating whether the regex pattern is case sensitive or not. + * @return A pointer to a copy of the fetched or created CCompiledStrRegExpr object. The returned object + * * must eventually be deleted. + */ +CCompiledStrRegExpr* fetchOrCreateCompiledStrRegExpr(int _regexLength, const char * _regex, bool _isCaseSensitive) +{ + if (compiledCacheEnabled) + { + CCompiledStrRegExpr * compiledObjPtr = nullptr; + uint32_t options = (_isCaseSensitive ? 
0 : PCRE2_CASELESS); + hash64_t regexHash = RegexCacheEntry::hashValue(_regexLength, _regex, options); + + // Check the cache + { + CriticalBlock lock(compiledStrRegExprLock); + RegexCacheEntry * cacheEntry = compiledStrRegExprCache.get(regexHash).get(); + + if (cacheEntry && cacheEntry->hasSamePattern(_regexLength, _regex, options)) + { + // Return a new compiled pattern object based on the cached information + return new CCompiledStrRegExpr(*cacheEntry, false); + } + + // Create a new compiled pattern object + compiledObjPtr = new CCompiledStrRegExpr(_regexLength, _regex, _isCaseSensitive, false); + // Create a cache entry for the new object + compiledStrRegExprCache.set(regexHash, std::make_shared(_regexLength, _regex, options, compiledObjPtr->getCompiledRegex())); + } + + return compiledObjPtr; + } + else + { + return new CCompiledStrRegExpr(_regexLength, _regex, _isCaseSensitive, false); + } +} + +//--------------------------------------------------------------------------- + ECLRTL_API ICompiledStrRegExpr * rtlCreateCompiledStrRegExpr(const char * regExpr, bool isCaseSensitive) { - CCompiledStrRegExpr * expr = new CCompiledStrRegExpr(regExpr, isCaseSensitive, false); - return expr; + return fetchOrCreateCompiledStrRegExpr(strlen(regExpr), regExpr, isCaseSensitive); } ECLRTL_API ICompiledStrRegExpr * rtlCreateCompiledStrRegExpr(int regExprLength, const char * regExpr, bool isCaseSensitive) { - CCompiledStrRegExpr * expr = new CCompiledStrRegExpr(regExprLength, regExpr, isCaseSensitive, false); - return expr; + return fetchOrCreateCompiledStrRegExpr(regExprLength, regExpr, isCaseSensitive); } ECLRTL_API void rtlDestroyCompiledStrRegExpr(ICompiledStrRegExpr * compiledExpr) @@ -417,16 +576,63 @@ ECLRTL_API void rtlDestroyStrRegExprFindInstance(IStrRegExprFindInstance * findI // UTF8 implementation //--------------------------------------------------------------------------- +/** + * @brief Fetches or creates a compiled UTF-8 regular expression object. + * + * This function fetches a compiled UTF-8 regular expression object from the cache if it exists, + * or creates a new one if it doesn't. The regular expression object is created based on the provided + * regex pattern, length, and case sensitivity flag. The created object is then cached for future use. + * + * @param _regexLength The length of the regex pattern, in code points. + * @param _regex The regex pattern. + * @param _isCaseSensitive Flag indicating whether the regex pattern is case sensitive or not. + * @return A pointer to a copy of the fetched or created CCompiledStrRegExpr object. The returned object + * * must eventually be deleted. + */ +CCompiledStrRegExpr* fetchOrCreateCompiledU8StrRegExpr(int _regexLength, const char * _regex, bool _isCaseSensitive) +{ + if (compiledCacheEnabled) + { + CCompiledStrRegExpr * compiledObjPtr = nullptr; + unsigned int regexSize = rtlUtf8Size(_regexLength, _regex); + uint32_t options = PCRE2_UTF | (_isCaseSensitive ? 
0 : PCRE2_CASELESS); + hash64_t regexHash = RegexCacheEntry::hashValue(regexSize, _regex, options); + + // Check the cache + { + CriticalBlock lock(compiledStrRegExprLock); + RegexCacheEntry * cacheEntry = compiledStrRegExprCache.get(regexHash).get(); + + if (cacheEntry && cacheEntry->hasSamePattern(regexSize, _regex, options)) + { + // Return a new compiled pattern object based on the cached information + return new CCompiledStrRegExpr(*cacheEntry, true); + } + + // Create a new compiled pattern object + compiledObjPtr = new CCompiledStrRegExpr(_regexLength, _regex, _isCaseSensitive, true); + // Create a cache entry for the new object + compiledStrRegExprCache.set(regexHash, std::make_shared(regexSize, _regex, options, compiledObjPtr->getCompiledRegex())); + } + + return compiledObjPtr; + } + else + { + return new CCompiledStrRegExpr(_regexLength, _regex, _isCaseSensitive, true); + } +} + +//--------------------------------------------------------------------------- + ECLRTL_API ICompiledStrRegExpr * rtlCreateCompiledU8StrRegExpr(const char * regExpr, bool isCaseSensitive) { - CCompiledStrRegExpr * expr = new CCompiledStrRegExpr(regExpr, isCaseSensitive, true); - return expr; + return fetchOrCreateCompiledU8StrRegExpr(rtlUtf8Length(regExpr), regExpr, isCaseSensitive); } ECLRTL_API ICompiledStrRegExpr * rtlCreateCompiledU8StrRegExpr(int regExprLength, const char * regExpr, bool isCaseSensitive) { - CCompiledStrRegExpr * expr = new CCompiledStrRegExpr(regExprLength, regExpr, isCaseSensitive, true); - return expr; + return fetchOrCreateCompiledU8StrRegExpr(regExprLength, regExpr, isCaseSensitive); } ECLRTL_API void rtlDestroyCompiledU8StrRegExpr(ICompiledStrRegExpr * compiledExpr) @@ -451,25 +657,25 @@ class CUStrRegExprFindInstance : implements IUStrRegExprFindInstance { private: bool matched = false; - pcre2_code_16 * compiledRegex = nullptr; // do not free; this will be owned by caller + std::shared_ptr compiledRegex = nullptr; pcre2_match_data_16 * matchData = nullptr; const UChar * subject = nullptr; // points to current subject of regex; do not free public: - CUStrRegExprFindInstance(pcre2_code_16 * _compiledRegex, const UChar * _subject, size32_t _from, size32_t _len) + CUStrRegExprFindInstance(std::shared_ptr _compiledRegex, const UChar * _subject, size32_t _from, size32_t _len) : compiledRegex(_compiledRegex) { subject = _subject + _from; matched = false; - matchData = pcre2_match_data_create_from_pattern_16(compiledRegex, pcre2GeneralContext16); - int numMatches = pcre2_match_16(compiledRegex, (PCRE2_SPTR16)subject, _len, 0, 0, matchData, pcre2MatchContext16); + matchData = pcre2_match_data_create_from_pattern_16(compiledRegex.get(), pcre2GeneralContext16); + int numMatches = pcre2_match_16(compiledRegex.get(), (PCRE2_SPTR16)subject, _len, 0, 0, matchData, pcre2MatchContext16); matched = numMatches > 0; if (numMatches < 0 && numMatches != PCRE2_ERROR_NOMATCH) { // Treat everything else as an error - failWithUPCRE2Error(numMatches, "Error in regex search: "); + failWithPCRE2Error(numMatches, "Error in regex search: "); } } @@ -526,47 +732,36 @@ class CUStrRegExprFindInstance : implements IUStrRegExprFindInstance class CCompiledUStrRegExpr : implements ICompiledUStrRegExpr { private: - pcre2_code_16 * compiledRegex = nullptr; + std::shared_ptr compiledRegex = nullptr; public: - CCompiledUStrRegExpr(const UChar * _regex, bool _isCaseSensitive = false) - { - int errNum = 0; - PCRE2_SIZE errOffset; - uint32_t options = (PCRE2_UCP | (_isCaseSensitive ? 
0 : PCRE2_CASELESS)); - - compiledRegex = pcre2_compile_16((PCRE2_SPTR16)_regex, PCRE2_ZERO_TERMINATED, options, &errNum, &errOffset, pcre2CompileContext16); - - if (compiledRegex == nullptr) - { - failWithUPCRE2Error(errNum, "Error in regex pattern: ", _regex, errOffset); - } - } - CCompiledUStrRegExpr(int _regexLength, const UChar * _regex, bool _isCaseSensitive = false) { int errNum = 0; PCRE2_SIZE errOffset; uint32_t options = (PCRE2_UCP | (_isCaseSensitive ? 0 : PCRE2_CASELESS)); - compiledRegex = pcre2_compile_16((PCRE2_SPTR16)_regex, _regexLength, options, &errNum, &errOffset, pcre2CompileContext16); + pcre2_code_16 * newCompiledRegex = pcre2_compile_16((PCRE2_SPTR16)_regex, _regexLength, options, &errNum, &errOffset, pcre2CompileContext16); - if (compiledRegex == nullptr) + if (newCompiledRegex == nullptr) { - failWithUPCRE2Error(errNum, "Error in regex pattern: ", _regex, errOffset); + failWithPCRE2Error(errNum, "Error in regex pattern: ", _regex, _regexLength, errOffset); } - } - ~CCompiledUStrRegExpr() - { - pcre2_code_free_16(compiledRegex); + compiledRegex = std::shared_ptr(newCompiledRegex, pcre2_code_free_16); } + CCompiledUStrRegExpr(const RegexCacheEntry& cacheEntry) + : compiledRegex(cacheEntry.getCompiledRegex16()) + {} + + std::shared_ptr getCompiledRegex() const { return compiledRegex; } + void replace(size32_t & outlen, UChar * & out, size32_t slen, const UChar * str, size32_t rlen, UChar const * replace) const { PCRE2_SIZE pcreLen = 0; outlen = 0; - pcre2_match_data_16 * matchData = pcre2_match_data_create_from_pattern_16(compiledRegex, pcre2GeneralContext16); + pcre2_match_data_16 * matchData = pcre2_match_data_create_from_pattern_16(compiledRegex.get(), pcre2GeneralContext16); uint32_t replaceOptions = PCRE2_SUBSTITUTE_GLOBAL|PCRE2_SUBSTITUTE_EXTENDED; @@ -574,20 +769,20 @@ class CCompiledUStrRegExpr : implements ICompiledUStrRegExpr // Note that pcreLen will include space for a terminating null character; // we have to allocate memory for that byte to avoid a buffer overrun, // but we won't count that terminating byte - int replaceResult = pcre2_substitute_16(compiledRegex, (PCRE2_SPTR16)str, slen, 0, replaceOptions|PCRE2_SUBSTITUTE_OVERFLOW_LENGTH, matchData, pcre2MatchContext16, (PCRE2_SPTR16)replace, rlen, nullptr, &pcreLen); + int replaceResult = pcre2_substitute_16(compiledRegex.get(), (PCRE2_SPTR16)str, slen, 0, replaceOptions|PCRE2_SUBSTITUTE_OVERFLOW_LENGTH, matchData, pcre2MatchContext16, (PCRE2_SPTR16)replace, rlen, nullptr, &pcreLen); if (replaceResult < 0 && replaceResult != PCRE2_ERROR_NOMEMORY) { // PCRE2_ERROR_NOMEMORY is a normal result when we're just asking for the size of the output pcre2_match_data_free_16(matchData); - failWithUPCRE2Error(replaceResult, "Error in regex replace: "); + failWithPCRE2Error(replaceResult, "Error in regex replace: "); } if (pcreLen > 0) { out = (UChar *)rtlMalloc(pcreLen * sizeof(UChar)); - replaceResult = pcre2_substitute_16(compiledRegex, (PCRE2_SPTR16)str, slen, 0, replaceOptions, matchData, pcre2MatchContext16, (PCRE2_SPTR16)replace, rlen, (PCRE2_UCHAR16 *)out, &pcreLen); + replaceResult = pcre2_substitute_16(compiledRegex.get(), (PCRE2_SPTR16)str, slen, 0, replaceOptions, matchData, pcre2MatchContext16, (PCRE2_SPTR16)replace, rlen, (PCRE2_UCHAR16 *)out, &pcreLen); // Note that, weirdly, pcreLen will now contain the number of code points // in the result *excluding* the null terminator, so pcreLen will @@ -596,7 +791,7 @@ class CCompiledUStrRegExpr : implements ICompiledUStrRegExpr if (replaceResult < 0) { 
pcre2_match_data_free_16(matchData); - failWithUPCRE2Error(replaceResult, "Error in regex replace: "); + failWithPCRE2Error(replaceResult, "Error in regex replace: "); } } @@ -617,7 +812,7 @@ class CCompiledUStrRegExpr : implements ICompiledUStrRegExpr size32_t outBytes = 0; PCRE2_SIZE offset = 0; uint32_t matchOptions = 0; - pcre2_match_data_16 * matchData = pcre2_match_data_create_from_pattern_16(compiledRegex, pcre2GeneralContext16); + pcre2_match_data_16 * matchData = pcre2_match_data_create_from_pattern_16(compiledRegex.get(), pcre2GeneralContext16); // Capture groups are ignored when gathering match results into a set, // so we will focus on only the first match (the entire matched string); @@ -626,7 +821,7 @@ class CCompiledUStrRegExpr : implements ICompiledUStrRegExpr while (offset < _subjectLen) { - int numMatches = pcre2_match_16(compiledRegex, (PCRE2_SPTR16)_subject, _subjectLen, offset, matchOptions, matchData, pcre2MatchContext16); + int numMatches = pcre2_match_16(compiledRegex.get(), (PCRE2_SPTR16)_subject, _subjectLen, offset, matchOptions, matchData, pcre2MatchContext16); if (numMatches < 0) { @@ -639,7 +834,7 @@ class CCompiledUStrRegExpr : implements ICompiledUStrRegExpr { // Treat everything else as an error pcre2_match_data_free_16(matchData); - failWithUPCRE2Error(numMatches, "Error in regex getMatchSet: "); + failWithPCRE2Error(numMatches, "Error in regex getMatchSet: "); } } else if (numMatches > 0) @@ -682,16 +877,63 @@ class CCompiledUStrRegExpr : implements ICompiledUStrRegExpr // UNICODE implementation //--------------------------------------------------------------------------- +/** + * @brief Fetches or creates a compiled Unicode regular expression object. + * + * This function fetches a compiled Unicode regular expression object from the cache if it exists, + * or creates a new one if it doesn't. The regular expression object is created based on the provided + * regex pattern, length, and case sensitivity flag. The created object is then cached for future use. + * + * @param _regexLength The length of the regex pattern, in code points. + * @param _regex The regex pattern. + * @param _isCaseSensitive Flag indicating whether the regex pattern is case sensitive or not. + * @return A pointer to a copy of the fetched or created CCompiledUStrRegExpr object. The returned object + * * must eventually be deleted. + */ +CCompiledUStrRegExpr* fetchOrCreateCompiledUStrRegExpr(int _regexLength, const UChar * _regex, bool _isCaseSensitive) +{ + if (compiledCacheEnabled) + { + CCompiledUStrRegExpr * compiledObjPtr = nullptr; + unsigned int regexSize = _regexLength * sizeof(UChar); + uint32_t options = PCRE2_UCP | (_isCaseSensitive ? 
0 : PCRE2_CASELESS);
+        hash64_t regexHash = RegexCacheEntry::hashValue(regexSize, reinterpret_cast<const char *>(_regex), options);
+
+        // Check the cache
+        {
+            CriticalBlock lock(compiledStrRegExprLock);
+            RegexCacheEntry * cacheEntry = compiledStrRegExprCache.get(regexHash).get();
+
+            if (cacheEntry && cacheEntry->hasSamePattern(regexSize, reinterpret_cast<const char *>(_regex), options))
+            {
+                // Return a new copy of the cached object
+                return new CCompiledUStrRegExpr(*cacheEntry);
+            }
+
+            // Create a new compiled pattern object
+            compiledObjPtr = new CCompiledUStrRegExpr(_regexLength, _regex, _isCaseSensitive);
+            // Create a cache entry for the new object
+            compiledStrRegExprCache.set(regexHash, std::make_shared<RegexCacheEntry>(regexSize, reinterpret_cast<const char *>(_regex), options, compiledObjPtr->getCompiledRegex()));
+        }
+
+        return compiledObjPtr;
+    }
+    else
+    {
+        return new CCompiledUStrRegExpr(_regexLength, _regex, _isCaseSensitive);
+    }
+}
+
+//---------------------------------------------------------------------------
+
 ECLRTL_API ICompiledUStrRegExpr * rtlCreateCompiledUStrRegExpr(const UChar * regExpr, bool isCaseSensitive)
 {
-    CCompiledUStrRegExpr * expr = new CCompiledUStrRegExpr(regExpr, isCaseSensitive);
-    return expr;
+    return fetchOrCreateCompiledUStrRegExpr(rtlUnicodeStrlen(regExpr), regExpr, isCaseSensitive);
 }
 
 ECLRTL_API ICompiledUStrRegExpr * rtlCreateCompiledUStrRegExpr(int regExprLength, const UChar * regExpr, bool isCaseSensitive)
 {
-    CCompiledUStrRegExpr * expr = new CCompiledUStrRegExpr(regExprLength, regExpr, isCaseSensitive);
-    return expr;
+    return fetchOrCreateCompiledUStrRegExpr(regExprLength, regExpr, isCaseSensitive);
 }
 
 ECLRTL_API void rtlDestroyCompiledUStrRegExpr(ICompiledUStrRegExpr * compiledExpr)
@@ -737,6 +979,7 @@ MODULE_INIT(INIT_PRIORITY_ECLRTL_ECLRTL)
     pcre2CompileContext16 = pcre2_compile_context_create_16(pcre2GeneralContext16);
     pcre2MatchContext16 = pcre2_match_context_create_16(pcre2GeneralContext16);
 #endif // _USE_ICU
+    initMaxCacheSize();
     return true;
 }
diff --git a/rtl/eclrtl/eclrtl.cpp b/rtl/eclrtl/eclrtl.cpp
index 1b8a9f1d313..156cad7066e 100644
--- a/rtl/eclrtl/eclrtl.cpp
+++ b/rtl/eclrtl/eclrtl.cpp
@@ -4960,6 +4960,19 @@ unsigned rtlUtf8Size(unsigned len, const void * _data)
     return offset;
 }
 
+unsigned rtlUtf8Length(const void * _data)
+{
+    const byte * data = (const byte *)_data;
+    size32_t length = 0;
+    unsigned offset = 0;
+    while (data[offset])
+    {
+        offset += readUtf8Size(data+offset);
+        length++;
+    }
+    return length;
+}
+
 unsigned rtlUtf8Length(unsigned size, const void * _data)
 {
     const byte * data = (const byte *)_data;
diff --git a/rtl/eclrtl/eclrtl.hpp b/rtl/eclrtl/eclrtl.hpp
index 53d4a09919b..6cb401b69b0 100644
--- a/rtl/eclrtl/eclrtl.hpp
+++ b/rtl/eclrtl/eclrtl.hpp
@@ -667,6 +667,7 @@ ECLRTL_API void rtlStrToVUnicode(unsigned outlen, UChar * out, unsigned inlen, c
 ECLRTL_API unsigned rtlUtf8Size(const void * data);
 ECLRTL_API unsigned rtlUtf8Size(unsigned len, const void * data);
+ECLRTL_API unsigned rtlUtf8Length(const void * data);
 ECLRTL_API unsigned rtlUtf8Length(unsigned size, const void * data);
 ECLRTL_API unsigned rtlUtf8Char(const void * data);
 ECLRTL_API void rtlUtf8ToData(size32_t outlen, void * out, size32_t inlen, const char *in);
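The new null-terminated rtlUtf8Length overload counts code points rather than bytes by stepping over one UTF-8 sequence at a time. A standalone sketch of the same walk follows; readUtf8Size is an internal rtl helper, so this illustrative version (utf8SequenceSize and utf8Length are invented names, not HPCC APIs) decodes the lead byte inline.

    #include <cassert>
    #include <cstddef>

    static size_t utf8SequenceSize(unsigned char lead)
    {
        if (lead < 0x80) return 1;           // 0xxxxxxx : single-byte (ASCII)
        if ((lead & 0xE0) == 0xC0) return 2; // 110xxxxx : two-byte sequence
        if ((lead & 0xF0) == 0xE0) return 3; // 1110xxxx : three-byte sequence
        if ((lead & 0xF8) == 0xF0) return 4; // 11110xxx : four-byte sequence
        return 1;                            // invalid lead byte: step past it
    }

    // Counts code points in a null-terminated, well-formed UTF-8 string
    static size_t utf8Length(const char * data)
    {
        size_t length = 0;
        size_t offset = 0;
        while (data[offset])
        {
            offset += utf8SequenceSize((unsigned char)data[offset]);
            length++;
        }
        return length;
    }

    int main()
    {
        assert(utf8Length("abc") == 3);
        assert(utf8Length("caf\xC3\xA9") == 4);  // e-acute is two bytes, one code point
        assert(utf8Length("\xE2\x82\xAC") == 1); // U+20AC (euro sign) is three bytes
        return 0;
    }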
diff --git a/system/jlib/jhash.hpp b/system/jlib/jhash.hpp
index 3653abd4c21..91b154d052c 100644
--- a/system/jlib/jhash.hpp
+++ b/system/jlib/jhash.hpp
@@ -21,6 +21,7 @@
 #define JHASH_HPP
 
 #include
+#include
 #include
 #include
@@ -687,4 +688,103 @@ class CTimeLimitedCache
     }
 };
 
+/**
+ * CLRUCache
+ *
+ * Least-Recently-Used cache class, specialized for key and
+ * value pointer types (the value is a pointer or a data type
+ * where a nullptr could represent a missing value).
+ *
+ * The get() method returns a found object by value. This
+ * is intentional and very useful for maintaining refcounts.
+ *
+ * There is a minimum size for the cache, defined by
+ * LRU_MIN_CACHE_SIZE. Attempts to create a smaller cache
+ * will be silently changed to the minimum size. If no
+ * initial size is provided to the constructor, the cache
+ * size will be set to LRU_MIN_CACHE_SIZE.
+ *
+ * Methods here are not thread-safe. Callers should block
+ * concurrent access for non-const methods (which are most
+ * of them).
+ */
+
+#define LRU_MIN_CACHE_SIZE 10
+
+template <typename KEYTYPE, typename PTRTYPE>
+class CLRUCache
+{
+    private:
+        std::list<KEYTYPE> recentList;
+        std::unordered_map<KEYTYPE, std::pair<PTRTYPE, typename std::list<KEYTYPE>::iterator>> lookupMap;
+        size32_t maxCacheSize;
+
+        void _downsize()
+        {
+            while (lookupMap.size() > maxCacheSize)
+            {
+                lookupMap.erase(recentList.back());
+                recentList.pop_back();
+            }
+        }
+
+    public:
+        CLRUCache() : maxCacheSize(LRU_MIN_CACHE_SIZE) {}
+        CLRUCache(size32_t _maxCacheSize) : maxCacheSize(_maxCacheSize < LRU_MIN_CACHE_SIZE ? LRU_MIN_CACHE_SIZE : _maxCacheSize) {}
+        CLRUCache(const CLRUCache& other) = delete;
+        ~CLRUCache() = default;
+
+        size32_t getCacheSize() const
+        {
+            return lookupMap.size();
+        }
+
+        size32_t setMaxCacheSize(size32_t _maxCacheSize)
+        {
+            maxCacheSize = _maxCacheSize < LRU_MIN_CACHE_SIZE ? LRU_MIN_CACHE_SIZE : _maxCacheSize;
+            _downsize();
+            return maxCacheSize;
+        }
+
+        PTRTYPE get(const KEYTYPE& key)
+        {
+            auto foundIter = lookupMap.find(key);
+            if (foundIter == lookupMap.end())
+                return nullptr;
+
+            recentList.splice(recentList.begin(), recentList, foundIter->second.second);
+            foundIter->second.second = recentList.begin();
+            return foundIter->second.first;
+        }
+
+        void set(const KEYTYPE& key, const PTRTYPE& value)
+        {
+            auto foundIter = lookupMap.find(key);
+
+            if (foundIter == lookupMap.end())
+            {
+                recentList.push_front(key);
+                lookupMap[key] = {value, recentList.begin()};
+                _downsize();
+            }
+            else
+            {
+                recentList.splice(recentList.begin(), recentList, foundIter->second.second);
+                foundIter->second.first = value;
+                foundIter->second.second = recentList.begin();
+            }
+        }
+
+        bool remove(const KEYTYPE& key)
+        {
+            auto foundIter = lookupMap.find(key);
+            if (foundIter == lookupMap.end())
+                return false;
+
+            recentList.erase(foundIter->second.second);
+            lookupMap.erase(foundIter);
+            return true;
+        }
+};
+
 #endif
diff --git a/testing/regress/ecl/key/regex_cache_string.xml b/testing/regress/ecl/key/regex_cache_string.xml
new file mode 100644
index 00000000000..1ef7377e136
--- /dev/null
+++ b/testing/regress/ecl/key/regex_cache_string.xml
@@ -0,0 +1,3 @@
+<Dataset name='result'>
+ <Row><result>PASSED</result></Row>
+</Dataset>
diff --git a/testing/regress/ecl/key/regex_cache_unicode.xml b/testing/regress/ecl/key/regex_cache_unicode.xml
new file mode 100644
index 00000000000..1ef7377e136
--- /dev/null
+++ b/testing/regress/ecl/key/regex_cache_unicode.xml
@@ -0,0 +1,3 @@
+<Dataset name='result'>
+ <Row><result>PASSED</result></Row>
+</Dataset>
diff --git a/testing/regress/ecl/key/regex_cache_utf8.xml b/testing/regress/ecl/key/regex_cache_utf8.xml
new file mode 100644
index 00000000000..1ef7377e136
--- /dev/null
+++ b/testing/regress/ecl/key/regex_cache_utf8.xml
@@ -0,0 +1,3 @@
+<Dataset name='result'>
+ <Row><result>PASSED</result></Row>
+</Dataset>
diff --git a/testing/regress/ecl/regex_cache_string.ecl b/testing/regress/ecl/regex_cache_string.ecl
new file mode 100644
index 00000000000..0222bb83389
--- /dev/null
+++ b/testing/regress/ecl/regex_cache_string.ecl
@@ -0,0 +1,70 @@
+/*############################################################################## + + HPCC SYSTEMS software Copyright (C) 2024 HPCC Systems®. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +############################################################################## */ + +#OPTION('globalFold', FALSE); + +IMPORT Std; + +regexDS := DATASET + ( + 100000, + TRANSFORM + ( + {STRING a}, + SELF.a := (STRING)RANDOM() + ), + DISTRIBUTED + ); + +res := PROJECT + ( + NOFOLD(regexDS), + TRANSFORM + ( + { + RECORDOF(LEFT), + STRING via_regex, + STRING via_find, + BOOLEAN is_matching + }, + SELF.via_regex := REGEXREPLACE(LEFT.a[1], LEFT.a, 'x'), + SELF.via_find := Std.Str.SubstituteIncluded(LEFT.a, LEFT.a[1], 'x'), + SELF.is_matching := SELF.via_regex = SELF.via_find, + SELF := LEFT + ), + PARALLEL(10) + ); + +numTests := COUNT(regexDS); +testsPassed := res(is_matching); +numTestsPassed := COUNT(testsPassed); +testsFailed := res(~is_matching); +numTestsFailed := COUNT(testsFailed); + +MIN_PASS_PERCENTAGE := 0.95; + +passedPercentage := numTestsPassed / numTests; +isSuccess := passedPercentage >= MIN_PASS_PERCENTAGE; +resultStr := IF(isSuccess, 'PASSED', 'FAILED'); +fullResultStr := resultStr + ': ' + (STRING)(ROUND(passedPercentage * 100, 2)); + +// Output for unit test parsing +OUTPUT(resultStr, NAMED('result')); + +// Uncomment the following to see details +// OUTPUT(numTests, NAMED('num_tests')); +// OUTPUT(numTestsPassed, NAMED('num_passed')); +// OUTPUT(numTestsFailed, NAMED('num_failed')); +// OUTPUT(fullResultStr, NAMED('result_desc')); +// OUTPUT(testsFailed, NAMED('failed_tests'), ALL); diff --git a/testing/regress/ecl/regex_cache_unicode.ecl b/testing/regress/ecl/regex_cache_unicode.ecl new file mode 100644 index 00000000000..c0fa569ec79 --- /dev/null +++ b/testing/regress/ecl/regex_cache_unicode.ecl @@ -0,0 +1,70 @@ +/*############################################################################## + + HPCC SYSTEMS software Copyright (C) 2024 HPCC Systems®. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+############################################################################## */ + +#OPTION('globalFold', FALSE); + +IMPORT Std; + +regexDS := DATASET + ( + 100000, + TRANSFORM + ( + {UNICODE a}, + SELF.a := (UNICODE)RANDOM() + ), + DISTRIBUTED + ); + +res := PROJECT + ( + NOFOLD(regexDS), + TRANSFORM + ( + { + RECORDOF(LEFT), + UNICODE via_regex, + UNICODE via_find, + BOOLEAN is_matching + }, + SELF.via_regex := REGEXREPLACE(LEFT.a[1], LEFT.a, u'x'), + SELF.via_find := (UNICODE)Std.Uni.SubstituteIncluded(LEFT.a, LEFT.a[1], u'x'), + SELF.is_matching := SELF.via_regex = SELF.via_find, + SELF := LEFT + ), + PARALLEL(10) + ); + +numTests := COUNT(regexDS); +testsPassed := res(is_matching); +numTestsPassed := COUNT(testsPassed); +testsFailed := res(~is_matching); +numTestsFailed := COUNT(testsFailed); + +MIN_PASS_PERCENTAGE := 0.95; + +passedPercentage := numTestsPassed / numTests; +isSuccess := passedPercentage >= MIN_PASS_PERCENTAGE; +resultStr := IF(isSuccess, 'PASSED', 'FAILED'); +fullResultStr := resultStr + ': ' + (STRING)(ROUND(passedPercentage * 100, 2)); + +// Output for unit test parsing +OUTPUT(resultStr, NAMED('result')); + +// Uncomment the following to see details +// OUTPUT(numTests, NAMED('num_tests')); +// OUTPUT(numTestsPassed, NAMED('num_passed')); +// OUTPUT(numTestsFailed, NAMED('num_failed')); +// OUTPUT(fullResultStr, NAMED('result_desc')); +// OUTPUT(testsFailed, NAMED('failed_tests'), ALL); diff --git a/testing/regress/ecl/regex_cache_utf8.ecl b/testing/regress/ecl/regex_cache_utf8.ecl new file mode 100644 index 00000000000..e3cf3d4398d --- /dev/null +++ b/testing/regress/ecl/regex_cache_utf8.ecl @@ -0,0 +1,70 @@ +/*############################################################################## + + HPCC SYSTEMS software Copyright (C) 2024 HPCC Systems®. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+############################################################################## */
+
+#OPTION('globalFold', FALSE);
+
+IMPORT Std;
+
+regexDS := DATASET
+    (
+        100000,
+        TRANSFORM
+            (
+                {UTF8 a},
+                SELF.a := (UTF8)RANDOM()
+            ),
+        DISTRIBUTED
+    );
+
+res := PROJECT
+    (
+        NOFOLD(regexDS),
+        TRANSFORM
+            (
+                {
+                    RECORDOF(LEFT),
+                    UTF8 via_regex,
+                    UTF8 via_find,
+                    BOOLEAN is_matching
+                },
+                SELF.via_regex := REGEXREPLACE(LEFT.a[1], LEFT.a, u8'x'),
+                SELF.via_find := (UTF8)Std.Uni.SubstituteIncluded(LEFT.a, LEFT.a[1], u8'x'),
+                SELF.is_matching := SELF.via_regex = SELF.via_find,
+                SELF := LEFT
+            ),
+        PARALLEL(10)
+    );
+
+numTests := COUNT(regexDS);
+testsPassed := res(is_matching);
+numTestsPassed := COUNT(testsPassed);
+testsFailed := res(~is_matching);
+numTestsFailed := COUNT(testsFailed);
+
+MIN_PASS_PERCENTAGE := 0.95;
+
+passedPercentage := numTestsPassed / numTests;
+isSuccess := passedPercentage >= MIN_PASS_PERCENTAGE;
+resultStr := IF(isSuccess, 'PASSED', 'FAILED');
+fullResultStr := resultStr + ': ' + (STRING)(ROUND(passedPercentage * 100, 2));
+
+// Output for unit test parsing
+OUTPUT(resultStr, NAMED('result'));
+
+// Uncomment the following to see details
+// OUTPUT(numTests, NAMED('num_tests'));
+// OUTPUT(numTestsPassed, NAMED('num_passed'));
+// OUTPUT(numTestsFailed, NAMED('num_failed'));
+// OUTPUT(fullResultStr, NAMED('result_desc'));
+// OUTPUT(testsFailed, NAMED('failed_tests'), ALL);

From 3091b3d698eac14e09ae255747dad4827ad2124d Mon Sep 17 00:00:00 2001
From: Jake Smith
Date: Tue, 18 Jun 2024 18:37:55 +0100
Subject: [PATCH 086/151] HPCC-31807 Remove stale job queue client entries

Signed-off-by: Jake Smith
---
 common/workunit/wujobq.cpp | 14 +++++++++++---
 dali/base/dasds.cpp        | 13 +++++++++++++
 2 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/common/workunit/wujobq.cpp b/common/workunit/wujobq.cpp
index 0a5bd0576a1..99c02da2fe7 100644
--- a/common/workunit/wujobq.cpp
+++ b/common/workunit/wujobq.cpp
@@ -1122,11 +1122,19 @@ class CJobQueue: public CJobQueueBase, implements IJobQueue
     void disconnect() // signal no longer wil be dequeing (optional - done automatically on release)
     {
         Cconnlockblock block(this,true);
-        if (connected) {
+        if (connected)
+        {
             dounsubscribe();
-            ForEachQueue(qd) {
+            ForEachQueue(qd)
+            {
                 IPropertyTree *croot = queryClientRootSession(*qd);
-                croot->setPropInt64("@connected",croot->getPropInt64("@connected",0)-1);
+                unsigned connectedCount = croot->getPropInt64("@connected");
+                if (connectedCount) // should never be 0, but guard JIC
+                    connectedCount--;
+                if (connectedCount)
+                    croot->setPropInt64("@connected", connectedCount);
+                else
+                    qd->root->removeTree(croot);
             }
             connected = false;
         }
diff --git a/dali/base/dasds.cpp b/dali/base/dasds.cpp
index 9478cbe2665..7257522fc03 100644
--- a/dali/base/dasds.cpp
+++ b/dali/base/dasds.cpp
@@ -4938,6 +4938,18 @@ void initializeInternals(IPropertyTree *root)
     root->addPropTree("Status/Servers",createPTree());
 }
 
+void clearStaleMetaData(IPropertyTree *root)
+{
+    // JobQueues
+    // Remove all Client entries from all queues. By definition they are stale (they should normally be removed when the client disconnects)
+    Owned<IPropertyTreeIterator> jobQueues = root->getElements("JobQueues/Queue");
+    ForEach(*jobQueues)
+    {
+        IPropertyTree &queue = jobQueues->query();
+        while (queue.removeProp("Client"));
+    }
+}
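The two halves of this fix cooperate: disconnect() now decrements the per-client connection count and removes the client record outright once it reaches zero, while clearStaleMetaData() sweeps away any records that survived an unclean shutdown. The standalone sketch below illustrates that pattern; ConnectionRegistry is an invented class, with a plain std::map standing in for the Dali property trees the real code uses.

    #include <cassert>
    #include <map>
    #include <string>

    class ConnectionRegistry
    {
        std::map<std::string, unsigned> clients; // client id -> connection count

    public:
        void connect(const std::string & id) { clients[id]++; }

        void disconnect(const std::string & id)
        {
            auto it = clients.find(id);
            if (it == clients.end())
                return;
            if (it->second)          // should never be 0, but guard just in case
                it->second--;
            if (it->second == 0)
                clients.erase(it);   // last connection gone: remove the record
        }

        // Startup sweep: by definition any surviving record is stale
        void clearStale() { clients.clear(); }

        size_t size() const { return clients.size(); }
    };

    int main()
    {
        ConnectionRegistry reg;
        reg.connect("clientA");
        reg.connect("clientA");
        reg.disconnect("clientA");
        assert(reg.size() == 1); // one connection still open
        reg.disconnect("clientA");
        assert(reg.size() == 0); // record removed, not left behind at 0
        return 0;
    }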
+
 IPropertyTree *loadStore(const char *storeFilename, unsigned edition, IPTreeMaker *iMaker, unsigned crcValidation, bool logErrorsOnly=false, const bool *abort=NULL)
 {
     CHECKEDCRITICALBLOCK(loadStoreCrit, fakeCritTimeout);
@@ -6543,6 +6555,7 @@ void CCovenSDSManager::loadStore(const char *storeName, const bool *abort)
     }
     Owned<IRemoteConnection> conn = connect("/", 0, RTM_INTERNAL, INFINITE);
     initializeInternals(conn->queryRoot());
+    clearStaleMetaData(conn->queryRoot());
     conn.clear();
     initializeStorageGroups(oldEnvironment);
 }

From e4082eb1ebe9932ed334fe6d71e6da230d287420 Mon Sep 17 00:00:00 2001
From: Shamser Ahmed
Date: Thu, 13 Jun 2024 11:55:16 +0100
Subject: [PATCH 087/151] HPCC-31647 spill stats for join

Signed-off-by: Shamser Ahmed
---
 thorlcr/activities/lookupjoin/thlookupjoinslave.cpp | 6 ++++--
 thorlcr/thorutil/thmem.cpp                          | 9 +++++----
 thorlcr/thorutil/thmem.hpp                          | 2 +-
 thorlcr/thorutil/thormisc.cpp                       | 2 +-
 4 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp b/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp
index 3db86c42edc..09662742d09 100644
--- a/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp
+++ b/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp
@@ -1751,7 +1751,7 @@ class CLookupJoinActivityBase : public CInMemJoinBase overflowWriteFile;
-    Owned overflowWriteStream;
+    Owned overflowWriteStream;
     rowcount_t overflowWriteCount;
     OwnedMalloc channelDistributors;
     unsigned nextRhsToSpill = 0;
@@ -1881,7 +1881,7 @@ class CLookupJoinActivityBase : public CInMemJoinBasecreateOwnedTempFile(tempName.str()));
             VStringBuffer spillPrefixStr("clearAllNonLocalRows(%d)", SPILL_PRIORITY_SPILLABLE_STREAM);
             // 3rd param. is skipNulls = true, the row arrays may have had the non-local rows delete already.
- rows.save(file->queryIFile(), spillCompInfo, true, spillPrefixStr.str()); // saves committed rows + rows.save(*file, spillCompInfo, true, spillPrefixStr.str()); // saves committed rows rows.flushMarker = 0; // reset because array will be moved as a consequence of further adds, so next scan must be from start } @@ -2900,6 +2900,7 @@ class CLookupJoinActivityBase : public CInMemJoinBaseputRow(rhsInRowsTemp.getClear(r)); + overflowWriteFile->noteSize(overflowWriteStream->getStatistic(StSizeDiskWrite)); return true; } if (hasFailedOverToLocal()) @@ -2949,6 +2950,7 @@ class CLookupJoinActivityBase : public CInMemJoinBaseputRow(rhsInRowsTemp.getClear(r)); + overflowWriteFile->noteSize(overflowWriteStream->getStatistic(StSizeDiskWrite)); return true; } virtual void gatherActiveStats(CRuntimeStatisticCollection &activeStats) const diff --git a/thorlcr/thorutil/thmem.cpp b/thorlcr/thorutil/thmem.cpp index c3d2d795a88..299f1e3118a 100644 --- a/thorlcr/thorutil/thmem.cpp +++ b/thorlcr/thorutil/thmem.cpp @@ -247,7 +247,7 @@ class CSpillableStreamBase : public CSpillable GetTempFilePath(tempName, tempPrefix.str()); spillFile.setown(activity.createOwnedTempFile(tempName.str())); VStringBuffer spillPrefixStr("SpillableStream(%u)", spillPriority); - rows.save(spillFile->queryIFile(), spillCompInfo, false, spillPrefixStr.str()); // saves committed rows + rows.save(*spillFile, spillCompInfo, false, spillPrefixStr.str()); // saves committed rows rows.kill(); // no longer needed, readers will pull from spillFile. NB: ok to kill array as rows is never written to or expanded spillFile->noteSize(spillFile->queryIFile().size()); return true; @@ -1375,7 +1375,7 @@ static int callbackSortRev(IInterface * const *cb2, IInterface * const *cb1) return 1; } -rowidx_t CThorSpillableRowArray::save(IFile &iFile, unsigned _spillCompInfo, bool skipNulls, const char *_tracingPrefix) +rowidx_t CThorSpillableRowArray::save(CFileOwner &iFileOwner, unsigned _spillCompInfo, bool skipNulls, const char *_tracingPrefix) { rowidx_t n = numCommitted(); if (0 == n) @@ -1405,7 +1405,7 @@ rowidx_t CThorSpillableRowArray::save(IFile &iFile, unsigned _spillCompInfo, boo nextCB = &cbCopy.popGet(); nextCBI = nextCB->queryRecordNumber(); } - Owned writer = createRowWriter(&iFile, rowIf, rwFlags, nullptr, compBlkSz); + Owned writer = createRowWriter(&iFileOwner.queryIFile(), rowIf, rwFlags, nullptr, compBlkSz); rowidx_t i=0; rowidx_t rowsWritten=0; try @@ -1444,6 +1444,7 @@ rowidx_t CThorSpillableRowArray::save(IFile &iFile, unsigned _spillCompInfo, boo ++i; } writer->flush(NULL); + iFileOwner.noteSize(writer->getStatistic(StSizeDiskWrite)); } catch (IException *e) { @@ -1656,7 +1657,7 @@ class CThorRowCollectorBase : public CSpillable GetTempFilePath(tempName, tempPrefix.str()); VStringBuffer spillPrefixStr("%sRowCollector(%d)", tracingPrefix.str(), spillPriority); Owned tempFileOwner = activity.createOwnedTempFile(tempName.str()); - spillableRows.save(tempFileOwner->queryIFile(), spillCompInfo, false, spillPrefixStr.str()); // saves committed rows + spillableRows.save(*tempFileOwner, spillCompInfo, false, spillPrefixStr.str()); // saves committed rows spillFiles.append(tempFileOwner.getLink()); ++overflowCount; statOverflowCount.fastAdd(1); // NB: this is total over multiple uses of this class diff --git a/thorlcr/thorutil/thmem.hpp b/thorlcr/thorutil/thmem.hpp index ac7a1dd60ee..8e4f1b896a8 100644 --- a/thorlcr/thorutil/thmem.hpp +++ b/thorlcr/thorutil/thmem.hpp @@ -480,7 +480,7 @@ class graph_decl CThorSpillableRowArray : private 
CThorExpandingRowArray, implem //A thread calling the following functions must own the lock, or guarantee no other thread will access void sort(ICompare & compare, unsigned maxcores); - rowidx_t save(IFile &file, unsigned _spillCompInfo, bool skipNulls, const char *tracingPrefix); + rowidx_t save(CFileOwner &file, unsigned _spillCompInfo, bool skipNulls, const char *tracingPrefix); inline rowidx_t numCommitted() const { return commitRows - firstRow; } //MORE::Not convinced this is very safe! inline rowidx_t queryTotalRows() const { return CThorExpandingRowArray::ordinality(); } // includes uncommited rows diff --git a/thorlcr/thorutil/thormisc.cpp b/thorlcr/thorutil/thormisc.cpp index e3759811a48..48dc1231ee1 100644 --- a/thorlcr/thorutil/thormisc.cpp +++ b/thorlcr/thorutil/thormisc.cpp @@ -83,7 +83,7 @@ const StatisticsMapping indexReadActivityStatistics({StNumRowsProcessed}, indexR const StatisticsMapping indexWriteActivityStatistics({StPerReplicated, StNumLeafCacheAdds, StNumNodeCacheAdds, StNumBlobCacheAdds }, basicActivityStatistics, diskWriteRemoteStatistics); const StatisticsMapping keyedJoinActivityStatistics({ StNumIndexAccepted, StNumPreFiltered, StNumDiskSeeks, StNumDiskAccepted, StNumDiskRejected}, basicActivityStatistics, indexReadFileStatistics); const StatisticsMapping loopActivityStatistics({StNumIterations}, basicActivityStatistics); -const StatisticsMapping lookupJoinActivityStatistics({StNumSmartJoinSlavesDegradedToStd, StNumSmartJoinDegradedToLocal}, basicActivityStatistics); +const StatisticsMapping lookupJoinActivityStatistics({StNumSmartJoinSlavesDegradedToStd, StNumSmartJoinDegradedToLocal}, spillStatistics, basicActivityStatistics); const StatisticsMapping joinActivityStatistics({StNumLeftRows, StNumRightRows}, basicActivityStatistics, spillStatistics); const StatisticsMapping diskReadActivityStatistics({StNumDiskRowsRead, }, basicActivityStatistics, diskReadRemoteStatistics); const StatisticsMapping diskWriteActivityStatistics({StPerReplicated}, basicActivityStatistics, diskWriteRemoteStatistics); From 0a8c4d1e556dc6ce4b0bc9537aecd1176ba9b8c3 Mon Sep 17 00:00:00 2001 From: M Kelly Date: Wed, 19 Jun 2024 09:33:47 -0400 Subject: [PATCH 088/151] HPCC-32096 Expose MP listen queue somaxconn for containerized envs Signed-off-by: M Kelly --- system/mp/mpcomm.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/system/mp/mpcomm.cpp b/system/mp/mpcomm.cpp index 5b37a15e2a2..7154b61e6e1 100644 --- a/system/mp/mpcomm.cpp +++ b/system/mp/mpcomm.cpp @@ -2165,6 +2165,11 @@ CMPConnectThread::CMPConnectThread(CMPServer *_parent, unsigned port, bool _list parent->mpTraceLevel = getComponentConfigSP()->getPropInt("logging/@detail", InfoMsgThreshold); if (listen) { + if (getComponentConfigSP()->hasProp("expert/@mpSoMaxConn")) + mpSoMaxConn = getComponentConfigSP()->getPropInt("expert/@mpSoMaxConn"); + else + mpSoMaxConn = getGlobalConfigSP()->getPropInt("expert/@mpSoMaxConn", 0); + if (getComponentConfigSP()->hasProp("expert/@acceptThreadPoolSize")) acceptThreadPoolSize = getComponentConfigSP()->getPropInt("expert/@acceptThreadPoolSize"); else From 0e0849ec5611689bf0398df81f5a95d12d8dbd77 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Thu, 20 Jun 2024 16:37:37 +0100 Subject: [PATCH 089/151] HPCC-31931 Manual copy/paste of WU errors Fix flicker + poor layout of the error/warning pane. 
Signed-off-by: Gordon Smith --- esp/src/src-react/components/InfoGrid.tsx | 51 ++++++++----- .../src-react/components/controls/Grid.tsx | 72 +++++++++++++------ 2 files changed, 83 insertions(+), 40 deletions(-) diff --git a/esp/src/src-react/components/InfoGrid.tsx b/esp/src/src-react/components/InfoGrid.tsx index 0e13e1d0e7b..1bcca1178b9 100644 --- a/esp/src/src-react/components/InfoGrid.tsx +++ b/esp/src/src-react/components/InfoGrid.tsx @@ -1,5 +1,5 @@ import * as React from "react"; -import { Checkbox, CommandBar, ICommandBarItemProps, Link } from "@fluentui/react"; +import { Checkbox, CommandBar, ICommandBarItemProps, Link, SelectionMode } from "@fluentui/react"; import { SizeMe } from "react-sizeme"; import { formatCost, formatTwoDigits } from "src/Session"; import nlsHPCC from "src/nlsHPCC"; @@ -123,6 +123,11 @@ export const InfoGrid: React.FunctionComponent = ({ return <>{info?.prefix}{txt}{info?.message}; } return Message; + }, + fluentColumn: { + flexGrow: 1, + minWidth: 320, + isResizable: true } }, Column: { label: nlsHPCC.Col, width: 36 }, @@ -133,7 +138,14 @@ export const InfoGrid: React.FunctionComponent = ({ return activityId ? a{activityId} : ""; } }, - FileName: { label: nlsHPCC.FileName, width: 360 } + FileName: { + label: nlsHPCC.FileName, + fluentColumn: { + flexGrow: 2, + minWidth: 320, + isResizable: true + } + } }; }, [wuid]); @@ -210,7 +222,8 @@ export const InfoGrid: React.FunctionComponent = ({ }); setData(filteredExceptions); setFilterCounts(filterCounts); - }, [costChecked, errorChecked, errors, infoChecked, otherChecked, warningChecked]); + setSelection(filteredExceptions); + }, [costChecked, errorChecked, errors, infoChecked, otherChecked, setSelection, warningChecked]); React.useEffect(() => { if (data.length) { @@ -224,19 +237,23 @@ export const InfoGrid: React.FunctionComponent = ({ } }, [data.length]); - return {({ size }) => -
- -
- + return
+ + {({ size }) => +
+
+ { }} + setTotal={setTotal} + refresh={refreshTable} + height={`${size.height - (44 + 8 + 45 + 12)}px`} + selectionMode={SelectionMode.none} + > +
-
- }; + } +
; }; diff --git a/esp/src/src-react/components/controls/Grid.tsx b/esp/src/src-react/components/controls/Grid.tsx index fff30e920ca..634cb318627 100644 --- a/esp/src/src-react/components/controls/Grid.tsx +++ b/esp/src/src-react/components/controls/Grid.tsx @@ -1,5 +1,5 @@ import * as React from "react"; -import { DetailsList, DetailsListLayoutMode, Dropdown, IColumn as _IColumn, ICommandBarItemProps, IDetailsHeaderProps, IDetailsListStyles, mergeStyleSets, Selection, Stack, TooltipHost, TooltipOverflowMode, IDetailsList, IRenderFunction, IDetailsRowProps } from "@fluentui/react"; +import { DetailsList, DetailsListLayoutMode, Dropdown, IColumn as _IColumn, ICommandBarItemProps, IDetailsHeaderProps, IDetailsListStyles, mergeStyleSets, Selection, Stack, TooltipHost, TooltipOverflowMode, IDetailsList, IRenderFunction, IDetailsRowProps, SelectionMode, ConstrainMode } from "@fluentui/react"; import { Pagination } from "@fluentui/react-experiments/lib/Pagination"; import { useConst, useId, useMount, useOnEvent } from "@fluentui/react-hooks"; import { BaseStore, Memory, QueryRequest, QuerySortItem } from "src/store/Memory"; @@ -34,6 +34,7 @@ export interface FluentColumn { formatter?: (value: any, row: any) => any; csvFormatter?: (value: any, row: any) => string; className?: (value: any, row: any) => string; + fluentColumn?: Partial<_IColumn>; } export type FluentColumns = { [key: string]: FluentColumn }; @@ -72,25 +73,42 @@ function columnsAdapter(columns: FluentColumns, columnWidths: Map): const column = columns[key]; const width = columnWidths.get(key) ?? column.width; if (column?.selectorType === undefined && column?.hidden !== true) { - retVal.push({ - key, - name: column.label ?? key, - fieldName: column.field ?? key, - minWidth: width ?? 70, - maxWidth: width, - isResizable: true, - isSorted: false, - isSortedDescending: false, - iconName: column.headerIcon, - isIconOnly: !!column.headerIcon, - data: column, - styles: { root: { width, ":hover": { cursor: column?.sortable !== false ? "pointer" : "default" } } }, - onRender: (item: any, index: number, col: IColumn) => { - col.minWidth = column.width ?? 70; - col.maxWidth = column.width; - return tooltipItemRenderer(item, index, col); - } - } as IColumn); + if (column?.fluentColumn) { + retVal.push({ + key, + name: column.label ?? key, + fieldName: column.field ?? key, + iconName: column.headerIcon, + isIconOnly: !!column.headerIcon, + data: column, + styles: { root: { width, ":hover": { cursor: column?.sortable !== false ? "pointer" : "default" } } }, + onRender: (item: any, index: number, col: IColumn) => { + col.minWidth = column.width ?? 70; + return tooltipItemRenderer(item, index, col); + }, + ...column.fluentColumn + } as IColumn); + } else { + retVal.push({ + key, + name: column.label ?? key, + fieldName: column.field ?? key, + minWidth: width ?? 70, + maxWidth: width, + isResizable: true, + isSorted: false, + isSortedDescending: false, + iconName: column.headerIcon, + isIconOnly: !!column.headerIcon, + data: column, + styles: { root: { width, ":hover": { cursor: column?.sortable !== false ? "pointer" : "default" } } }, + onRender: (item: any, index: number, col: IColumn) => { + col.minWidth = column.width ?? 
70; + col.maxWidth = column.width; + return tooltipItemRenderer(item, index, col); + } + } as IColumn); + } } } return retVal; @@ -191,6 +209,7 @@ interface FluentStoreGridProps { columns: FluentColumns, height: string, refresh: RefreshTable, + selectionMode?: SelectionMode, setSelection: (selection: any[]) => void, setTotal: (total: number) => void, onRenderRow?: IRenderFunction @@ -205,6 +224,7 @@ const FluentStoreGrid: React.FunctionComponent = ({ columns, height, refresh, + selectionMode = SelectionMode.multiple, setSelection, setTotal, onRenderRow @@ -315,7 +335,8 @@ const FluentStoreGrid: React.FunctionComponent = ({ compact={true} items={items} columns={fluentColumns} - layoutMode={DetailsListLayoutMode.justified} + layoutMode={DetailsListLayoutMode.fixedColumns} + constrainMode={ConstrainMode.unconstrained} selection={selectionHandler} isSelectedOnFocus={false} selectionPreservedOnEmptyClick={true} @@ -324,6 +345,7 @@ const FluentStoreGrid: React.FunctionComponent = ({ onColumnResize={columnResize} onRenderRow={onRenderRow} styles={gridStyles(height)} + selectionMode={selectionMode} />
; }; @@ -335,6 +357,7 @@ interface FluentGridProps { sort?: QuerySortItem, columns: FluentColumns, height?: string, + selectionMode?: SelectionMode, setSelection: (selection: any[]) => void, setTotal: (total: number) => void, refresh: RefreshTable, @@ -348,6 +371,7 @@ export const FluentGrid: React.FunctionComponent<FluentGridProps> = ({ sort, columns, height, + selectionMode = SelectionMode.multiple, setSelection, setTotal, refresh, @@ -362,7 +386,7 @@ // eslint-disable-next-line react-hooks/exhaustive-deps }, [constStore, data, /*refresh*/]); - return + return ; }; @@ -375,6 +399,7 @@ interface FluentPagedGridProps { total: number, columns: FluentColumns, height?: string, + selectionMode?: SelectionMode, setSelection: (selection: any[]) => void, setTotal: (total: number) => void, refresh: RefreshTable, @@ -390,6 +415,7 @@ export const FluentPagedGrid: React.FunctionComponent<FluentPagedGridProps> = ({ total, columns, height, + selectionMode = SelectionMode.multiple, setSelection, setTotal, refresh, @@ -414,7 +440,7 @@ setPage(_page); }, [pageNum]); - return + return ; }; From 5a9d68992bdcb592cc72bd299f4a31cfc0e7930f Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Mon, 17 Jun 2024 12:15:03 +0100 Subject: [PATCH 090/151] HPCC-31116 Fix 1 min delay introduced waiting for some CQs The changes introduced by HPCC-30288 also caused Thor to stall for 1 minute per CQ (child query) that had not been started. Signed-off-by: Jake Smith --- thorlcr/graph/thgraphslave.cpp | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/thorlcr/graph/thgraphslave.cpp b/thorlcr/graph/thgraphslave.cpp index fbb68e4f947..2ffb55e596d 100644 --- a/thorlcr/graph/thgraphslave.cpp +++ b/thorlcr/graph/thgraphslave.cpp @@ -1222,6 +1222,8 @@ void CSlaveGraph::executeSubGraph(size32_t parentExtractSz, const byte *parentEx void CSlaveGraph::abort(IException *e) { + if (aborted) + return; if (!graphDone) // set pre done(), no need to abort if got that far.
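/* A standalone sketch of the guard pattern this fix applies: completion handling
   becomes a no-op for a child query (CQ) that was never started, so done() no longer
   blocks waiting for the master's done signal per unstarted CQ. Illustration only --
   the class shape, flag names and the 60-second timeout below are assumptions standing
   in for jlib's Semaphore and Thor's SHORTTIMEOUT, not code from this patch:

   #include <atomic>
   #include <chrono>
   #include <condition_variable>
   #include <cstdio>
   #include <mutex>

   class DoneSem                                  // minimal signalable-semaphore stand-in
   {
       std::mutex m;
       std::condition_variable cv;
       bool signalled = false;
   public:
       void signal()
       {
           { std::lock_guard<std::mutex> g(m); signalled = true; }
           cv.notify_all();
       }
       bool wait(std::chrono::milliseconds timeout)
       {
           std::unique_lock<std::mutex> l(m);
           return cv.wait_for(l, timeout, [this] { return signalled; });
       }
   };

   class SubGraph
   {
       std::atomic<bool> started{false}, aborted{false};
       DoneSem doneSem;
   public:
       void start() { started = true; }
       void finish() { doneSem.signal(); }        // master acknowledged completion
       void abort()
       {
           if (aborted.exchange(true))            // idempotent, like the guard above
               return;
           doneSem.signal();                      // release a done() blocked below
       }
       void done()
       {
           if (!started)                          // never started: nothing to collect,
               return;                            // skip the master handshake entirely
           if (!doneSem.wait(std::chrono::seconds(60)))   // illustrative SHORTTIMEOUT
               std::puts("done(): timed out waiting for completion signal");
       }
   };

   int main()
   {
       SubGraph neverStarted;
       neverStarted.done();                       // returns at once; previously each such
                                                  // CQ stalled for the full minute
       SubGraph ran;
       ran.start();
       ran.finish();
       ran.done();                                // already signalled: returns promptly
   }
*/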
CGraphBase::abort(e); getDoneSem.signal(); @@ -1229,23 +1231,27 @@ void CSlaveGraph::abort(IException *e) void CSlaveGraph::done() { - GraphPrintLog("End of sub-graph"); - progressActive.store(false); - setProgressUpdated(); // NB: ensure collected after end of graph - if (!queryOwner() || isGlobal()) + if (started) { - if (aborted || !graphDone) + GraphPrintLog("End of sub-graph"); + progressActive.store(false); + setProgressUpdated(); // NB: ensure collected after end of graph + + if (initialized && (!queryOwner() || isGlobal())) { - if (!getDoneSem.wait(SHORTTIMEOUT)) // wait on master to clear up, gather info from slaves - WARNLOG("CSlaveGraph::done - timedout waiting for master to signal done()"); + if (aborted || !graphDone) + { + if (!getDoneSem.wait(SHORTTIMEOUT)) // wait on master to clear up, gather info from slaves + WARNLOG("CSlaveGraph::done - timedout waiting for master to signal done()"); + } + else + getDoneSem.wait(); + } + if (!queryOwner()) + { + if (globals->getPropBool("@watchdogProgressEnabled")) + jobS->queryProgressHandler()->stopGraph(*this, NULL); } - else - getDoneSem.wait(); - } - if (!queryOwner()) - { - if (globals->getPropBool("@watchdogProgressEnabled")) - jobS->queryProgressHandler()->stopGraph(*this, NULL); - } } Owned<IException> exception; From 1cd728c458651d0d926d3007556aa6b7facb0259 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Thu, 20 Jun 2024 16:58:07 +0100 Subject: [PATCH 091/151] Split off 9.6.24 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 6ef69d7f8e0..68c7fe4b3da 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.6.23-closedown0 +version: 9.6.25-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application.
-appVersion: 9.6.23-closedown0 +appVersion: 9.6.25-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index b9335684a21..f964c2fe1ca 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1523,7 +1523,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index b6c3dac92b8..c17ffe5e3c3 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index bef0f09a7a7..ef506e3dd4a 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index d1913603c0f..f4d23e11edd 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 4c14437f2a9..afc9f83e710 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -139,7 +139,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index bf61ed3bd6a..70b356ab907 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -147,7 +147,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index d2e73bb48fc..be9d9f553bf 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 4b9b87f8c35..fe9260babc3 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -125,7 +125,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index c92e039725e..bc262ad6ac0 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index f2200dd39b0..82d5c1f2e86 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 498ba0f78cf..8e0d6a593be 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 7ea00c0c88e..25b3c5fea31 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -88,7 +88,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -153,7 +153,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -220,7 +220,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-353,7 +353,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -418,7 +418,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.6.23-closedown0 + helmVersion: 9.6.25-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index dc014e9a22f..ff08503f2e4 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 6 ) -set ( HPCC_POINT 23 ) +set ( HPCC_POINT 25 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-14T16:27:18Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-20T15:58:07Z" ) ### From ee7c3bfab83e528e04588378dbe137415e0c07d0 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Thu, 20 Jun 2024 16:59:29 +0100 Subject: [PATCH 092/151] Split off 9.4.72 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index c8177a82bfb..c194d77e8d6 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.4.71-closedown0 +version: 9.4.73-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.4.71-closedown0 +appVersion: 9.4.73-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 3af14c427c0..ab3e33f4178 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1519,7 +1519,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 732a1114341..a50440cf06b 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index c7b1a0a3b7c..21ade987987 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index eadf65d08fd..36fdadbb6cf 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 05ebf461b28..7b86a7449d1 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -60,7 +60,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -137,7 +137,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 135798849d6..882b2b7f63c 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -143,7 +143,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 1afedb255f5..c69b739f350 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index b2d4f24bfc6..c28acbaaa8f 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -122,7 +122,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 8063096a70e..e5621197105 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index ecd46f7112a..1c740401a65 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index a6359bb8cd2..01d5361a5b5 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index f5ef8bf3d6d..8656e97f866 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -86,7 +86,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -151,7 +151,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -218,7 +218,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-351,7 +351,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -416,7 +416,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.4.71-closedown0 + helmVersion: 9.4.73-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index e313885be18..22b14233235 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 4 ) -set ( HPCC_POINT 71 ) +set ( HPCC_POINT 73 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-14T16:03:56Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-20T15:59:29Z" ) ### From 13038aa7885739502ebf52b7ca264a26450bd863 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Thu, 20 Jun 2024 17:00:57 +0100 Subject: [PATCH 093/151] Split off 9.2.98 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 08b98c94298..6fb3b6188af 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.2.97-closedown0 +version: 9.2.99-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.2.97-closedown0 +appVersion: 9.2.99-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 444d2af71db..4658ca74316 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1405,7 +1405,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 381d7fbddb0..d9ab3304dae 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -50,7 +50,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index d840198008d..2c88ddbe102 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -82,7 +82,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 6de6c7b87c3..faa5493a225 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -56,7 +56,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 520cded722f..1f5a8da4b07 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -133,7 +133,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index e665c1e1c0b..991b88ec767 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -57,7 +57,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -140,7 +140,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index d0b54cef1b7..713eec2275a 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -64,7 +64,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index d7ee54fc3c3..c11fc1ac990 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -120,7 +120,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 9ef61b02d47..5d75783d0e0 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -70,7 +70,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index a39049fa504..735c7d17117 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -120,7 +120,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -178,7 +178,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -240,7 +240,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -346,7 +346,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 34a1a660472..a660b392ae3 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index caf98186b0e..a99ee0d0356 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -82,7 +82,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -145,7 +145,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -210,7 +210,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-341,7 +341,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -404,7 +404,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.2.97-closedown0 + helmVersion: 9.2.99-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 5a0d035376d..9f37c6d68fd 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 2 ) -set ( HPCC_POINT 97 ) +set ( HPCC_POINT 99 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-14T16:02:29Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-20T16:00:57Z" ) ### From 932d0ec05f1642d8151fb06551430413df30fa85 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Thu, 20 Jun 2024 17:03:34 +0100 Subject: [PATCH 094/151] Split off 9.0.120 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index d464f82f589..c03ac127f3d 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.0.119-closedown0 +version: 9.0.121-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.0.119-closedown0 +appVersion: 9.0.121-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index e5a996e4ce5..3ee14df6e41 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1314,7 +1314,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 3ad9abc8a7d..c05a073f715 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -50,7 +50,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 annotations: checksum/config: {{ $configSHA }} spec: diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index 8ca163cf633..32ae6256bb3 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -82,7 +82,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 83bea8a6e79..ed71697b16c 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -56,7 +56,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index bb2f431d598..a73c6190982 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -135,7 +135,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 9baab7b13bc..744cc72f048 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -57,7 +57,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -142,7 +142,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index a41719e8e70..5bef21a5f76 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -64,7 +64,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index f703f88f434..58de354af8e 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -117,7 +117,7 @@ spec: server: {{ .name | quote }} accessDali: "yes" app: {{ $application }} - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 0ff8395a220..37109c9eedf 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -70,7 +70,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 72d1f73456e..9087d9a6b7a 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -120,7 +120,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -180,7 +180,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -242,7 +242,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -347,7 +347,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 753f0362c7d..9d18fb5a35e 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -52,7 +52,7 @@ spec: run: {{ $serviceName | quote }} server: {{ $serviceName | quote }} accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 3182a00a772..be3d07f7bda 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -82,7 +82,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -147,7 +147,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -214,7 +214,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf 
"%s-thorworker-job" .me.name)) | indent 12 }} @@ -347,7 +347,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -412,7 +412,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.0.119-closedown0 + helmVersion: 9.0.121-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 2f11f281cb9..7040e49d251 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 0 ) -set ( HPCC_POINT 119 ) +set ( HPCC_POINT 121 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-14T16:00:55Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-20T16:03:34Z" ) ### From ad67f96e76191fbab39f4bcdf30d1bf5aaf0ec44 Mon Sep 17 00:00:00 2001 From: M Kelly Date: Thu, 20 Jun 2024 12:19:39 -0400 Subject: [PATCH 095/151] HPCC-32103 Roxie crash in soapcall after reaching TIMELIMIT Signed-off-by: M Kelly --- common/thorhelper/thorsoapcall.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/common/thorhelper/thorsoapcall.cpp b/common/thorhelper/thorsoapcall.cpp index 51ca07f8485..000d4a80e81 100644 --- a/common/thorhelper/thorsoapcall.cpp +++ b/common/thorhelper/thorsoapcall.cpp @@ -2642,12 +2642,11 @@ class CWSCAsyncFor : implements IWSCAsyncFor, public CInterface, public CAsyncFo { // other roxie exception ... master->logctx.CTXLOG("Exiting: received Roxie exception"); + master->activitySpanScope->recordException(e, true, true); if (e->errorRow()) processException(url, e->errorRow(), e); else processException(url, inputRows, e); - - master->activitySpanScope->recordException(e, true, true); break; } } @@ -2657,10 +2656,10 @@ class CWSCAsyncFor : implements IWSCAsyncFor, public CInterface, public CAsyncFo persistentHandler->doneUsing(socket, false); if (master->timeLimitExceeded) { - processException(url, inputRows, e); VStringBuffer msg("%s exiting: time limit (%ums) exceeded", getWsCallTypeName(master->wscType), master->timeLimitMS); master->logctx.CTXLOG("%s", msg.str()); master->activitySpanScope->recordError(SpanError(msg.str(), e->errorCode(), true, true)); + processException(url, inputRows, e); break; } From 475a4fc824fea5c2528e89c0ce149aa3d318c743 Mon Sep 17 00:00:00 2001 From: Michael Gardner Date: Thu, 20 Jun 2024 13:51:54 -0400 Subject: [PATCH 096/151] HPCC-32104 Fix for xml:base issue with updated xml/xsl libraries Removal of DOCBOOK_TO_HTML for SecurityManagerGuide as it is no longer used. 
See HPCC-31898. The file contained unresolved relative-path xml:base issues. Signed-off-by: Michael Gardner --- docs/BuildTools/cmake_config/HPCCSystemAdmin.txt | 5 +---- docs/CMakeLists.txt | 2 +- docs/EN_US/ECLWatch/TheECLWatchMan.xml | 6 +++--- .../Inst-Mods/Hardware.xml | 6 +++--- 4 files changed, 8 insertions(+), 11 deletions(-) diff --git a/docs/BuildTools/cmake_config/HPCCSystemAdmin.txt b/docs/BuildTools/cmake_config/HPCCSystemAdmin.txt index 428d42ce2b1..a99915e66ff 100644 --- a/docs/BuildTools/cmake_config/HPCCSystemAdmin.txt +++ b/docs/BuildTools/cmake_config/HPCCSystemAdmin.txt @@ -26,9 +26,7 @@ IF(MAKE_DOCS) LIST(APPEND HELP_DEPENDENCIES ${T}) ENDIF() ENDFOREACH() - - DOCBOOK_TO_HTML(${ECLIPSE_HTML_XSL} "${CMAKE_CURRENT_BINARY_DIR}/HPCCSystemAdministratorsGuide.xml" ${PORTAL_HTML_DIR}/SystemAdministratorsGuide_${DOC_LANG} "system_administration_guide_html_${DOC_LANG}" "${HPCC_SOURCE_DIR}/docs/common/eclipsehelp.css" "") - + SET(HELP_DEPENDENCIES) GET_PROPERTY(Current_Targets GLOBAL PROPERTY DOC_TARGETS) FOREACH(T ${Current_Targets}) @@ -37,5 +35,4 @@ IF(MAKE_DOCS) ENDIF() ENDFOREACH() - DOCBOOK_TO_HTML(${PORTAL_GEN_XSL} "${CMAKE_CURRENT_BINARY_DIR}/SecMgrInc.xml" ${PORTAL_HTML_DIR}/SecurityManagerGuide_${DOC_LANG} "security_manager_guide_html_${DOC_LANG}" "${HPCC_SOURCE_DIR}/docs/common/eclipsehelp.css" "") ENDIF(MAKE_DOCS) diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index 0e0efbd88f9..06c0baf34bb 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -29,7 +29,7 @@ set (DOCBOOK_XSL ${CMAKE_CURRENT_BINARY_DIR}/resources/docbook-xsl) set (XML_CATALOG ${CMAKE_CURRENT_BINARY_DIR}/BuildTools/catalog.xml) set (DOC_VERSION "${HPCC_MAJOR}.${HPCC_MINOR}.${HPCC_POINT}") set (FO_XSL ${CMAKE_CURRENT_BINARY_DIR}/BuildTools/fo.xsl) -set (VERSION_DIR ${CMAKE_CURRENT_BINARY_DIR}/) +set (VERSION_DIR ${CMAKE_CURRENT_BINARY_DIR}) set (ECLIPSE_HTML_XSL ${CMAKE_CURRENT_BINARY_DIR}/BuildTools/EclipseHelp.xsl) #set (ECL_REFERENCE_XML ${CMAKE_CURRENT_BINARY_DIR}/ECLReference/ECLReference.xml) set (HTML_HELP_XSL ${CMAKE_CURRENT_BINARY_DIR}/resources/docbook-xsl/htmlhelp/htmlhelp.xsl) diff --git a/docs/EN_US/ECLWatch/TheECLWatchMan.xml b/docs/EN_US/ECLWatch/TheECLWatchMan.xml index 4572d53a321..70c5855f97c 100644 --- a/docs/EN_US/ECLWatch/TheECLWatchMan.xml +++ b/docs/EN_US/ECLWatch/TheECLWatchMan.xml @@ -42,17 +42,17 @@ - - HPCC Systems® - diff --git a/docs/PT_BR/Installing_and_RunningTheHPCCPlatform/Inst-Mods/Hardware.xml b/docs/PT_BR/Installing_and_RunningTheHPCCPlatform/Inst-Mods/Hardware.xml index 0d747c6fa7f..2d9c6a1c96e 100644 --- a/docs/PT_BR/Installing_and_RunningTheHPCCPlatform/Inst-Mods/Hardware.xml +++ b/docs/PT_BR/Installing_and_RunningTheHPCCPlatform/Inst-Mods/Hardware.xml @@ -39,15 +39,15 @@ - - HPCC Systems - From 4e587a0c06a68e20a2c90300b01981ece2055cbd Mon Sep 17 00:00:00 2001 From: Panagiotatos Date: Fri, 21 Jun 2024 12:06:48 -0400 Subject: [PATCH 097/151] HPCC-32110 Resolve Attribute clash Signed-off-by: Panagiotatos --- .../ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml index bd6474cd740..4e6f4ecacd7 100644 --- a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml +++ b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml @@ -1062,7 +1062,7 @@ - + Environment Values Você pode definir variáveis de ambiente em um arquivo YAML.
Os From 90e7c92a8631bebbe0a82fe444550d3127b8c715 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Mon, 24 Jun 2024 15:15:40 +0100 Subject: [PATCH 098/151] Split off 9.8.0 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 25f78c664c5..d28e3dffbcb 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.7.0-trunk0 +version: 9.9.0-trunk0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. -appVersion: 9.7.0-trunk0 +appVersion: 9.9.0-trunk0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 1ccb9d41b5a..947a235811d 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1523,7 +1523,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 275aa8d6b0c..ff928dd03c9 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index b458e708383..15eb2e607f5 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 88e3ee9a5be..e316e3e3286 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index da0207f7fc8..0f02793f7c4 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -139,7 +139,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 01030cbbfb0..ce9fd3676c3 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -147,7 +147,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 9513b95f939..488d70a5504 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 160be59be49..94d62d860a0 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -125,7 +125,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 55ee65b9385..344abbf0fb2 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 1849d559ec7..99bd1e7f25b 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 0aad43a7328..164c5ae7602 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 863e7cb3e4c..824ebbf574a 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -88,7 +88,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -153,7 +153,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -220,7 +220,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ -353,7 +353,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | 
ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -418,7 +418,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.9.0-trunk0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index a2c38c499f0..1a8e0e7fc1a 100644 --- a/version.cmake +++ b/version.cmake @@ -4,9 +4,9 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) -set ( HPCC_MINOR 7 ) +set ( HPCC_MINOR 9 ) set ( HPCC_POINT 0 ) set ( HPCC_MATURITY "trunk" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-03-08T17:36:05Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-24T14:15:40Z" ) ### From aad4f52027a6e0cc7eb209343294b9aed5f6f4c0 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Mon, 24 Jun 2024 15:15:43 +0100 Subject: [PATCH 099/151] Split off 9.8.0 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 8 ++++---- 14 files changed, 27 insertions(+), 27 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 25f78c664c5..de086da65e6 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.7.0-trunk0 +version: 9.8.1-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.7.0-trunk0 +appVersion: 9.8.1-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 1ccb9d41b5a..e571448a037 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1523,7 +1523,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 275aa8d6b0c..3b4046deafa 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index b458e708383..c92f9d4bb17 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 88e3ee9a5be..3c39acd576c 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index da0207f7fc8..430ec35143d 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -139,7 +139,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 01030cbbfb0..1b6c580be70 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -147,7 +147,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 9513b95f939..11f3edb0628 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 160be59be49..14e9ae0d5cb 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -125,7 +125,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 55ee65b9385..fd3ad929130 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 1849d559ec7..03cd7d3effc 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 0aad43a7328..d7558cdcf30 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 863e7cb3e4c..706dfacaf2f 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -88,7 +88,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -153,7 +153,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -220,7 +220,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ -353,7 +353,7 @@ spec: accessEsp: {{ 
$commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -418,7 +418,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.7.0-trunk0 + helmVersion: 9.8.1-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index a2c38c499f0..92e98514d49 100644 --- a/version.cmake +++ b/version.cmake @@ -4,9 +4,9 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) -set ( HPCC_MINOR 7 ) -set ( HPCC_POINT 0 ) -set ( HPCC_MATURITY "trunk" ) +set ( HPCC_MINOR 8 ) +set ( HPCC_POINT 1 ) +set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-03-08T17:36:05Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-24T14:15:43Z" ) ### From 99add4c267554e30a9fe34da5fb3a391b1620e78 Mon Sep 17 00:00:00 2001 From: "Dan S. Camper" Date: Fri, 21 Jun 2024 11:22:20 -0500 Subject: [PATCH 100/151] HPCC-32091 Improve REGEXREPLACE performance Leverage PCRE2-specific opaque data structure search/replace usage pattern to improve average 'replace' performance. --- rtl/eclrtl/eclregex.cpp | 132 ++++++++++++++++++++++++++-------------- 1 file changed, 88 insertions(+), 44 deletions(-) diff --git a/rtl/eclrtl/eclregex.cpp b/rtl/eclrtl/eclregex.cpp index 275aa19cd4d..bab37da8ea4 100644 --- a/rtl/eclrtl/eclregex.cpp +++ b/rtl/eclrtl/eclregex.cpp @@ -386,41 +386,63 @@ class CCompiledStrRegExpr : implements ICompiledStrRegExpr size32_t sourceSize = (isUTF8Enabled ? rtlUtf8Size(slen, str) : slen); size32_t replaceSize = (isUTF8Enabled ? 
rtlUtf8Size(rlen, replace) : rlen); - uint32_t replaceOptions = PCRE2_SUBSTITUTE_GLOBAL|PCRE2_SUBSTITUTE_EXTENDED; + // Execute an explicit match first to see if we match at all; if we do, matchData will be populated + // with data that can be used by pcre2_substitute to bypass some work + int numMatches = pcre2_match_8(compiledRegex.get(), (PCRE2_SPTR8)str, sourceSize, 0, 0, matchData, pcre2MatchContext8); - // Call it once to get the size of the output, then allocate memory for it; - // Note that pcreLen will include space for a terminating null character; - // we have to allocate memory for that byte to avoid a buffer overrun, - // but we won't count that terminating byte - int replaceResult = pcre2_substitute_8(compiledRegex.get(), (PCRE2_SPTR8)str, sourceSize, 0, replaceOptions|PCRE2_SUBSTITUTE_OVERFLOW_LENGTH, matchData, pcre2MatchContext8, (PCRE2_SPTR8)replace, replaceSize, nullptr, &pcreLen); - - if (replaceResult < 0 && replaceResult != PCRE2_ERROR_NOMEMORY) + if (numMatches < 0 && numMatches != PCRE2_ERROR_NOMATCH) { - // PCRE2_ERROR_NOMEMORY is a normal result when we're just asking for the size of the output + // Treat everything other than PCRE2_ERROR_NOMATCH as an error pcre2_match_data_free_8(matchData); - failWithPCRE2Error(replaceResult, "Error in regex replace: "); + failWithPCRE2Error(numMatches, "Error in regex replace: "); } - if (pcreLen > 0) + if (numMatches > 0) { - out = (char *)rtlMalloc(pcreLen); - - replaceResult = pcre2_substitute_8(compiledRegex.get(), (PCRE2_SPTR8)str, sourceSize, 0, replaceOptions, matchData, pcre2MatchContext8, (PCRE2_SPTR8)replace, replaceSize, (PCRE2_UCHAR8 *)out, &pcreLen); + uint32_t replaceOptions = PCRE2_SUBSTITUTE_MATCHED|PCRE2_SUBSTITUTE_GLOBAL|PCRE2_SUBSTITUTE_EXTENDED; - // Note that, weirdly, pcreLen will now contain the number of code points - // in the result *excluding* the null terminator, so pcreLen will - // become our final result length + // Call substitute once to get the size of the output, then allocate memory for it; + // Note that pcreLen will include space for a terminating null character; + // we have to allocate memory for that byte to avoid a buffer overrun, + // but we won't count that terminating byte + int replaceResult = pcre2_substitute_8(compiledRegex.get(), (PCRE2_SPTR8)str, sourceSize, 0, replaceOptions|PCRE2_SUBSTITUTE_OVERFLOW_LENGTH, matchData, pcre2MatchContext8, (PCRE2_SPTR8)replace, replaceSize, nullptr, &pcreLen); - if (replaceResult < 0) + if (replaceResult < 0 && replaceResult != PCRE2_ERROR_NOMEMORY) { + // PCRE2_ERROR_NOMEMORY is a normal result when we're just asking for the size of the output pcre2_match_data_free_8(matchData); failWithPCRE2Error(replaceResult, "Error in regex replace: "); } - } - pcre2_match_data_free_8(matchData); - // We need to return the number of characters here, not the byte count - outlen = (isUTF8Enabled ? 
rtlUtf8Length(pcreLen, out) : pcreLen); + if (pcreLen > 0) + { + out = (char *)rtlMalloc(pcreLen); + + replaceResult = pcre2_substitute_8(compiledRegex.get(), (PCRE2_SPTR8)str, sourceSize, 0, replaceOptions, matchData, pcre2MatchContext8, (PCRE2_SPTR8)replace, replaceSize, (PCRE2_UCHAR8 *)out, &pcreLen); + + // Note that, weirdly, pcreLen will now contain the number of code points + // in the result *excluding* the null terminator, so pcreLen will + // become our final result length + + if (replaceResult < 0) + { + pcre2_match_data_free_8(matchData); + failWithPCRE2Error(replaceResult, "Error in regex replace: "); + } + } + + pcre2_match_data_free_8(matchData); + // We need to return the number of characters here, not the byte count + outlen = (isUTF8Enabled ? rtlUtf8Length(pcreLen, out) : pcreLen); + } + else + { + // No match found; return the original string + out = (char *)rtlMalloc(sourceSize); + memcpy(out, str, sourceSize); + outlen = slen; + pcre2_match_data_free_8(matchData); + } } IStrRegExprFindInstance * find(const char * str, size32_t from, size32_t len, bool needToKeepSearchString) const @@ -763,41 +785,63 @@ class CCompiledUStrRegExpr : implements ICompiledUStrRegExpr outlen = 0; pcre2_match_data_16 * matchData = pcre2_match_data_create_from_pattern_16(compiledRegex.get(), pcre2GeneralContext16); - uint32_t replaceOptions = PCRE2_SUBSTITUTE_GLOBAL|PCRE2_SUBSTITUTE_EXTENDED; + // Execute an explicit match first to see if we match at all; if we do, matchData will be populated + // with data that can be used by pcre2_substitute to bypass some work + int numMatches = pcre2_match_16(compiledRegex.get(), (PCRE2_SPTR16)str, slen, 0, 0, matchData, pcre2MatchContext16); - // Call it once to get the size of the output, then allocate memory for it; - // Note that pcreLen will include space for a terminating null character; - // we have to allocate memory for that byte to avoid a buffer overrun, - // but we won't count that terminating byte - int replaceResult = pcre2_substitute_16(compiledRegex.get(), (PCRE2_SPTR16)str, slen, 0, replaceOptions|PCRE2_SUBSTITUTE_OVERFLOW_LENGTH, matchData, pcre2MatchContext16, (PCRE2_SPTR16)replace, rlen, nullptr, &pcreLen); - - if (replaceResult < 0 && replaceResult != PCRE2_ERROR_NOMEMORY) + if (numMatches < 0 && numMatches != PCRE2_ERROR_NOMATCH) { - // PCRE2_ERROR_NOMEMORY is a normal result when we're just asking for the size of the output + // Treat everything other than PCRE2_ERROR_NOMATCH as an error pcre2_match_data_free_16(matchData); - failWithPCRE2Error(replaceResult, "Error in regex replace: "); + failWithPCRE2Error(numMatches, "Error in regex replace: "); } - if (pcreLen > 0) + if (numMatches > 0) { - out = (UChar *)rtlMalloc(pcreLen * sizeof(UChar)); - - replaceResult = pcre2_substitute_16(compiledRegex.get(), (PCRE2_SPTR16)str, slen, 0, replaceOptions, matchData, pcre2MatchContext16, (PCRE2_SPTR16)replace, rlen, (PCRE2_UCHAR16 *)out, &pcreLen); + uint32_t replaceOptions = PCRE2_SUBSTITUTE_MATCHED|PCRE2_SUBSTITUTE_GLOBAL|PCRE2_SUBSTITUTE_EXTENDED; - // Note that, weirdly, pcreLen will now contain the number of code points - // in the result *excluding* the null terminator, so pcreLen will - // become our final result length + // Call substitute once to get the size of the output, then allocate memory for it; + // Note that pcreLen will include space for a terminating null character; + // we have to allocate memory for that byte to avoid a buffer overrun, + // but we won't count that terminating byte + int replaceResult = 
pcre2_substitute_16(compiledRegex.get(), (PCRE2_SPTR16)str, slen, 0, replaceOptions|PCRE2_SUBSTITUTE_OVERFLOW_LENGTH, matchData, pcre2MatchContext16, (PCRE2_SPTR16)replace, rlen, nullptr, &pcreLen); - if (replaceResult < 0) + if (replaceResult < 0 && replaceResult != PCRE2_ERROR_NOMEMORY) { + // PCRE2_ERROR_NOMEMORY is a normal result when we're just asking for the size of the output pcre2_match_data_free_16(matchData); failWithPCRE2Error(replaceResult, "Error in regex replace: "); } - } - pcre2_match_data_free_16(matchData); - // We need to return the number of characters here, not the byte count - outlen = pcreLen; + if (pcreLen > 0) + { + out = (UChar *)rtlMalloc(pcreLen * sizeof(UChar)); + + replaceResult = pcre2_substitute_16(compiledRegex.get(), (PCRE2_SPTR16)str, slen, 0, replaceOptions, matchData, pcre2MatchContext16, (PCRE2_SPTR16)replace, rlen, (PCRE2_UCHAR16 *)out, &pcreLen); + + // Note that, weirdly, pcreLen will now contain the number of code points + // in the result *excluding* the null terminator, so pcreLen will + // become our final result length + + if (replaceResult < 0) + { + pcre2_match_data_free_16(matchData); + failWithPCRE2Error(replaceResult, "Error in regex replace: "); + } + } + + pcre2_match_data_free_16(matchData); + // We need to return the number of characters here, not the byte count + outlen = pcreLen; + } + else + { + // No match found; return the original string + out = (UChar *)rtlMalloc(slen * sizeof(UChar)); + memcpy(out, str, slen * sizeof(UChar)); + outlen = slen; + pcre2_match_data_free_16(matchData); + } } IUStrRegExprFindInstance * find(const UChar * str, size32_t from, size32_t len) const From f6408046078b26ce59b8de6a93501d8b9d6c37f3 Mon Sep 17 00:00:00 2001 From: "Dan S. Camper" Date: Mon, 24 Jun 2024 12:55:42 -0500 Subject: [PATCH 101/151] HPCC-32126 Performance improvements in regex code (via Coverity) --- rtl/eclrtl/eclregex.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/rtl/eclrtl/eclregex.cpp b/rtl/eclrtl/eclregex.cpp index 275aa19cd4d..8943f88d101 100644 --- a/rtl/eclrtl/eclregex.cpp +++ b/rtl/eclrtl/eclregex.cpp @@ -153,11 +153,11 @@ class RegexCacheEntry RegexCacheEntry() = delete; RegexCacheEntry(size32_t _patternSize, const char * _pattern, uint32_t _options, std::shared_ptr _compiledRegex8) - : savedOptions(_options), savedPattern(_pattern, _patternSize), compiledRegex8(_compiledRegex8) + : savedOptions(_options), savedPattern(_pattern, _patternSize), compiledRegex8(std::move(_compiledRegex8)) {} RegexCacheEntry(size32_t _patternSize, const char * _pattern, uint32_t _options, std::shared_ptr _compiledRegex16) - : savedOptions(_options), savedPattern(_pattern, _patternSize), compiledRegex16(_compiledRegex16) + : savedOptions(_options), savedPattern(_pattern, _patternSize), compiledRegex16(std::move(_compiledRegex16)) {} RegexCacheEntry(const RegexCacheEntry & other) = delete; @@ -254,7 +254,7 @@ class CStrRegExprFindInstance : implements IStrRegExprFindInstance public: CStrRegExprFindInstance(std::shared_ptr _compiledRegex, const char * _subject, size32_t _from, size32_t _len, bool _keep) - : compiledRegex(_compiledRegex) + : compiledRegex(std::move(_compiledRegex)) { // See if UTF-8 is enabled on this compiled regex uint32_t option_bits; @@ -663,7 +663,7 @@ class CUStrRegExprFindInstance : implements IUStrRegExprFindInstance public: CUStrRegExprFindInstance(std::shared_ptr _compiledRegex, const UChar * _subject, size32_t _from, size32_t _len) - : compiledRegex(_compiledRegex) + : 
compiledRegex(std::move(_compiledRegex)) { subject = _subject + _from; matched = false; From 29ecc71185f45934e381ec9adbe21233ce7cc69b Mon Sep 17 00:00:00 2001 From: Michael Gardner Date: Mon, 24 Jun 2024 15:10:50 -0400 Subject: [PATCH 102/151] HPCC-32110-1 Fixing issue with sect3, sect2, programlisting Signed-off-by: Michael Gardner --- .../ContainerizedMods/ConfigureValues.xml | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml index 0e926af95bf..4f37c591c36 100644 --- a/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml +++ b/docs/PT_BR/ContainerizedHPCC/ContainerizedMods/ConfigureValues.xml @@ -1034,7 +1034,7 @@ https://github.com/hpcc-systems/HPCC-Platform/blob/master/helm/hpcc/docs/placements.md - + Placement O Placement é responsável por encontrar o melhor nó para um @@ -1096,21 +1096,9 @@ posicionamentos para garantir que os pods com requisitos específicos sejam colocados nos nós apropriados. -
- - - Environment Values - - Você pode definir variáveis de ambiente em um arquivo YAML. Os - valores do ambiente são definidos na parte global.env - do arquivo HPCC Systems values.yaml fornecido. Esses - valores são especificados como uma lista de pares de valor de nome - conforme ilustrado abaixo. - - global: + global: -bbe9bd8001 (HPCC-32050 -HPCC Portuguese language Update 9.6) env: - name: SMTPserver value: mysmtpserver From 5946df191d96bb10d57f56d4ddd7932274bc13ab Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Tue, 25 Jun 2024 13:15:15 +0100 Subject: [PATCH 103/151] HPCC-32136 Allow the input and output to be replaced in a buffered class Signed-off-by: Gavin Halliday --- system/jlib/jstream.cpp | 16 ++++++++++++++++ system/jlib/jstream.hpp | 6 +++++- testing/unittests/jstreamtests.cpp | 1 + 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/system/jlib/jstream.cpp b/system/jlib/jstream.cpp index fa76a9632f8..64641763a3a 100644 --- a/system/jlib/jstream.cpp +++ b/system/jlib/jstream.cpp @@ -405,6 +405,11 @@ class CBlockedSerialInputStream : public CInterfaceOfreset(_offset, _flen); } + virtual void replaceInput(ISerialInputStream * newInput) override + { + input.set(newInput); + } + protected: inline byte * data(size32_t offset) { return (byte *)buffer.get() + offset; } inline size32_t available() const { return dataLength - bufferOffset; } @@ -792,6 +797,11 @@ class CBlockedSerialOutputStream final : public CInterfaceOfwanted then got is size available in buffer }; -using IBufferedSerialInputStream = ISerialStream; +interface IBufferedSerialInputStream : extends ISerialStream +{ + virtual void replaceInput(ISerialInputStream * newInput) = 0; +}; /* example of reading a nul terminated string using ISerialStream peek and skip { @@ -100,6 +103,7 @@ interface IBufferedSerialOutputStream : extends ISerialOutputStream virtual void commit(size32_t written) = 0 ; // commit the data written to the block returned by reserve virtual void suspend(size32_t wanted) = 0; // Reserve some bytes and prevent data being flushed to the next stage until endNested is called. May nest. virtual void resume(size32_t len, const void * ptr) = 0; // update the data allocated by suspend and allow flushing. 
+ virtual void replaceOutput(ISerialOutputStream * newOutput) = 0; }; interface ICompressor; diff --git a/testing/unittests/jstreamtests.cpp b/testing/unittests/jstreamtests.cpp index 98303578ccc..7e13a495e04 100644 --- a/testing/unittests/jstreamtests.cpp +++ b/testing/unittests/jstreamtests.cpp @@ -292,6 +292,7 @@ class NullOuputStream : public CInterfaceOf virtual void suspend(size32_t wanted) {} virtual void resume(size32_t len, const void * ptr) {} virtual offset_t tell() const override { return 0; } + virtual void replaceOutput(ISerialOutputStream * newOutput) override {} }; class JlibStreamStressTest : public CppUnit::TestFixture From 6835477778a17332da0284334271c3b793feead7 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Tue, 25 Jun 2024 16:17:22 +0100 Subject: [PATCH 104/151] HPCC-32136 Minor modifications to streaming classes Signed-off-by: Gavin Halliday --- system/jlib/jstream.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/system/jlib/jstream.cpp b/system/jlib/jstream.cpp index 64641763a3a..8f31718a8e1 100644 --- a/system/jlib/jstream.cpp +++ b/system/jlib/jstream.cpp @@ -290,7 +290,7 @@ IByteInputStream *createInputStream(int handle) // This means the buffer size is likely to be bigger than the block size - the class is passed // an initial estimate for the potential overlap. -class CBlockedSerialInputStream : public CInterfaceOf +class CBlockedSerialInputStream final : public CInterfaceOf { public: CBlockedSerialInputStream(ISerialInputStream * _input, size32_t _blockReadSize) @@ -318,7 +318,7 @@ class CBlockedSerialInputStream : public CInterfaceOf Date: Mon, 24 Jun 2024 23:09:46 -0400 Subject: [PATCH 105/151] HPCC-32131 Jtrace exporters batch config support - Enables span batch export mode by default for remote exporters - Exposes batch configuration options - Updates sample otel export values files to include batch config - Updates helm schema to expose batch config Signed-off-by: Rodrigo Pastrana --- .../tracing/otlp-grpc-collector-default.yaml | 5 +++ .../tracing/otlp-grpc-collector-k8s.yaml | 5 +++ .../tracing/otlp-http-collector-default.yaml | 7 +++- .../tracing/otlp-http-collector-k8s.yaml | 5 +++ helm/hpcc/values.schema.json | 12 +++++++ system/jlib/jtrace.cpp | 35 +++++++++++++------ 6 files changed, 57 insertions(+), 12 deletions(-) diff --git a/helm/examples/tracing/otlp-grpc-collector-default.yaml b/helm/examples/tracing/otlp-grpc-collector-default.yaml index 90ca78a56b0..e038dedeb4f 100644 --- a/helm/examples/tracing/otlp-grpc-collector-default.yaml +++ b/helm/examples/tracing/otlp-grpc-collector-default.yaml @@ -4,3 +4,8 @@ global: - type: OTLP-GRPC endpoint: "localhost:4317" useSslCredentials: false + batch: + enabled: true + maxQueueSize: 4096 + scheduledDelayMillis: 6000 + maxExportBatchSize: 512 \ No newline at end of file diff --git a/helm/examples/tracing/otlp-grpc-collector-k8s.yaml b/helm/examples/tracing/otlp-grpc-collector-k8s.yaml index a5aa01b2dd6..2730b415a1c 100644 --- a/helm/examples/tracing/otlp-grpc-collector-k8s.yaml +++ b/helm/examples/tracing/otlp-grpc-collector-k8s.yaml @@ -4,3 +4,8 @@ global: - type: OTLP-GRPC endpoint: "http://myotelcollector-opentelemetry-collector.default.svc.cluster.local:4317" useSslCredentials: false + batch: + enabled: true + maxQueueSize: 4096 + scheduledDelayMillis: 6000 + maxExportBatchSize: 512 diff --git a/helm/examples/tracing/otlp-http-collector-default.yaml b/helm/examples/tracing/otlp-http-collector-default.yaml index c48979473d6..361d1afe126 100644 ---
a/helm/examples/tracing/otlp-http-collector-default.yaml +++ b/helm/examples/tracing/otlp-http-collector-default.yaml @@ -3,4 +3,9 @@ global: exporters: - type: OTLP-HTTP endpoint: "localhost:4318/v1/traces" - consoleDebug: true \ No newline at end of file + consoleDebug: true + batch: + enabled: true + maxQueueSize: 4096 + scheduledDelayMillis: 6000 + maxExportBatchSize: 512 \ No newline at end of file diff --git a/helm/examples/tracing/otlp-http-collector-k8s.yaml b/helm/examples/tracing/otlp-http-collector-k8s.yaml index d4f77ba86a5..74eb0e40e0d 100644 --- a/helm/examples/tracing/otlp-http-collector-k8s.yaml +++ b/helm/examples/tracing/otlp-http-collector-k8s.yaml @@ -4,3 +4,8 @@ global: - type: OTLP-HTTP endpoint: "http://myotelcollector-opentelemetry-collector.default.svc.cluster.local:4318/v1/traces" consoleDebug: true + batch: + enabled: true + maxQueueSize: 4096 + scheduledDelayMillis: 6000 + maxExportBatchSize: 512 diff --git a/helm/hpcc/values.schema.json b/helm/hpcc/values.schema.json index e028ee27f92..49cfc60bfca 100644 --- a/helm/hpcc/values.schema.json +++ b/helm/hpcc/values.schema.json @@ -1165,6 +1165,18 @@ "enabled": { "type": "boolean", "description": "If true, trace data is processed in a batch, if false, trace data is processed immediately" + }, + "maxQueueSize": { + "type": "number", + "description": "The maximum buffer/queue size. After the size is reached, spans are dropped." + }, + "scheduledDelayMillis": { + "type": "number", + "description": "The time interval between two consecutive exports." + }, + "maxExportBatchSize": { + "type": "number", + "description": " The maximum batch size of every export. It must be smaller or equal to max_queue_size." } }, "additionalProperties": { "type": ["integer", "string", "boolean"] } diff --git a/system/jlib/jtrace.cpp b/system/jlib/jtrace.cpp index d166ad06eaa..ce6f7f68fd5 100644 --- a/system/jlib/jtrace.cpp +++ b/system/jlib/jtrace.cpp @@ -494,7 +494,7 @@ class CTraceManager : implements ITraceManager, public CInterface void initTracerProviderAndGlobalInternals(const IPropertyTree * traceConfig); void initTracer(const IPropertyTree * traceConfig); void cleanupTracer(); - std::unique_ptr createExporter(const IPropertyTree * exportConfig); + std::unique_ptr createExporter(const IPropertyTree * exportConfig, bool & shouldBatch); std::unique_ptr createProcessor(const IPropertyTree * exportConfig); public: @@ -1159,10 +1159,11 @@ IProperties * getSpanContext(const ISpan * span) //--------------------------------------------------------------------------------------------------------------------- -std::unique_ptr CTraceManager::createExporter(const IPropertyTree * exportConfig) +std::unique_ptr CTraceManager::createExporter(const IPropertyTree * exportConfig, bool & shouldBatch) { assertex(exportConfig); + shouldBatch = true; StringBuffer exportType; exportConfig->getProp("@type", exportType); @@ -1172,6 +1173,7 @@ std::unique_ptr CTraceManager::createEx if (stricmp(exportType.str(), "OS")==0) //To stdout/err { LOG(MCoperatorInfo, "Tracing exporter set OS"); + shouldBatch = false; return opentelemetry::exporter::trace::OStreamSpanExporterFactory::Create(); } else if (stricmp(exportType.str(), "OTLP")==0 || stricmp(exportType.str(), "OTLP-HTTP")==0) @@ -1255,6 +1257,7 @@ std::unique_ptr CTraceManager::createEx if (logFlags == SpanLogFlags::LogNone) logFlags = DEFAULT_SPAN_LOG_FLAGS; + shouldBatch = false; LOG(MCoperatorInfo, "Tracing exporter set to JLog: logFlags( LogAttributes LogParentInfo %s)", logFlagsStr.str()); return 
JLogSpanExporterFactory::Create(logFlags); } @@ -1268,10 +1271,11 @@ std::unique_ptr CTraceManager::createEx std::unique_ptr CTraceManager::createProcessor(const IPropertyTree * exportConfig) { + bool batchDefault; //to be determined by the createExporter function std::unique_ptr exporter; try { - exporter = createExporter(exportConfig); + exporter = createExporter(exportConfig, batchDefault); } catch(const std::exception& e) //polymorphic type std::exception { @@ -1285,16 +1289,25 @@ std::unique_ptr CTraceManager::createP if (!exporter) return nullptr; - if (exportConfig->getPropBool("batch/@enabled", false)) + if (exportConfig->getPropBool("batch/@enabled", batchDefault)) { //Groups several spans together, before sending them to an exporter. - //MORE: These options should be configurable from batch/@option - opentelemetry::v1::sdk::trace::BatchSpanProcessorOptions options; //size_t max_queue_size = 2048; - //The time interval between two consecutive exports - //std::chrono::milliseconds(5000); - //The maximum batch size of every export. It must be smaller or - //equal to max_queue_size. - //size_t max_export_batch_size = 512 + opentelemetry::v1::sdk::trace::BatchSpanProcessorOptions options; + /** + * The maximum buffer/queue size. After the size is reached, spans are + * dropped. + */ + options.max_queue_size = exportConfig->getPropInt("batch/@maxQueueSize", 2048); + + /* The time interval between two consecutive exports. */ + options.schedule_delay_millis = std::chrono::milliseconds(exportConfig->getPropInt("batch/@scheduledDelayMillis", 5000)); + + /** + * The maximum batch size of every export. It must be smaller or + * equal to max_queue_size. + */ + options.max_export_batch_size = exportConfig->getPropInt("batch/@maxExportBatchSize", 512); + return opentelemetry::sdk::trace::BatchSpanProcessorFactory::Create(std::move(exporter), options); } From c309fd15a1271c5886b220af6c66d0885cc9092e Mon Sep 17 00:00:00 2001 From: "Dan S. Camper" Date: Tue, 25 Jun 2024 12:33:37 -0500 Subject: [PATCH 106/151] HPCC-32140 eclcc should expand embedded archives within an ECL archive file Embedded archives will be unpacked into subdirectories based upon their original package values (typically git branch names) or, if package values are not found, an ascending numeric archive_NNNNNN name. 
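Before the diff, a minimal self-contained C++ sketch of the directory-naming rule described above: prefer the embedded archive's package value (typically a git branch name), otherwise fall back to an ascending zero-padded archive_NNNNNN name. The helper name embeddedArchiveDir is hypothetical; the real change, shown below, extends expandArchive to recurse over the Archive elements of an IPropertyTree.

#include <cstdio>
#include <string>

// Hypothetical helper: choose the subdirectory an embedded archive is
// expanded into. baseDir is assumed to already end with a path separator.
static std::string embeddedArchiveDir(const std::string &baseDir,
                                      const char *packageValue,
                                      unsigned &archiveNum)
{
    if (packageValue && *packageValue)
        return baseDir + packageValue;      // e.g. a git branch name
    char buf[32];
    snprintf(buf, sizeof(buf), "archive_%06u", ++archiveNum); // archive_000001, ...
    return baseDir + buf;
}

int main()
{
    unsigned archiveNum = 0;
    printf("%s\n", embeddedArchiveDir("out/", "candidate-9.6.x", archiveNum).c_str()); // out/candidate-9.6.x
    printf("%s\n", embeddedArchiveDir("out/", nullptr, archiveNum).c_str());           // out/archive_000001
    return 0;
}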
--- ecl/hql/hqlcache.cpp | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/ecl/hql/hqlcache.cpp b/ecl/hql/hqlcache.cpp index 1e9cd46ebd0..1389f456b8c 100644 --- a/ecl/hql/hqlcache.cpp +++ b/ecl/hql/hqlcache.cpp @@ -507,6 +507,24 @@ extern HQL_API void expandArchive(const char * path, IPropertyTree * archive, bo StringBuffer baseFilename; makeAbsolutePath(path, baseFilename, false); addPathSepChar(baseFilename); + unsigned int embeddedArchiveNum = 0; + + // Look for embedded archives and recursively expand them + Owned embeddedArchives = archive->getElements("Archive"); + ForEach(*embeddedArchives) + { + // Append the package value to the path, if it exists + StringBuffer embeddedFilename(baseFilename); + if (embeddedArchives->query().hasProp("@package")) + { + embeddedFilename.append(embeddedArchives->query().queryProp("@package")); + } + else + { + embeddedFilename.appendf("archive_%0*d", 6, ++embeddedArchiveNum); + } + expandArchive(embeddedFilename, &embeddedArchives->query(), includePlugins); + } Owned modules = archive->getElements("Module"); ForEach(*modules) From f3116659a55e1af6a99d5cbf5208211879f46cd3 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Wed, 26 Jun 2024 15:28:35 +0100 Subject: [PATCH 107/151] HPCC-32148 Add an option to gather metrics by default for service calls in esp Signed-off-by: Gavin Halliday --- cmake_modules/options.cmake | 1 + initfiles/componentfiles/configxml/dali.xsl | 2 +- tools/hidl/CMakeLists.txt | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/cmake_modules/options.cmake b/cmake_modules/options.cmake index 1919ae71b07..6a39ffd369e 100644 --- a/cmake_modules/options.cmake +++ b/cmake_modules/options.cmake @@ -70,6 +70,7 @@ option(USE_ADDRESS_SANITIZER "Use address sanitizer to spot leaks" OFF) option(INSTALL_VCPKG_CATALOG "Install vcpkg-catalog.txt" ON) option(PORTALURL "Set url to hpccsystems portal download page") option(PROFILING "Set to true if planning to profile so stacks are informative" OFF) +option(COLLECT_SERVICE_METRICS "Set to true to gather metrics for HIDL services by default" OFF) set(CUSTOM_LABEL "" CACHE STRING "Appends a custom label to the final package name") diff --git a/initfiles/componentfiles/configxml/dali.xsl b/initfiles/componentfiles/configxml/dali.xsl index 5983537297d..a8fa70c2aab 100644 --- a/initfiles/componentfiles/configxml/dali.xsl +++ b/initfiles/componentfiles/configxml/dali.xsl @@ -346,8 +346,8 @@ + - diff --git a/tools/hidl/CMakeLists.txt b/tools/hidl/CMakeLists.txt index ac69965966a..a67cf109401 100644 --- a/tools/hidl/CMakeLists.txt +++ b/tools/hidl/CMakeLists.txt @@ -23,7 +23,7 @@ project( hidl ) -if(CMAKE_BUILD_TYPE STREQUAL "Debug") +if(CMAKE_BUILD_TYPE STREQUAL "Debug" OR COLLECT_SERVICE_METRICS) add_definitions(-DENABLE_DEFAULT_EXECUTION_PROFILING) endif() From bcdbc49316455ed8c3df96b8c79167c45c74e10b Mon Sep 17 00:00:00 2001 From: Richard Chapman Date: Wed, 19 Jun 2024 14:19:12 +0100 Subject: [PATCH 108/151] HPCC-32031 Generate summary information in workunit to speed up file list operations Track whether references are used in conjunction with OPT and signed code. 
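For reference, a minimal self-contained C++ sketch of the summary serialization this patch introduces: each entry is a line of the form hex-flags, a colon, then the logical name, and when the same name occurs more than once the flags are combined with a bitwise AND, so a file only keeps a flag (such as IsOpt) if every reference carries it. This simplifies away the workunit property tree and the case-insensitive map comparator used by the real getSummary/setSummary in the diff below.

#include <cstdio>
#include <cstdlib>
#include <map>
#include <string>

enum SummaryFlags : unsigned char { NoFlags = 0, IsOpt = 0x01, IsSigned = 0x02 };
using SummaryMap = std::map<std::string, SummaryFlags>;

// Parse one "flags:name" entry, e.g. "1:regress::myfile" (flags in hex).
static bool addEntry(SummaryMap &map, const char *entry)
{
    char *end = nullptr;
    SummaryFlags flags = (SummaryFlags) strtol(entry, &end, 16);
    if (*end != ':')
        return false;                      // unrecognized format
    std::string name(end + 1);
    auto it = map.find(name);
    if (it == map.end())
        map[name] = flags;
    else                                   // duplicate name: keep only common flags
        it->second = (SummaryFlags)(it->second & flags);
    return true;
}

int main()
{
    SummaryMap files;
    addEntry(files, "1:regress::myfile");  // first reference is OPT
    addEntry(files, "0:regress::myfile");  // second reference is not
    for (const auto &[name, flags] : files)
        printf("%01x:%s\n", (unsigned) flags, name.c_str()); // prints 0:regress::myfile
    return 0;
}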
Signed-off-by: Richard Chapman --- common/pkgfiles/referencedfilelist.cpp | 87 ++++++++++++++++++-------- common/workunit/workunit.cpp | 69 +++++++++++++++++++- common/workunit/workunit.hpp | 39 ++++++++++++ common/workunit/workunit.ipp | 3 + ecl/hqlcpp/hqlckey.cpp | 14 +++-- ecl/hqlcpp/hqlcpp.ipp | 26 +++++++- ecl/hqlcpp/hqlhtcpp.cpp | 45 +++++++++---- ecl/hqlcpp/hqlsource.cpp | 26 +++++++- 8 files changed, 259 insertions(+), 50 deletions(-) diff --git a/common/pkgfiles/referencedfilelist.cpp b/common/pkgfiles/referencedfilelist.cpp index 837a1ee1e4f..9a246966413 100644 --- a/common/pkgfiles/referencedfilelist.cpp +++ b/common/pkgfiles/referencedfilelist.cpp @@ -965,34 +965,15 @@ void ReferencedFileList::addFilesFromPackageMap(IPropertyTree *pm) bool ReferencedFileList::addFilesFromQuery(IConstWorkUnit *cw, const IHpccPackage *pkg) { - Owned graphs = &cw->getGraphs(GraphTypeActivities); - ForEach(*graphs) + SummaryMap files; + if (cw->getSummary(SummaryType::ReadFile, files) && + cw->getSummary(SummaryType::ReadIndex, files)) { - Owned xgmml = graphs->query().getXGMMLTree(false, false); - Owned iter = xgmml->getElements("//node[att/@name='_*ileName']"); - ForEach(*iter) + for (const auto& [lName, summaryFlags] : files) { - IPropertyTree &node = iter->query(); - bool isOpt = false; - const char *logicalName = node.queryProp("att[@name='_fileName']/@value"); - if (!logicalName) - logicalName = node.queryProp("att[@name='_indexFileName']/@value"); - if (!logicalName) - continue; - - isOpt = node.getPropBool("att[@name='_isIndexOpt']/@value"); - if (!isOpt) - isOpt = node.getPropBool("att[@name='_isOpt']/@value"); - - ThorActivityKind kind = (ThorActivityKind) node.getPropInt("att[@name='_kind']/@value", TAKnone); - //not likely to be part of roxie queries, but for forward compatibility: - if(kind==TAKdiskwrite || kind==TAKspillwrite || kind==TAKindexwrite || kind==TAKcsvwrite || kind==TAKxmlwrite || kind==TAKjsonwrite) - continue; - if (node.getPropBool("att[@name='_isSpill']/@value") || - node.getPropBool("att[@name='_isTransformSpill']/@value")) - continue; + const char *logicalName = lName.c_str(); StringArray subfileNames; - unsigned flags = isOpt ? RefFileOptional : RefFileNotOptional; + unsigned flags = (summaryFlags & SummaryFlags::IsOpt) ? 
RefFileOptional : RefFileNotOptional; if (pkg) { const char *pkgid = pkg->locateSuperFile(logicalName); @@ -1018,6 +999,62 @@ bool ReferencedFileList::addFilesFromQuery(IConstWorkUnit *cw, const IHpccPackag ensureFile(logicalName, flags, NULL, false, &subfileNames); } } + else + { + Owned graphs = &cw->getGraphs(GraphTypeActivities); + ForEach(*graphs) + { + Owned xgmml = graphs->query().getXGMMLTree(false, false); + Owned iter = xgmml->getElements("//node[att/@name='_*ileName']"); + ForEach(*iter) + { + IPropertyTree &node = iter->query(); + bool isOpt = false; + const char *logicalName = node.queryProp("att[@name='_fileName']/@value"); + if (!logicalName) + logicalName = node.queryProp("att[@name='_indexFileName']/@value"); + if (!logicalName) + continue; + + isOpt = node.getPropBool("att[@name='_isIndexOpt']/@value"); + if (!isOpt) + isOpt = node.getPropBool("att[@name='_isOpt']/@value"); + + ThorActivityKind kind = (ThorActivityKind) node.getPropInt("att[@name='_kind']/@value", TAKnone); + //not likely to be part of roxie queries, but for forward compatibility: + if(kind==TAKdiskwrite || kind==TAKspillwrite || kind==TAKindexwrite || kind==TAKcsvwrite || kind==TAKxmlwrite || kind==TAKjsonwrite) + continue; + if (node.getPropBool("att[@name='_isSpill']/@value") || + node.getPropBool("att[@name='_isTransformSpill']/@value")) + continue; + StringArray subfileNames; + unsigned flags = isOpt ? RefFileOptional : RefFileNotOptional; + if (pkg) + { + const char *pkgid = pkg->locateSuperFile(logicalName); + if (pkgid) + { + flags |= (RefFileSuper | RefFileInPackage); + Owned ssfe = pkg->resolveSuperFile(logicalName); + if (ssfe && ssfe->numSubFiles()>0) + { + unsigned count = ssfe->numSubFiles(); + while (count--) + { + StringBuffer subfile; + ssfe->getSubFileName(count, subfile); + ensureFile(subfile, RefSubFile | RefFileInPackage, pkgid, false, nullptr); + subfileNames.append(subfile); + } + } + } + ensureFile(logicalName, flags, pkgid, pkg->isCompulsory(), &subfileNames); + } + else + ensureFile(logicalName, flags, NULL, false, &subfileNames); + } + } + } return pkg ? 
pkg->isCompulsory() : false; } diff --git a/common/workunit/workunit.cpp b/common/workunit/workunit.cpp index 87d6842ba7c..1337d27a276 100644 --- a/common/workunit/workunit.cpp +++ b/common/workunit/workunit.cpp @@ -4437,6 +4437,8 @@ class CLockedWorkUnit : implements ILocalWorkUnit, implements IExtendedWUInterfa { return c->getFileAccessCost(); } virtual cost_type getCompileCost() const { return c->getCompileCost(); } + virtual bool getSummary(SummaryType type, SummaryMap &map) const override + { return c->getSummary(type, map); } virtual void import(IPropertyTree *wuTree, IPropertyTree *graphProgressTree) { return c->import(wuTree, graphProgressTree); } @@ -4503,6 +4505,8 @@ class CLockedWorkUnit : implements ILocalWorkUnit, implements IExtendedWUInterfa { c->setUser(value); } virtual void setWuScope(const char * value) { c->setWuScope(value); } + virtual void setSummary(SummaryType type, const SummaryMap &map) override + { c->setSummary(type, map); } virtual IWorkflowItem* addWorkflowItem(unsigned wfid, WFType type, WFMode mode, unsigned success, unsigned failure, unsigned recovery, unsigned retriesAllowed, unsigned contingencyFor) { return c->addWorkflowItem(wfid, type, mode, success, failure, recovery, retriesAllowed, contingencyFor); } virtual void syncRuntimeWorkflow(IWorkflowItemArray * array) @@ -8721,6 +8725,65 @@ void CLocalWorkUnit::setDebugValue(const char *propname, const char *value, bool } } +static const char *summaryTypeName(SummaryType type) +{ + switch (type) + { + case SummaryType::ReadFile: return "ReadFile"; + case SummaryType::ReadIndex: return "ReadIndex"; + case SummaryType::WriteFile: return "WriteFile"; + case SummaryType::WriteIndex: return "WriteIndex"; + case SummaryType::PersistFile: return "PersistFile"; + case SummaryType::SpillFile: return "SpillFile"; + case SummaryType::JobTemp: return "JobTemp"; + case SummaryType::Service: return "Service"; + default: + throwUnexpected(); + } +}; + +bool CLocalWorkUnit::getSummary(SummaryType type, SummaryMap &map) const +{ + VStringBuffer xpath("Summaries/%s", summaryTypeName(type)); + CriticalBlock block(crit); + const char *list = p->queryProp(xpath); + if (!list) + return false; + StringArray s; + s.appendList(list, "\n"); + ForEachItemIn(idx, s) + { + const char *name = s.item(idx); + if (name && *name) + { + char *end = nullptr; + SummaryFlags flags = (SummaryFlags) strtol(name, &end, 16); + if (*end!=':') + return false; // unrecognized format + name = end+1; + if (map.find(name) == map.end()) + map[name] = flags; + else + map[name] = map[name] & flags; + } + } + return true; +} + +void CLocalWorkUnit::setSummary(SummaryType type, const SummaryMap &map) +{ + StringBuffer list; + for (const auto& [name, flags] : map) + { + if (list.length()) + list.append('\n'); + list.appendf("%01x:%s", (unsigned) flags, name.c_str()); + } + CriticalBlock block(crit); + IPropertyTree *summaries = ensurePTree(p, "Summaries"); + summaries->setProp(summaryTypeName(type), list); +} + void CLocalWorkUnit::setDebugValueInt(const char *propname, int value, bool overwrite) { StringBuffer lower; @@ -13980,6 +14043,11 @@ extern WORKUNIT_API void descheduleWorkunit(char const * wuid) doDescheduleWorkkunit(wuid); } +extern WORKUNIT_API void addWorkunitSummary(IWorkUnit * wu, SummaryType summaryType, SummaryMap &map) +{ + wu->setSummary(summaryType, map); +} + extern WORKUNIT_API void updateWorkunitStat(IWorkUnit * wu, StatisticScopeType scopeType, const char * scope, StatisticKind kind, const char * description, unsigned __int64 value, 
unsigned wfid) { StringBuffer scopestr; @@ -14008,7 +14076,6 @@ class WuTimingUpdater : implements ITimeReportInfo StatisticKind kind; }; - extern WORKUNIT_API void updateWorkunitTimings(IWorkUnit * wu, ITimeReporter *timer) { WuTimingUpdater target(wu, SSTsection, StTimeTotalExecute); diff --git a/common/workunit/workunit.hpp b/common/workunit/workunit.hpp index d96471b5077..4cd299961e6 100644 --- a/common/workunit/workunit.hpp +++ b/common/workunit/workunit.hpp @@ -41,6 +41,7 @@ #include #include #include +#include #include #define LEGACY_GLOBAL_SCOPE "workunit" @@ -1179,6 +1180,40 @@ interface IConstWUScopeIterator : extends IScmIterator //--------------------------------------------------------------------------------------------------------------------- //! IWorkUnit //! Provides high level access to WorkUnit "header" data. + +// Be sure to update summaryTypeName in workunit.cpp if adding anything here +enum class SummaryType +{ + First, + ReadFile = First, + ReadIndex, + WriteFile, + WriteIndex, + PersistFile, + SpillFile, + JobTemp, + Service, + // Keep these at the end + NumItems, + None = NumItems +}; + +enum SummaryFlags : byte +{ + None = 0, + IsOpt = 0x01, + IsSigned = 0x02, +}; +BITMASK_ENUM(SummaryFlags); + +struct ncasecomp { + bool operator() (const std::string& lhs, const std::string& rhs) const { + return stricmp(lhs.c_str(), rhs.c_str()) < 0; + } +}; + +typedef std::map SummaryMap; + interface IWorkUnit; interface IUserDescriptor; @@ -1267,6 +1302,7 @@ interface IConstWorkUnit : extends IConstWorkUnitInfo virtual unsigned queryFileUsage(const char * filename) const = 0; virtual IConstWUFileUsageIterator * getFieldUsage() const = 0; virtual bool getFieldUsageArray(StringArray & filenames, StringArray & columnnames, const char * clusterName) const = 0; + virtual bool getSummary(SummaryType type, SummaryMap &result) const = 0; virtual unsigned getCodeVersion() const = 0; virtual unsigned getWuidVersion() const = 0; @@ -1400,6 +1436,7 @@ interface IWorkUnit : extends IConstWorkUnit virtual void setResultDecimal(const char *name, unsigned sequence, int len, int precision, bool isSigned, const void *val) = 0; virtual void setResultDataset(const char * name, unsigned sequence, size32_t len, const void *val, unsigned numRows, bool extend) = 0; virtual void import(IPropertyTree *wuTree, IPropertyTree *graphProgressTree = nullptr) = 0; + virtual void setSummary(SummaryType type, const SummaryMap &map) = 0; virtual IConstWorkUnit * unlock() = 0; }; @@ -1722,6 +1759,8 @@ extern WORKUNIT_API void gatherLibraryNames(StringArray &names, StringArray &unr //If we add any more parameters we should consider returning an object that can be updated extern WORKUNIT_API void associateLocalFile(IWUQuery * query, WUFileType type, const char * name, const char * description, unsigned crc, unsigned minActivity=0, unsigned maxActivity=0); +extern WORKUNIT_API void addWorkunitSummary(IWorkUnit * wu, SummaryType summaryType, SummaryMap &map); + interface ITimeReporter; extern WORKUNIT_API void updateWorkunitStat(IWorkUnit * wu, StatisticScopeType scopeType, const char * scope, StatisticKind kind, const char * description, unsigned __int64 value, unsigned wfid=0); extern WORKUNIT_API void updateWorkunitTimings(IWorkUnit * wu, ITimeReporter *timer); diff --git a/common/workunit/workunit.ipp b/common/workunit/workunit.ipp index f83956af090..8ac4acbc0aa 100644 --- a/common/workunit/workunit.ipp +++ b/common/workunit/workunit.ipp @@ -379,6 +379,9 @@ public: void setTimeScheduled(const IJlibDateTime &val); 
virtual void subscribe(WUSubscribeOptions options) {}; + virtual bool getSummary(SummaryType type, SummaryMap &map) const override; + virtual void setSummary(SummaryType type, const SummaryMap &map) override; + // ILocalWorkUnit - used for debugging etc void loadXML(const char *xml); void serialize(MemoryBuffer &tgt); diff --git a/ecl/hqlcpp/hqlckey.cpp b/ecl/hqlcpp/hqlckey.cpp index f04c2cad29b..47099f3d328 100644 --- a/ecl/hqlcpp/hqlckey.cpp +++ b/ecl/hqlcpp/hqlckey.cpp @@ -162,6 +162,8 @@ class KeyedJoinInfo : public CInterface bool needToExtractJoinFields() const { return extractJoinFieldsTransform != NULL; } bool hasPostFilter() const { return monitors->queryExtraFilter() || fileFilter; } bool requireActivityForKey() const { return hasComplexIndex; } + bool isKeySigned() { return key->hasAttribute(_signed_Atom); } + bool isFileSigned() { return file && file->hasAttribute(_signed_Atom); } void reportFailureReason(IHqlExpression * cond) { monitors->reportFailureReason(cond); } bool useValueSets() const { return createValueSets; } @@ -1192,7 +1194,7 @@ void HqlCppTranslator::buildKeyedJoinExtra(ActivityInstance & instance, IHqlExpr //virtual const char * getFileName() = 0; // Returns filename of raw file fpos'es refer into if (info->isFullJoin()) - buildFilenameFunction(instance, instance.createctx, WaFilename, "getFileName", info->queryFileFilename(), hasDynamicFilename(info->queryFile())); + buildFilenameFunction(instance, instance.createctx, WaFilename, "getFileName", info->queryFileFilename(), hasDynamicFilename(info->queryFile()), SummaryType::ReadFile, info->isKeyOpt(), info->isFileSigned()); //virtual bool diskAccessRequired() = 0; if (info->isFullJoin()) @@ -1229,7 +1231,7 @@ void HqlCppTranslator::buildKeyJoinIndexReadHelper(ActivityInstance & instance, info->buildExtractIndexReadFields(instance.startctx); //virtual const char * getIndexFileName() = 0; - buildFilenameFunction(instance, instance.startctx, WaIndexname, "getIndexFileName", info->queryKeyFilename(), hasDynamicFilename(info->queryKey())); + buildFilenameFunction(instance, instance.startctx, WaIndexname, "getIndexFileName", info->queryKeyFilename(), hasDynamicFilename(info->queryKey()), SummaryType::ReadIndex, info->isKeyOpt(), info->isKeySigned()); //virtual IOutputMetaData * queryIndexRecordSize() = 0; LinkedHqlExpr indexExpr = info->queryOriginalKey(); @@ -1489,7 +1491,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivityKeyedDistribute(BuildCtx & ctx doBuildUnsignedFunction(instance->classctx, "getFlags", flags.str()+1); //virtual const char * getIndexFileName() = 0; - buildFilenameFunction(*instance, instance->startctx, WaIndexname, "getIndexFileName", keyFilename, dynamic); + buildFilenameFunction(*instance, instance->startctx, WaIndexname, "getIndexFileName", keyFilename, dynamic, SummaryType::ReadIndex, info.isKeyOpt(), info.isKeySigned()); //virtual IOutputMetaData * queryIndexRecordSize() = 0; LinkedHqlExpr indexExpr = info.queryRawKey(); @@ -1583,7 +1585,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivityKeyDiff(BuildCtx & ctx, IHqlEx noteAllFieldsUsed(updated); //virtual const char * getOutputName() = 0; - buildFilenameFunction(*instance, instance->startctx, WaOutputFilename, "getOutputName", output, hasDynamicFilename(expr)); + buildFilenameFunction(*instance, instance->startctx, WaOutputFilename, "getOutputName", output, hasDynamicFilename(expr), SummaryType::WriteFile, false, expr->hasAttribute(_signed_Atom)); //virtual int getSequence() = 0; doBuildSequenceFunc(instance->classctx, 
querySequence(expr), false); @@ -1626,10 +1628,10 @@ ABoundActivity * HqlCppTranslator::doBuildActivityKeyPatch(BuildCtx & ctx, IHqlE noteAllFieldsUsed(original); //virtual const char * getPatchName() = 0; - buildFilenameFunction(*instance, instance->startctx, WaPatchFilename, "getPatchName", patch, true); + buildFilenameFunction(*instance, instance->startctx, WaPatchFilename, "getPatchName", patch, true, SummaryType::ReadFile, false, false); //virtual const char * getOutputName() = 0; - buildFilenameFunction(*instance, instance->startctx, WaOutputFilename, "getOutputName", output, hasDynamicFilename(expr)); + buildFilenameFunction(*instance, instance->startctx, WaOutputFilename, "getOutputName", output, hasDynamicFilename(expr), SummaryType::WriteIndex, false, false); //virtual int getSequence() = 0; doBuildSequenceFunc(instance->classctx, querySequence(expr), false); diff --git a/ecl/hqlcpp/hqlcpp.ipp b/ecl/hqlcpp/hqlcpp.ipp index 16ef89f64e3..0607c583892 100644 --- a/ecl/hqlcpp/hqlcpp.ipp +++ b/ecl/hqlcpp/hqlcpp.ipp @@ -1886,8 +1886,8 @@ public: void doBuildFunctionReturn(BuildCtx & ctx, ITypeInfo * type, IHqlExpression * value); void doBuildUserFunctionReturn(BuildCtx & ctx, ITypeInfo * type, IHqlExpression * value); - void addFilenameConstructorParameter(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr); - void buildFilenameFunction(ActivityInstance & instance, BuildCtx & classctx, WuAttr attr, const char * name, IHqlExpression * expr, bool isDynamic); + void addFilenameConstructorParameter(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr, SummaryType summaryType); + void buildFilenameFunction(ActivityInstance & instance, BuildCtx & classctx, WuAttr attr, const char * name, IHqlExpression * expr, bool isDynamic, SummaryType summaryType, bool isOpt, bool isSigned); void buildRefFilenameFunction(ActivityInstance & instance, BuildCtx & classctx, WuAttr attr, const char * name, IHqlExpression * dataset); void createAccessFunctions(StringBuffer & helperFunc, BuildCtx & declarectx, unsigned prio, const char * interfaceName, const char * object); @@ -1911,7 +1911,7 @@ protected: void buildIteratorNext(BuildCtx & ctx, IHqlExpression * iter, IHqlExpression * row); bool shouldEvaluateSelectAsAlias(BuildCtx & ctx, IHqlExpression * expr); IWUResult * createWorkunitResult(int sequence, IHqlExpression * nameExpr); - void noteFilename(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr, bool isDynamic); + void noteFilename(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr, bool isDynamic, SummaryType summaryType, bool isOpt, bool isSigned); bool checkGetResultContext(BuildCtx & ctx, IHqlExpression * expr, CHqlBoundExpr & tgt); void buildGetResultInfo(BuildCtx & ctx, IHqlExpression * expr, CHqlBoundExpr * boundTarget, const CHqlBoundTarget * targetAssign); void buildGetResultSetInfo(BuildCtx & ctx, IHqlExpression * expr, CHqlBoundExpr * boundTarget, const CHqlBoundTarget * targetAssign); @@ -2038,6 +2038,7 @@ protected: bool isNeverDistributed(IHqlExpression * expr); void ensureWorkUnitUpdated(); + void addWorkunitSummaries(); bool getDebugFlag(const char * name, bool defValue); void initOptions(); void postProcessOptions(); @@ -2140,6 +2141,25 @@ protected: Owned timeReporter; CIArrayOf trackedSources; HqlExprArray tracedActivities; + + // These are used to generate workunit summary info, to avoid having to walk the xgmml to get it + SummaryMap summaries[(int) SummaryType::NumItems]; + void noteSummaryInfo(const char *name, SummaryType type, 
bool isOpt, bool isSigned) + { + if (type != SummaryType::None) + { + SummaryMap &map = summaries[(int) type]; + SummaryFlags flags = SummaryFlags::None; + if (isOpt) + flags |= SummaryFlags::IsOpt; + if (isSigned) + flags |= SummaryFlags::IsSigned; + if (map.find(name) == map.end()) + map[name] = flags; + else + map[name] = map[name] & flags; + } + } }; diff --git a/ecl/hqlcpp/hqlhtcpp.cpp b/ecl/hqlcpp/hqlhtcpp.cpp index 1c0a55836be..ea303805449 100644 --- a/ecl/hqlcpp/hqlhtcpp.cpp +++ b/ecl/hqlcpp/hqlhtcpp.cpp @@ -3382,21 +3382,21 @@ void HqlCppTranslator::doBuildFunction(BuildCtx & ctx, ITypeInfo * type, const c } } -void HqlCppTranslator::addFilenameConstructorParameter(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr) +void HqlCppTranslator::addFilenameConstructorParameter(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr, SummaryType summaryType) { OwnedHqlExpr folded = foldHqlExpression(expr); instance.addConstructorParameter(folded); - noteFilename(instance, attr, folded, false); + noteFilename(instance, attr, folded, false, summaryType, false, false); } -void HqlCppTranslator::buildFilenameFunction(ActivityInstance & instance, BuildCtx & classctx, WuAttr attr, const char * name, IHqlExpression * expr, bool isDynamic) +void HqlCppTranslator::buildFilenameFunction(ActivityInstance & instance, BuildCtx & classctx, WuAttr attr, const char * name, IHqlExpression * expr, bool isDynamic, SummaryType summaryType, bool isOpt, bool isSigned) { OwnedHqlExpr folded = foldHqlExpression(expr); doBuildVarStringFunction(classctx, name, folded); - noteFilename(instance, attr, folded, isDynamic); + noteFilename(instance, attr, folded, isDynamic, summaryType, isOpt, isSigned); } -void HqlCppTranslator::noteFilename(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr, bool isDynamic) +void HqlCppTranslator::noteFilename(ActivityInstance & instance, WuAttr attr, IHqlExpression * expr, bool isDynamic, SummaryType summaryType, bool isOpt, bool isSigned) { if (options.addFilesnamesToGraph) { @@ -3417,6 +3417,7 @@ void HqlCppTranslator::noteFilename(ActivityInstance & instance, WuAttr attr, IH StringBuffer propValue; folded->queryValue()->getStringValue(propValue); instance.addAttribute(attr, propValue); + noteSummaryInfo(propValue, summaryType, isOpt, isSigned); } } if (isDynamic) @@ -3459,20 +3460,24 @@ void HqlCppTranslator::buildRefFilenameFunction(ActivityInstance & instance, Bui assertex(table); IHqlExpression * filename = NULL; + SummaryType summaryType = SummaryType::ReadFile; switch (table->getOperator()) { case no_keyindex: filename = table->queryChild(2); + summaryType = SummaryType::ReadIndex; break; case no_newkeyindex: filename = table->queryChild(3); + summaryType = SummaryType::ReadIndex; break; case no_table: filename = table->queryChild(0); + summaryType = SummaryType::ReadFile; break; } - buildFilenameFunction(instance, classctx, attr, name, filename, hasDynamicFilename(table)); + buildFilenameFunction(instance, classctx, attr, name, filename, hasDynamicFilename(table), summaryType, table->hasAttribute(optAtom), table->hasAttribute(_signed_Atom)); } void HqlCppTranslator::buildConnectInputOutput(BuildCtx & ctx, ActivityInstance * instance, ABoundActivity * table, unsigned outputIndex, unsigned inputIndex, const char * label, bool nWay) @@ -6236,12 +6241,17 @@ bool HqlCppTranslator::buildCpp(IHqlCppInstance & _code, HqlQueryContext & query ensureWorkUnitUpdated(); throw; } + addWorkunitSummaries(); ensureWorkUnitUpdated(); - - return true; } +void 
HqlCppTranslator::addWorkunitSummaries() +{ + for (int i = (int) SummaryType::First; i < (int) SummaryType::NumItems; i++) + addWorkunitSummary(wu(), (SummaryType) i, summaries[i]); +} + void HqlCppTranslator::ensureWorkUnitUpdated() { if (timeReporter) @@ -10659,7 +10669,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivityOutputIndex(BuildCtx & ctx, IH buildInstancePrefix(instance); //virtual const char * getFileName() { return "x.d00"; } - buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", filename, hasDynamicFilename(expr)); + buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", filename, hasDynamicFilename(expr), SummaryType::WriteIndex, false, expr->hasAttribute(_signed_Atom)); //virtual unsigned getFlags() = 0; IHqlExpression * updateAttr = expr->queryAttribute(updateAtom); @@ -10710,7 +10720,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivityOutputIndex(BuildCtx & ctx, IH IHqlExpression * indexNameAttr = expr->queryAttribute(indexAtom); if (indexNameAttr) - buildFilenameFunction(*instance, instance->startctx, WaDistributeIndexname, "getDistributeIndexName", indexNameAttr->queryChild(0), hasDynamicFilename(expr)); + buildFilenameFunction(*instance, instance->startctx, WaDistributeIndexname, "getDistributeIndexName", indexNameAttr->queryChild(0), hasDynamicFilename(expr), SummaryType::ReadIndex, false, expr->hasAttribute(_signed_Atom)); buildExpiryHelper(instance->createctx, expr->queryAttribute(expireAtom)); buildUpdateHelper(instance->createctx, *instance, dataset, updateAttr); @@ -10942,15 +10952,18 @@ ABoundActivity * HqlCppTranslator::doBuildActivityOutput(BuildCtx & ctx, IHqlExp Owned boundDataset = buildCachedActivity(ctx, dataset); ThorActivityKind kind = TAKdiskwrite; const char * activityArgName = "DiskWrite"; + SummaryType summaryType = SummaryType::WriteFile; if (expr->getOperator() == no_spill) { kind = TAKspill; activityArgName = "Spill"; + summaryType = SummaryType::SpillFile; } else if (pipe) { kind = TAKpipewrite; activityArgName = "PipeWrite"; + summaryType = SummaryType::None; } else if (csvAttr) { @@ -10963,7 +10976,14 @@ ABoundActivity * HqlCppTranslator::doBuildActivityOutput(BuildCtx & ctx, IHqlExp activityArgName = "XmlWrite"; } else if (expr->hasAttribute(_spill_Atom)) + { kind = TAKspillwrite; + summaryType = SummaryType::SpillFile; + } + if (expr->hasAttribute(jobTempAtom)) + summaryType = SummaryType::JobTemp; + else if (expr->hasAttribute(_workflowPersist_Atom)) + summaryType = SummaryType::PersistFile; bool useImplementationClass = options.minimizeActivityClasses && targetRoxie() && expr->hasAttribute(_spill_Atom); Owned instance = new ActivityInstance(*this, ctx, kind, expr, activityArgName); @@ -11061,7 +11081,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivityOutput(BuildCtx & ctx, IHqlExp if (filename && filename->getOperator() != no_pipe) { bool isDynamic = expr->hasAttribute(resultAtom) || hasDynamicFilename(expr); - buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", filename, isDynamic); + buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", filename, isDynamic, summaryType, false, expr->hasAttribute(_signed_Atom)); if (!filename->isConstant()) constFilename = false; } @@ -11163,7 +11183,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivityOutput(BuildCtx & ctx, IHqlExp { assertex(tempCount.get() && !hasDynamic(expr)); instance->addConstructorParameter(tempCount); - addFilenameConstructorParameter(*instance, WaFilename, 
filename);
+        addFilenameConstructorParameter(*instance, WaFilename, filename, summaryType);
     }
 
     instance->addSignedAttribute(expr->queryAttribute(_signed_Atom));
@@ -18050,6 +18070,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivitySOAP(BuildCtx & ctx, IHqlExpre
         StringBuffer serviceName;
         getUTF8Value(serviceName, service);
         instance->addAttribute(WaServiceName, serviceName);
+        noteSummaryInfo(serviceName, SummaryType::Service, false, false);
     }
 
     enum class ReqFormat { NONE, XML, JSON, FORM_ENCODED };
diff --git a/ecl/hqlcpp/hqlsource.cpp b/ecl/hqlcpp/hqlsource.cpp
index 3c22a022168..55fca66803a 100644
--- a/ecl/hqlcpp/hqlsource.cpp
+++ b/ecl/hqlcpp/hqlsource.cpp
@@ -1174,7 +1174,27 @@ void SourceBuilder::rebindFilepositons(BuildCtx & ctx, IHqlExpression * dataset,
 void SourceBuilder::buildFilenameMember()
 {
     //---- virtual const char * getFileName() { return "x.d00"; } ----
-    translator.buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", nameExpr, translator.hasDynamicFilename(tableExpr));
+    SummaryType summaryType = SummaryType::ReadFile;
+    switch (activityKind)
+    {
+        case TAKindexread:
+        case TAKindexnormalize:
+        case TAKindexaggregate:
+        case TAKindexcount:
+        case TAKindexgroupaggregate:
+            summaryType = SummaryType::ReadIndex;
+            break;
+        case TAKspillread:
+            summaryType = SummaryType::SpillFile;
+            break;
+    }
+    if (tableExpr->hasAttribute(_spill_Atom))
+        summaryType = SummaryType::SpillFile;
+    else if (tableExpr->hasAttribute(jobTempAtom))
+        summaryType = SummaryType::JobTemp;
+    else if (tableExpr->hasAttribute(_workflowPersist_Atom))
+        summaryType = SummaryType::PersistFile;
+    translator.buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", nameExpr, translator.hasDynamicFilename(tableExpr), summaryType, tableExpr->hasAttribute(optAtom), tableExpr->hasAttribute(_signed_Atom));
 }
 
 void SourceBuilder::buildReadMembers(IHqlExpression * expr)
@@ -2115,7 +2135,7 @@ ABoundActivity * SourceBuilder::buildActivity(BuildCtx & ctx, IHqlExpression * e
             else
                 throwError1(HQLERR_ReadSpillBeforeWrite, spillName.str());
         }
-        translator.addFilenameConstructorParameter(*instance, WaFilename, nameExpr);
+        translator.addFilenameConstructorParameter(*instance, WaFilename, nameExpr, SummaryType::SpillFile);
     }
 
     if (steppedExpr)
@@ -4843,7 +4863,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivityXmlRead(BuildCtx & ctx, IHqlEx
         fieldUsage->noteAll();
 
     //---- virtual const char * getFileName() { return "x.d00"; } ----
-    buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", filename, hasDynamicFilename(tableExpr));
+    buildFilenameFunction(*instance, instance->startctx, WaFilename, "getFileName", filename, hasDynamicFilename(tableExpr), SummaryType::ReadFile, tableExpr->hasAttribute(optAtom), tableExpr->hasAttribute(_signed_Atom));
 
     buildEncryptHelper(instance->startctx, tableExpr->queryAttribute(encryptAtom));
 
     bool usesContents = false;

From 68198d1da4ed22216615b6952fdcac0866d1ed4b Mon Sep 17 00:00:00 2001
From: Richard Chapman
Date: Wed, 26 Jun 2024 11:50:24 +0100
Subject: [PATCH 109/151] HPCC-30252 Optimize WuInfo::IncludeServiceNames

Use new summary information from workunit if present.
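A minimal sketch of the producer side this relies on (illustrative only; the
calls mirror the summary API added in the previous commit, and the service
name is made up):

    SummaryMap services;                                   // case-insensitive name -> SummaryFlags
    services["wsdemo.myservice"] = SummaryFlags::None;     // hypothetical service name
    addWorkunitSummary(wu, SummaryType::Service, services);

Readers can then fetch the recorded names via IConstWorkUnit::getSummary()
(which returns false when no summary was recorded) instead of walking the
workunit graph statistics.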
Signed-off-by: Richard Chapman --- .../ws_workunits/ws_workunitsHelpers.cpp | 34 ++++++++++++------- 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/esp/services/ws_workunits/ws_workunitsHelpers.cpp b/esp/services/ws_workunits/ws_workunitsHelpers.cpp index 212e90d7b4f..62000fcf450 100644 --- a/esp/services/ws_workunits/ws_workunitsHelpers.cpp +++ b/esp/services/ws_workunits/ws_workunitsHelpers.cpp @@ -1203,20 +1203,30 @@ void WsWuInfo::getServiceNames(IEspECLWorkunit &info, unsigned long flags) { if (!(flags & WUINFO_IncludeServiceNames)) return; - StringArray serviceNames; - WuScopeFilter filter; - filter.addScopeType("activity"); - filter.addOutputAttribute(WaServiceName); - filter.addRequiredAttr(WaServiceName); - filter.finishedFilter(); - Owned it = &cw->getScopeIterator(filter); - ForEach(*it) + SummaryMap services; + if (cw->getSummary(SummaryType::Service, services)) + { + for (const auto& [serviceName, flags] : services) + if (!serviceName.empty()) + serviceNames.append(serviceName.c_str()); + } + else { - StringBuffer serviceName; - const char *value = it->queryAttribute(WaServiceName, serviceName); - if (!isEmptyString(value)) - serviceNames.append(value); + // Old method used if new information not present + WuScopeFilter filter; + filter.addScopeType("activity"); + filter.addOutputAttribute(WaServiceName); + filter.addRequiredAttr(WaServiceName); + filter.finishedFilter(); + Owned it = &cw->getScopeIterator(filter); + ForEach(*it) + { + StringBuffer serviceName; + const char *value = it->queryAttribute(WaServiceName, serviceName); + if (!isEmptyString(value)) + serviceNames.append(value); + } } info.setServiceNames(serviceNames); } From a04e422437e02022b5e8c1f894505cd5cfafd302 Mon Sep 17 00:00:00 2001 From: Kunal Aswani Date: Wed, 26 Jun 2024 11:49:23 -0400 Subject: [PATCH 110/151] HPCC-32150 ES Translations 9.8.x Translations added for Spanish (ES). Signed-off-by: Kunal Aswani --- esp/src/src/nls/es/hpcc.ts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/esp/src/src/nls/es/hpcc.ts b/esp/src/src/nls/es/hpcc.ts index 79938c65ca2..6ce86086867 100644 --- a/esp/src/src/nls/es/hpcc.ts +++ b/esp/src/src/nls/es/hpcc.ts @@ -412,6 +412,8 @@ export = { IgnoreGlobalStoreOutEdges: "Ignorar los bordes de salida de el almacén global", Import: "Importar", Inactive: "Inactivo", + IncludePerComponentLogs: "Incluir registros por componente", + IncludeRelatedLogs: "Incluir registros relacionados", IncludeSlaveLogs: "Incluir slave logs", IncludeSubFileInfo: "¿Incluir información de subarchivo?", Index: "Indice", @@ -583,6 +585,7 @@ export = { Newest: "El Mas Nuevo", NewPassword: "Nueva Contraseña", NextSelection: "Siguiente selección", + NextWorkunit: "Siguiente unidad de trabajo", NoCommon: "No hay común", NoContent: "(No hay contenido)", NoContentPleaseSelectItem: "Sin contenido, por favor seleccione un elemento", @@ -715,6 +718,7 @@ export = { PressCtrlCToCopy: "Oprima ctrl+c para copiar", Preview: "Presentación Preliminar", PreviousSelection: "Selección anterior", + PreviousWorkunit: "Unidad de trabajo anterior", PrimaryLost: "Primario perdido", PrimaryMonitoring: "Monitoreado Principal", Priority: "Prioridad", @@ -836,6 +840,7 @@ export = { Save: "Guardar", Scope: "Ámbito", SearchResults: "Resultados de búsqueda", + Seconds: "Segundos", SecondsRemaining: "Segundos que faltan", Security: "Seguridad", SecurityMessageHTML: "Solo vea HTML de usuarios de confianza. Esta unidad de trabajo fue creada por '{__placeholder__}'. 
¿Representar HTML?", @@ -935,6 +940,7 @@ export = { SVGSource: "Origen del SVG", Sync: "Sincronizar", SyncSelection: "Sincronizar", + Syntax: "Sintaxis", SystemServers: "Servidores de sistema", Table: "Tabla", tag: "etiqueta", @@ -962,6 +968,7 @@ export = { TimeMaxTotalExecuteMinutes: "Maximo tiempo total de ejecucion en minutos", TimeMeanTotalExecuteMinutes: "Total tiempo total de ejecucion en minutos", TimeMinTotalExecuteMinutes: "Minomo tiempo total de ejecucion en minutos", + TimePenalty: "Penalización de tiempo", Timers: "Cronómetros", TimeSeconds: "Tiempo (Segundos)", TimeStamp: "Marca de tiempo", @@ -1129,6 +1136,7 @@ export = { WildcardFilter: "Filtro de Comodín", Workflows: "Flujos de Trabajo", Workunit: "Unidad de trabajo", + WorkunitNotFound: "Unidad de trabajo no encontrada", Workunits: "Unidades de trabajo", WorkUnitScopeDefaultPermissions: "Permisos por defect de alcaces de Workunit", Wrap: "Envolver", From 80934a5af1629068628617dbeb25a4b96d3e6753 Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Wed, 26 Jun 2024 18:12:22 +0100 Subject: [PATCH 111/151] HPCC-32159 Fix core stack capture Signed-off-by: Jake Smith --- initfiles/bin/check_executes | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/initfiles/bin/check_executes b/initfiles/bin/check_executes index 5214b767026..67d17c95564 100755 --- a/initfiles/bin/check_executes +++ b/initfiles/bin/check_executes @@ -101,11 +101,16 @@ if [ $PMD_ALWAYS = true ] || [ $retVal -ne 0 ]; then done cp `ls -rt /tmp/postmortem.$progPid.log.*` $POST_MORTEM_DIR rm /tmp/postmortem.$progPid.log.* - if [ -f core ]; then - echo "Generating info from core file to $POST_MORTEM_DIR/info.log" | tee -a $POST_MORTEM_DIR/info.log - gdb -batch -ix /opt/HPCCSystems/bin/.gdbinit -x /opt/HPCCSystems/bin/post-mortem-gdb ${PMD_PROGNAME} core 2>$POST_MORTEM_DIR/info.err >>$POST_MORTEM_DIR/info.log - echo "Generated info from core file" | tee -a $POST_MORTEM_DIR/info.log - rm core + + readarray -t core_files < <(find . -maxdepth 1 -type f -name 'core*' -print) + # we only expect one, but cater for multiple + if [[ ${#core_files[@]} -gt 0 ]]; then + for file in "${core_files[@]}"; do + echo "Generating info from core file($file) to $POST_MORTEM_DIR/info.log" | tee -a $POST_MORTEM_DIR/info.log + gdb -batch -ix /opt/HPCCSystems/bin/.gdbinit -x /opt/HPCCSystems/bin/post-mortem-gdb ${PMD_PROGNAME} $file 2>$POST_MORTEM_DIR/info.err >>$POST_MORTEM_DIR/info.log + echo "Generated info from core file($file)" | tee -a $POST_MORTEM_DIR/info.log + rm $file + done fi dmesg -xT > $POST_MORTEM_DIR/dmesg.log if [[ -n "${PMD_DALISERVER}" ]] && [[ -n "${PMD_WORKUNIT}" ]]; then From e3416762ca0b3a6db713e5f273ce9d3e73913403 Mon Sep 17 00:00:00 2001 From: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> Date: Wed, 26 Jun 2024 16:01:26 -0400 Subject: [PATCH 112/151] HPCC-32147 ECL Watch v9 WU details do not show blank TotalClusterTime If the TotalClusterTime for a WU returned by ESP is "", then display "0.00" instead. Also, fixed an issue where "Potential Savings" percentage was showing "NaN%" when totalCosts was 0. 
Signed-off-by: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> --- esp/src/src-react/components/WorkunitSummary.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/esp/src/src-react/components/WorkunitSummary.tsx b/esp/src/src-react/components/WorkunitSummary.tsx index e0b0b49da2e..8e74c0b008c 100644 --- a/esp/src/src-react/components/WorkunitSummary.tsx +++ b/esp/src/src-react/components/WorkunitSummary.tsx @@ -216,13 +216,13 @@ export const WorkunitSummary: React.FunctionComponent = ({ "owner": { label: nlsHPCC.Owner, type: "string", value: workunit?.Owner, readonly: true }, "jobname": { label: nlsHPCC.JobName, type: "string", value: jobname }, "description": { label: nlsHPCC.Description, type: "string", value: description }, - "potentialSavings": { label: nlsHPCC.PotentialSavings, type: "string", value: `${formatCost(potentialSavings)} (${Math.round((potentialSavings / totalCosts) * 10000) / 100}%)`, readonly: true }, + "potentialSavings": { label: nlsHPCC.PotentialSavings, type: "string", value: `${formatCost(potentialSavings)} (${totalCosts > 0 ? Math.round((potentialSavings / totalCosts) * 10000) / 100 : 0}%)`, readonly: true }, "compileCost": { label: nlsHPCC.CompileCost, type: "string", value: `${formatCost(workunit?.CompileCost)}`, readonly: true }, "executeCost": { label: nlsHPCC.ExecuteCost, type: "string", value: `${formatCost(workunit?.ExecuteCost)}`, readonly: true }, "fileAccessCost": { label: nlsHPCC.FileAccessCost, type: "string", value: `${formatCost(workunit?.FileAccessCost)}`, readonly: true }, "protected": { label: nlsHPCC.Protected, type: "checkbox", value: _protected }, "cluster": { label: nlsHPCC.Cluster, type: "string", value: workunit?.Cluster, readonly: true }, - "totalClusterTime": { label: nlsHPCC.TotalClusterTime, type: "string", value: workunit?.TotalClusterTime, readonly: true }, + "totalClusterTime": { label: nlsHPCC.TotalClusterTime, type: "string", value: workunit?.TotalClusterTime ? 
workunit?.TotalClusterTime : "0.00", readonly: true },
         "abortedBy": { label: nlsHPCC.AbortedBy, type: "string", value: workunit?.AbortBy, readonly: true },
         "abortedTime": { label: nlsHPCC.AbortedTime, type: "string", value: workunit?.AbortTime, readonly: true },
         "ServiceNamesCustom": { label: nlsHPCC.Services, type: "string", value: serviceNames, readonly: true, multiline: true },

From 9586b86519ef293b2d55980f833dfa0bb76c7ac2 Mon Sep 17 00:00:00 2001
From: Rodrigo Pastrana
Date: Wed, 26 Jun 2024 18:58:57 -0400
Subject: [PATCH 113/151] HPCC-32163 Zap LogFilter Absolute Time Range Fix

- Fixes absolute time range param name
- Fixes erroneous time range param test

Signed-off-by: Rodrigo Pastrana
---
 esp/services/ws_workunits/ws_workunitsHelpers.hpp | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/esp/services/ws_workunits/ws_workunitsHelpers.hpp b/esp/services/ws_workunits/ws_workunitsHelpers.hpp
index 5305b0dec1a..8ceafe5864d 100644
--- a/esp/services/ws_workunits/ws_workunitsHelpers.hpp
+++ b/esp/services/ws_workunits/ws_workunitsHelpers.hpp
@@ -186,13 +186,15 @@ struct WUComponentLogOptions
         }
         else if (!isEmptyString(end))
         {
-            if (isEmptyString(end))
+            if (isEmptyString(start))
                 throw makeStringException(ECLWATCH_INVALID_INPUT, "ZapLogFilter: Empty 'Absolute TimeRange Start' detected!");
         }
         else
         {
             if (relativeTimeBufferSecs > 0 )
                 wuLogSearchTimeBuffSecs = relativeTimeBufferSecs;
+            else
+                throw makeStringException(ECLWATCH_INVALID_INPUT, "ZapLogFilter: Invalid 'TimeRange' detected!");
         }
     }
 
@@ -206,9 +208,9 @@ struct WUComponentLogOptions
         StringBuffer start; // Absolute query time range start in YYYY-MM-DDTHH:MM:SS
-        zapHttpRequest->getParameter("LogFilter_AbsoluteTimeRange_Start", start);
+        zapHttpRequest->getParameter("LogFilter_AbsoluteTimeRange_StartDate", start);
         StringBuffer end; // Absolute query time range end in YYYY-MM-DDTHH:MM:SS
-        zapHttpRequest->getParameter("LogFilter_AbsoluteTimeRange_End", end);
+        zapHttpRequest->getParameter("LogFilter_AbsoluteTimeRange_EndDate", end);
 
         // Query time range based on WU Time +- Buffer in seconds
         unsigned bufferSecs = (unsigned)zapHttpRequest->getParameterInt("LogFilter_RelativeTimeRangeBuffer", 0);

From ee5c128eabdb60c2c4ae53ab60953cf939b5f7d4 Mon Sep 17 00:00:00 2001
From: Jake Smith
Date: Wed, 19 Jun 2024 12:44:19 +0100
Subject: [PATCH 114/151] HPCC-32132 New compressed spilling lookahead
 implementation

Signed-off-by: Jake Smith
---
 system/jlib/jqueue.hpp                        |  54 ++
 .../activities/nsplitter/thnsplitterslave.cpp |   4 +-
 thorlcr/activities/thactivityutil.cpp         |  40 +-
 thorlcr/thorutil/thbuf.cpp                    | 622 +++++++++++++++++-
 thorlcr/thorutil/thbuf.hpp                    |  29 +-
 thorlcr/thorutil/thormisc.hpp                 |   7 +-
 6 files changed, 707 insertions(+), 49 deletions(-)

diff --git a/system/jlib/jqueue.hpp b/system/jlib/jqueue.hpp
index e9447833c65..97ff9d91832 100644
--- a/system/jlib/jqueue.hpp
+++ b/system/jlib/jqueue.hpp
@@ -546,5 +546,59 @@ class DListOf
 };
 
+// Lock-free Single Producer Single Consumer bounded queue implementation
+// No mutexes are required to interact with the queue, as long as there's a single consumer thread, and a single writer thread.
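+//
+// Illustrative usage sketch (not part of this patch): a producer thread and a
+// consumer thread share the queue; both calls are non-blocking and report
+// full/empty via their return value, so callers must retry or wait themselves:
+//
+//   CSPSCQueue<unsigned> q(1024);   // bounded to 1024 entries
+//   if (!q.enqueue(42))             // producer thread: false means full
+//       { /* back off, or signal/wait as CCompressedSpillingRowStream does */ }
+//   unsigned v;
+//   if (q.dequeue(v))               // consumer thread: false means empty
+//       { /* process v */ }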
+template <typename T>
+class CSPSCQueue
+{
+    size32_t maxCapacity = 0;
+    std::vector<T> elements;
+    std::atomic<size32_t> head = 0;
+    std::atomic<size32_t> tail = 0;
+
+    inline size32_t increment(size32_t idx) const
+    {
+        size32_t next = idx+1;
+        if (next == maxCapacity)
+            next = 0;
+        return next;
+    }
+public:
+    CSPSCQueue()
+    {
+        // should set capacity before using
+    }
+    CSPSCQueue(size32_t _maxCapacity)
+        : maxCapacity(_maxCapacity + 1), // +1 to distinguish full vs empty
+          elements(maxCapacity)
+    {
+    }
+    void setCapacity(size32_t _maxCapacity)
+    {
+        maxCapacity = _maxCapacity + 1;
+        elements.resize(maxCapacity);
+    }
+    bool enqueue(T e)
+    {
+        size32_t currentHead = head;
+        size32_t nextHead = increment(currentHead);
+        if (nextHead == tail)
+            return false; // full
+
+        elements[currentHead] = std::move(e);
+        head = nextHead;
+        return true;
+    }
+    bool dequeue(T &res)
+    {
+        size32_t currentTail = tail;
+        if (currentTail == head)
+            return false; // empty
+
+        res = std::move(elements[currentTail]);
+        tail = increment(currentTail);
+        return true;
+    }
+};
 
 #endif
diff --git a/thorlcr/activities/nsplitter/thnsplitterslave.cpp b/thorlcr/activities/nsplitter/thnsplitterslave.cpp
index 191d005fa9a..de22da08908 100644
--- a/thorlcr/activities/nsplitter/thnsplitterslave.cpp
+++ b/thorlcr/activities/nsplitter/thnsplitterslave.cpp
@@ -251,9 +251,9 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf
                 if ((size32_t)-1 != blockedSequentialIOSize)
                     options.storageBlockSize = blockedSequentialIOSize;
             }
-            options.totalCompressionBufferSize = getOptInt(THOROPT_SPLITTER_COMPRESSIONTOALK, options.totalCompressionBufferSize / 1024) * 1024;
+            options.totalCompressionBufferSize = getOptInt(THOROPT_SPLITTER_COMPRESSIONTOTALK, options.totalCompressionBufferSize / 1024) * 1024;
             options.inMemMaxMem = getOptInt(THOROPT_SPLITTER_MAXROWMEMK, options.inMemMaxMem / 1024) * 1024;
-            options.spillWriteAheadSize = getOptInt64(THOROPT_SPLITTER_WRITEAHEADK, options.spillWriteAheadSize / 1024) * 1024;
+            options.writeAheadSize = getOptInt64(THOROPT_SPLITTER_WRITEAHEADK, options.writeAheadSize / 1024) * 1024;
             options.inMemReadAheadGranularity = getOptInt(THOROPT_SPLITTER_READAHEADGRANULARITYK, options.inMemReadAheadGranularity / 1024) * 1024;
             options.inMemReadAheadGranularityRows = getOptInt(THOROPT_SPLITTER_READAHEADGRANULARITYROWS, options.inMemReadAheadGranularity);
             options.heapFlags = getOptInt("spillheapflags", options.heapFlags);
diff --git a/thorlcr/activities/thactivityutil.cpp b/thorlcr/activities/thactivityutil.cpp
index f5701672c18..fe9a960bb74 100644
--- a/thorlcr/activities/thactivityutil.cpp
+++ b/thorlcr/activities/thactivityutil.cpp
@@ -66,6 +66,8 @@ class CRowStreamLookAhead : public CSimpleInterfaceOf
     rowcount_t required;
     Semaphore startSem;
     Owned<IException> getexception;
+    LookAheadOptions options;
+    bool newLookAhead = false;
 
     class CThread: public Thread
     {
@@ -94,12 +96,19 @@ class CRowStreamLookAhead : public CSimpleInterfaceOf
     {
         try
         {
-            StringBuffer temp;
-            if (allowspill)
-                GetTempFilePath(temp,"lookahd");
             assertex(bufsize);
             if (allowspill)
+            {
+                StringBuffer temp;
+                GetTempFilePath(temp,"lookahd");
+                if (newLookAhead)
+                {
+                    ICompressHandler *compressHandler = options.totalCompressionBufferSize ?
queryDefaultCompressHandler() : nullptr;
+                    smartbuf.setown(createCompressedSpillingRowStream(&activity, temp.str(), preserveGrouping, rowIf, options, compressHandler));
+                }
+                else
+                    smartbuf.setown(createSmartBuffer(&activity, temp.str(), bufsize, rowIf));
+            }
             else
                 smartbuf.setown(createSmartInMemoryBuffer(&activity, rowIf, bufsize));
             startSem.signal();
@@ -207,6 +216,29 @@ class CRowStreamLookAhead : public CSimpleInterfaceOf
         running = true;
         required = _required;
         count = 0;
+
+        newLookAhead = activity.getOptBool("newlookahead", false);
+        if (activity.getOptBool("forcenewlookahead"))
+        {
+            newLookAhead = true;
+            allowspill = true;
+        }
+
+        // for "newlookahead" only
+        if (isContainerized())
+        {
+            // JCSMORE - add CJobBase::getTempBlockSize() to calc. once.
+            StringBuffer planeName;
+            if (!getDefaultPlane(planeName, "@tempPlane", "temp"))
+                getDefaultPlane(planeName, "@spillPlane", "spill");
+            size32_t blockedSequentialIOSize = getPlaneAttributeValue(planeName, BlockedSequentialIO, (size32_t)-1);
+            if ((size32_t)-1 != blockedSequentialIOSize)
+                options.storageBlockSize = blockedSequentialIOSize;
+        }
+        options.totalCompressionBufferSize = activity.getOptInt(THOROPT_LOOKAHEAD_COMPRESSIONTOTALK, options.totalCompressionBufferSize / 1024) * 1024;
+        options.inMemMaxMem = activity.getOptInt(THOROPT_LOOKAHEAD_MAXROWMEMK, options.inMemMaxMem / 1024) * 1024;
+        options.writeAheadSize = activity.getOptInt64(THOROPT_LOOKAHEAD_WRITEAHEADK, options.writeAheadSize / 1024) * 1024;
+        options.tempFileGranularity = activity.getOptInt64(THOROPT_LOOKAHEAD_TEMPFILE_GRANULARITY, options.tempFileGranularity / 0x100000) * 0x100000;
     }
     ~CRowStreamLookAhead()
     {
diff --git a/thorlcr/thorutil/thbuf.cpp b/thorlcr/thorutil/thbuf.cpp
index 951b7db82d2..b1377f6db00 100644
--- a/thorlcr/thorutil/thbuf.cpp
+++ b/thorlcr/thorutil/thbuf.cpp
@@ -16,11 +16,14 @@
 ############################################################################## */
 
 #include
+#include
 #include
+#include
 #include "platform.h"
 #include
 #include
 #include "jlib.hpp"
+#include "jqueue.hpp"
 #include "jmisc.hpp"
 #include "jio.hpp"
 #include "jlzw.hpp"
@@ -606,6 +609,576 @@ class CSmartRowInMemoryBuffer: public CSimpleInterface, implements ISmartRowBuff
     }
 };
 
+
+static std::tuple<IBufferedSerialInputStream *, IFileIO *> createSerialInputStream(IFile *iFile, ICompressHandler *compressHandler, const CommonBufferRowRWStreamOptions &options, unsigned numSharingCompressionBuffer)
+{
+    Owned<IFileIO> iFileIO = iFile->open(IFOread);
+    Owned<ISerialInputStream> in = createSerialInputStream(iFileIO);
+    Owned<IBufferedSerialInputStream> inputStream = createBufferedInputStream(in, options.storageBlockSize, 0);
+    if (compressHandler)
+    {
+        const char *decompressOptions = nullptr; // at least for now!
+        Owned<IExpander> decompressor = compressHandler->getExpander(decompressOptions);
+        Owned<ISerialInputStream> decompressed = createDecompressingInputStream(inputStream, decompressor);
+
+        size32_t compressionBlockSize = (size32_t)(options.totalCompressionBufferSize / numSharingCompressionBuffer);
+        if (compressionBlockSize < options.minCompressionBlockSize)
+        {
+            WARNLOG("Shared totalCompressionBufferSize=%" I64F "u, too small for number of numSharingCompressionBuffer(%u). Using minCompressionBlockSize(%u).", (unsigned __int64)options.totalCompressionBufferSize, numSharingCompressionBuffer, options.minCompressionBlockSize);
+            compressionBlockSize = options.minCompressionBlockSize;
+        }
+        inputStream.setown(createBufferedInputStream(decompressed, compressionBlockSize, 0));
+    }
+    return { inputStream.getClear(), iFileIO.getClear() };
+}
+
+static std::tuple<IBufferedSerialOutputStream *, IFileIO *> createSerialOutputStream(IFile *iFile, ICompressHandler *compressHandler, const CommonBufferRowRWStreamOptions &options, unsigned numSharingCompressionBuffer)
+{
+    Owned<IFileIO> iFileIO = iFile->open(IFOcreate); // kept for stats purposes
+    Owned<ISerialOutputStream> out = createSerialOutputStream(iFileIO);
+    Owned<IBufferedSerialOutputStream> outputStream = createBufferedOutputStream(out, options.storageBlockSize); // preferred plane block size
+    if (compressHandler)
+    {
+        const char *compressOptions = nullptr; // at least for now!
+        Owned<ICompressor> compressor = compressHandler->getCompressor(compressOptions);
+        Owned<ISerialOutputStream> compressed = createCompressingOutputStream(outputStream, compressor);
+        size32_t compressionBlockSize = (size32_t)(options.totalCompressionBufferSize / numSharingCompressionBuffer);
+        if (compressionBlockSize < options.minCompressionBlockSize)
+        {
+            WARNLOG("Shared totalCompressionBufferSize=%" I64F "u, too small for number of numSharingCompressionBuffer(%u). Using minCompressionBlockSize(%u).", (unsigned __int64)options.totalCompressionBufferSize, numSharingCompressionBuffer, options.minCompressionBlockSize);
+            compressionBlockSize = options.minCompressionBlockSize;
+        }
+
+        outputStream.setown(createBufferedOutputStream(compressed, compressionBlockSize));
+    }
+    return { outputStream.getClear(), iFileIO.getClear() };
+}
+
+// #define TRACE_SPILLING_ROWSTREAM // traces each row read/written, and other events
+
+// based on query that produces records with a single sequential (from 1) unsigned4
+// #define VERIFY_ROW_IDS_SPILLING_ROWSTREAM
+
+// for 'stressLookAhead' code. When enabled, reduces buffer sizes etc. to stress test the lookahead spilling
+// #define STRESSTEST_SPILLING_ROWSTREAM
+
+
+
+/* CCompressedSpillingRowStream implementation details:
+ - Writer:
+  - The writer writes to an in-memory queue; when the queue is full, or a certain number of rows have been queued, it starts writing to temp files.
+  - The writer will always write to the queue if it can, even after it has started spilling.
+  - The writer commits to disk at LookAheadOptions::writeAheadSize granularity
+  - The writer creates a new temp file when the current one reaches LookAheadOptions::tempFileGranularity
+  - The writer pushes the current nextOutputRow to a queue when it creates the next output file (used by the reader to know when to move to next)
+  - NB: writer implements ISmartRowBuffer::flush() which has slightly weird semantics (blocks until everything is read or stopped)
+- Reader:
+  - The reader will read from the queue until it is exhausted, and block to be signalled for more.
+  - If the reader dequeues a row that is ahead of the expected 'nextInputRow', it will stash it, and read from disk until it catches up to that row.
+  - If the reader is reading from disk and it catches up with 'committedRows' it will block until the writer has committed more rows.
+  - When reading from a temp file, it will take ownership of the CFileOwner and dispose of the underlying file when it has consumed it.
+  - The reader will read from the stream until it hits 'currentTempFileEndRow' (initially 0), at which point it will open the next temp file.
+ */
+
+// NB: Supports being read by 1 thread and written to by another only
+class CCompressedSpillingRowStream: public CSimpleInterfaceOf<ISmartRowBuffer>, implements IRowWriter
+{
+    typedef std::tuple<const void *, rowcount_t, size32_t> RowEntry;
+
+    CActivityBase &activity; // ctor input parameter
+    StringAttr baseTmpFilename; // ctor input parameter
+    LookAheadOptions options; // ctor input parameter
+    Linked<ICompressHandler> compressHandler; // ctor input parameter
+
+    // derived from input parameter (IThorRowInterfaces *rowIf)
+    Linked<IOutputMetaData> meta;
+    Linked<IOutputRowSerializer> serializer;
+    Linked<IEngineRowAllocator> allocator;
+    Linked<IOutputRowDeserializer> deserializer;
+    memsize_t compressionBlockSize = 0; // filled in createOutputStream
+
+    // in-memory related members
+    CSPSCQueue<RowEntry> inMemRows;
+    std::atomic<memsize_t> inMemRowsMemoryUsage = 0; // NB updated from writer and reader threads
+    Semaphore moreRows;
+    std::atomic<bool> readerWaitingForQ = false; // set by reader, cleared by writer
+
+    // temp write related members
+    Owned<IBufferedSerialOutputStream> outputStream;
+    std::unique_ptr<COutputStreamSerializer> outputStreamSerializer;
+    memsize_t pendingFlushToDiskSz = 0;
+    offset_t currentTempFileSize = 0;
+    CFileOwner *currentOwnedOutputFile = nullptr;
+    Owned<IFileIO> currentOutputIFileIO; // keep for stats
+    CriticalSection outputFilesQCS;
+    std::queue<CFileOwner *> outputFiles;
+    unsigned writeTempFileNum = 0;
+    std::atomic<rowcount_t> nextOutputRow = 0; // read by reader, updated by writer
+    std::atomic<rowcount_t> committedRows = 0; // read by reader, updated by writer
+    std::atomic<bool> spilt = false; // set by createOutputStream, checked by reader
+    std::queue<rowcount_t> outputFileEndRowMarkers;
+    bool lastWriteWasEog = false;
+    bool outputComplete = false; // only accessed and modified by writer or reader within readerWriterCS
+    bool recentlyQueued = false;
+    CriticalSection outputStreamCS;
+
+    // temp read related members
+    std::atomic<rowcount_t> currentTempFileEndRow = 0;
+    Owned<IFileIO> currentInputIFileIO; // keep for stats
+    Linked<CFileOwner> currentOwnedInputFile;
+    Owned<IBufferedSerialInputStream> inputStream;
+    CThorStreamDeserializerSource inputDeserializerSource;
+    rowcount_t nextInputRow = 0;
+    bool readerWaitingForCommit = false;
+    static constexpr unsigned readerWakeupGranularity = 32; // how often to wake up the reader if it is waiting for more rows
+    enum ReadState { rs_fromqueue, rs_frommarker, rs_endstream, rs_stopped } readState = rs_fromqueue;
+    RowEntry readFromStreamMarker = { nullptr, 0, 0 };
+
+    // misc
+    bool grouped = false; // ctor input parameter
+    CriticalSection readerWriterCS;
+#ifdef STRESSTEST_SPILLING_ROWSTREAM
+    bool stressTest = false;
+#endif
+
+    // annoying flush semantics
+    bool flushWaiting = false;
+    Semaphore flushWaitSem;
+
+
+    void trace(const char *format, ...)
+    {
+#ifdef TRACE_SPILLING_ROWSTREAM
+        va_list args;
+        va_start(args, format);
+        VALOG(MCdebugInfo, format, args);
+        va_end(args);
+#endif
+    }
+    void createNextOutputStream()
+    {
+        VStringBuffer tmpFilename("%s.%u", baseTmpFilename.get(), writeTempFileNum++);
+        trace("WRITE: writing to %s", tmpFilename.str());
+        Owned<IFile> iFile = createIFile(tmpFilename);
+        currentOwnedOutputFile = new CFileOwner(iFile, activity.queryTempFileSizeTracker()); // used by checkFlushToDisk to noteSize
+        {
+            CriticalBlock b(outputFilesQCS);
+            outputFiles.push(currentOwnedOutputFile); // NB: takes ownership
+        }
+
+        auto res = createSerialOutputStream(iFile, compressHandler, options, 2); // (2) input & output sharing totalCompressionBufferSize
+        outputStream.setown(std::get<0>(res));
+        currentOutputIFileIO.setown(std::get<1>(res));
+        outputStreamSerializer = std::make_unique<COutputStreamSerializer>(outputStream);
+    }
+    void createNextInputStream()
+    {
+        CFileOwner *dequeuedOwnedIFile = nullptr;
+        {
+            CriticalBlock b(outputFilesQCS);
+            dequeuedOwnedIFile = outputFiles.front();
+            outputFiles.pop();
+        }
+        currentOwnedInputFile.setown(dequeuedOwnedIFile);
+        IFile *iFile = &currentOwnedInputFile->queryIFile();
+        trace("READ: reading from %s", iFile->queryFilename());
+
+        auto res = createSerialInputStream(iFile, compressHandler, options, 2); // (2) input & output sharing totalCompressionBufferSize
+        inputStream.setown(std::get<0>(res));
+        currentInputIFileIO.setown(std::get<1>(res));
+        inputDeserializerSource.setStream(inputStream);
+    }
+    const void *readRowFromStream()
+    {
+        // readRowFromStream() called from readToMarker (which will block before calling this if behind committedRows),
+        // or when outputComplete.
+        // Either way, it will not enter this method until the writer has committed ahead of the reader nextInputRow
+
+        // NB: currentTempFileEndRow will be 0 if 1st input read
+        // nextInputRow can be > currentTempFileEndRow, because the writer/reader may have used the Q
+        // beyond this point, the next row in the stream could be anywhere above.
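+        // Illustrative example (hypothetical numbers): if the first temp file was closed at row 1000,
+        // currentTempFileEndRow is 1000; a reader arriving here with nextInputRow==1000 opens the next
+        // file and adopts the next queued boundary, or 'unbounded' if the writer hasn't closed it yet.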
+ if (nextInputRow >= currentTempFileEndRow) + { + createNextInputStream(); + CriticalBlock b(outputStreamCS); + if (nextInputRow >= currentTempFileEndRow) + { + if (!outputFileEndRowMarkers.empty()) + { + currentTempFileEndRow = outputFileEndRowMarkers.front(); + outputFileEndRowMarkers.pop(); + assertex(currentTempFileEndRow > nextInputRow); + } + else + { + currentTempFileEndRow = (rowcount_t)-1; // unbounded for now, writer will set when it knows + trace("READ: setting currentTempFileEndRow: unbounded"); + } + } + } + if (grouped) + { + bool eog; + inputStream->read(sizeof(bool), &eog); + if (eog) + return nullptr; + } + RtlDynamicRowBuilder rowBuilder(allocator); + size32_t sz = deserializer->deserialize(rowBuilder, inputDeserializerSource); + const void *row = rowBuilder.finalizeRowClear(sz); + checkCurrentRow("S: ", row, nextInputRow); + return row; + } + void writeRowToStream(const void *row, size32_t rowSz) + { + if (!spilt) + { + spilt = true; + ActPrintLog(&activity, "Spilling to temp storage [file = %s]", baseTmpFilename.get()); + createNextOutputStream(); + } + if (grouped) + { + bool eog = (nullptr == row); + outputStream->put(sizeof(bool), &eog); + pendingFlushToDiskSz++; + if (nullptr == row) + return; + } + serializer->serialize(*outputStreamSerializer.get(), (const byte *)row); + pendingFlushToDiskSz += rowSz; + } + void checkReleaseQBlockReader() + { + if (readerWaitingForQ) + { + readerWaitingForQ = false; + moreRows.signal(); + } + } + void checkReleaseReaderCommitBlocked() + { + if (readerWaitingForCommit) + { + readerWaitingForCommit = false; + moreRows.signal(); + } + } + void handleInputComplete() + { + readState = rs_stopped; + if (flushWaiting) + { + flushWaiting = false; + flushWaitSem.signal(); + } + } + bool checkFlushToDisk(size32_t threshold) + { + if (pendingFlushToDiskSz <= threshold) + return false; + rowcount_t currentNextOutputRow = nextOutputRow.load(); + trace("WRITE: Flushed to disk. nextOutputRow = %" RCPF "u", currentNextOutputRow); + outputStream->flush(); + currentTempFileSize += pendingFlushToDiskSz; + currentOwnedOutputFile->noteSize(currentTempFileSize); + pendingFlushToDiskSz = 0; + if (currentTempFileSize > options.tempFileGranularity) + { + currentTempFileSize = 0; + { + CriticalBlock b(outputStreamCS); + // set if reader isn't bounded yet, or queue next boundary + if ((rowcount_t)-1 == currentTempFileEndRow) + { + currentTempFileEndRow = currentNextOutputRow; + trace("WRITE: setting currentTempFileEndRow: %" RCPF "u", currentTempFileEndRow.load()); + } + else + { + outputFileEndRowMarkers.push(currentNextOutputRow); + trace("WRITE: adding to tempFileEndRowMarker(size=%u): %" RCPF "u", (unsigned)outputFileEndRowMarkers.size(), currentNextOutputRow); + } + } + createNextOutputStream(); + } + committedRows = currentNextOutputRow; + return true; + } + void addRow(const void *row) + { + bool queued = false; + size32_t rowSz = row ? thorRowMemoryFootprint(serializer, row) : 0; + if (rowSz + inMemRowsMemoryUsage <= options.inMemMaxMem) + queued = inMemRows.enqueue({ row, nextOutputRow, rowSz }); // takes ownership of 'row' if successful + if (queued) + { + trace("WRITE: Q: nextOutputRow: %" RCPF "u", nextOutputRow.load()); + inMemRowsMemoryUsage += rowSz; + ++nextOutputRow; + recentlyQueued = true; + } + else + { + trace("WRITE: S: nextOutputRow: %" RCPF "u", nextOutputRow.load()); + writeRowToStream(row, rowSz); // JCSMORE - rowSz is memory not disk size... does it matter that much? 
+            ::ReleaseThorRow(row);
+            ++nextOutputRow;
+            if (checkFlushToDisk(options.writeAheadSize))
+            {
+                CriticalBlock b(readerWriterCS);
+                checkReleaseReaderCommitBlocked();
+            }
+        }
+
+        // do not wake up reader every time a row is queued (but granularly) to avoid excessive flapping
+        if (recentlyQueued && (0 == (nextOutputRow % readerWakeupGranularity)))
+        {
+            recentlyQueued = false;
+            CriticalBlock b(readerWriterCS);
+            checkReleaseQBlockReader();
+        }
+    }
+    const void *getQRow(RowEntry &e)
+    {
+        rowcount_t writeRow = std::get<1>(e);
+        inMemRowsMemoryUsage -= std::get<2>(e);
+        if (writeRow == nextInputRow)
+        {
+#ifdef STRESSTEST_SPILLING_ROWSTREAM
+            if (stressTest && (0 == (nextInputRow % 100)))
+                MilliSleep(5);
+#endif
+
+            const void *row = std::get<0>(e);
+            checkCurrentRow("Q: ", row, nextInputRow);
+            ++nextInputRow;
+            return row;
+        }
+        else
+        {
+            // queued row is ahead of reader position, save marker and read from stream until marker
+            dbgassertex(writeRow > nextInputRow);
+            readFromStreamMarker = e;
+            readState = rs_frommarker;
+            return readToMarker();
+        }
+
+    }
+    inline void checkCurrentRow(const char *msg, const void *row, rowcount_t expectedId)
+    {
+#ifdef VERIFY_ROW_IDS_SPILLING_ROWSTREAM
+        unsigned id;
+        memcpy(&id, row, sizeof(unsigned));
+        assertex(id-1 == expectedId);
+        trace("READ: %s nextInputRow: %" RCPF "u", msg, expectedId);
+#endif
+    }
+    const void *readToMarker()
+    {
+        rowcount_t markerRow = std::get<1>(readFromStreamMarker);
+        if (markerRow == nextInputRow)
+        {
+            const void *ret = std::get<0>(readFromStreamMarker);
+            checkCurrentRow("M: ", ret, nextInputRow);
+            readFromStreamMarker = { nullptr, 0, 0 };
+            readState = rs_fromqueue;
+            ++nextInputRow;
+            return ret;
+        }
+        else if (nextInputRow >= committedRows) // rows we need have not yet been committed to disk.
+        {
+            CLeavableCriticalBlock b(readerWriterCS);
+            if (nextInputRow >= committedRows)
+            {
+                // wait for writer to commit
+                readerWaitingForCommit = true;
+                b.leave();
+                trace("READ: waiting for committedRows(currently = %" RCPF "u) to catch up to nextInputRow = %" RCPF "u", committedRows.load(), nextInputRow);
+                moreRows.wait();
+                assertex(nextInputRow < committedRows);
+            }
+        }
+        const void *row = readRowFromStream();
+        ++nextInputRow;
+        return row;
+    }
+public:
+    IMPLEMENT_IINTERFACE_O_USING(CSimpleInterfaceOf<ISmartRowBuffer>);
+
+    explicit CCompressedSpillingRowStream(CActivityBase *_activity, const char *_baseTmpFilename, bool _grouped, IThorRowInterfaces *rowIf, const LookAheadOptions &_options, ICompressHandler *_compressHandler)
+        : activity(*_activity), baseTmpFilename(_baseTmpFilename), grouped(_grouped), options(_options), compressHandler(_compressHandler),
+          meta(rowIf->queryRowMetaData()), serializer(rowIf->queryRowSerializer()), allocator(rowIf->queryRowAllocator()), deserializer(rowIf->queryRowDeserializer())
+    {
+        size32_t minSize = meta->getMinRecordSize();
+
+#ifdef STRESSTEST_SPILLING_ROWSTREAM
+        stressTest = activity.getOptBool("stressLookAhead");
+        if (stressTest)
+        {
+            options.inMemMaxMem = minSize * 4;
+            options.writeAheadSize = options.inMemMaxMem * 2;
+            options.tempFileGranularity = options.inMemMaxMem * 4;
+            if (options.tempFileGranularity < 0x10000) // stop silly sizes (NB: this would only be set so small for testing!)
+ options.tempFileGranularity = 0x10000; + } +#endif + + if (minSize < 16) + minSize = 16; // not too important, just using to cap inMemRows queue size + inMemRows.setCapacity(options.inMemMaxMem / minSize); + + assertex(options.writeAheadSize < options.tempFileGranularity); + } + ~CCompressedSpillingRowStream() + { + while (!outputFiles.empty()) + { + ::Release(outputFiles.front()); + outputFiles.pop(); + } + RowEntry e; + while (true) + { + if (!inMemRows.dequeue(e)) + break; + const void *row = std::get<0>(e); + if (row) + ReleaseThorRow(row); + } + const void *markerRow = std::get<0>(readFromStreamMarker); + if (markerRow) + ReleaseThorRow(markerRow); + } + +// ISmartRowBuffer + virtual IRowWriter *queryWriter() override + { + return this; + } +// IRowStream + virtual const void *nextRow() override + { + switch (readState) + { + case rs_fromqueue: + { + while (true) + { + RowEntry e; + if (inMemRows.dequeue(e)) + return getQRow(e); + else + { + { + CLeavableCriticalBlock b(readerWriterCS); + // Recheck Q now have CS, if reader here and writer ready to signal more, then it may have just released CS + if (inMemRows.dequeue(e)) + { + b.leave(); + return getQRow(e); + } + else if (outputComplete)// && (nextInputRow == nextOutputRow)) + { + if (nextInputRow == nextOutputRow) + { + handleInputComplete(); // sets readState to rs_stopped + return nullptr; + } + else + { + // writer has finished, nothing is on the queue or will be queued, rest is on disk + readState = rs_endstream; + const void *row = readRowFromStream(); + ++nextInputRow; + return row; + } + } + readerWaitingForQ = true; + } + trace("READ: waiting for Q'd rows @ %" RCPF "u (nextOutputRow = %" RCPF "u)", nextInputRow, nextOutputRow.load()); + moreRows.wait(); + } + } + return nullptr; + } + case rs_frommarker: + { + return readToMarker(); + } + case rs_endstream: + { + if (nextInputRow == nextOutputRow) + { + readState = rs_stopped; + return nullptr; + } + const void *row = readRowFromStream(); + ++nextInputRow; + return row; + } + case rs_stopped: + return nullptr; + } + throwUnexpected(); + } + virtual void stop() override + { + CriticalBlock b(readerWriterCS); + handleInputComplete(); + } +// IRowWriter + virtual void putRow(const void *row) override + { + if (outputComplete) + { + // should never get here, but guard against. + OwnedConstThorRow tmpRow(row); + assertex(!row); + return; + } + + if (row) + { + lastWriteWasEog = false; + addRow(row); + } + else // eog + { + if (lastWriteWasEog) // error, should not have two EOGs in a row + return; + else if (grouped) + { + lastWriteWasEog = true; + addRow(nullptr); + } + else // non-grouped nulls unexpected + throwUnexpected(); + } + } + virtual void flush() override + { + // semantics of ISmartRowBuffer::flush: + // - tell smartbuf that there will be no more rows written (BUT should only be called after finished writing) + // - wait for all rows to be read from smartbuf, or smartbuf stopped before returning. 
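+        // Illustrative contract (not from the patch): the writing side ends with
+        //   { while (writing) writer->putRow(row); writer->flush(); }
+        // while another thread drains nextRow() until it returns null (or calls stop());
+        // flush() only returns once one of those two things has happened.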
+ + bool flushedToDisk = checkFlushToDisk(0); + { + CriticalBlock b(readerWriterCS); + outputComplete = true; + if (rs_stopped == readState) + return; + flushWaiting = true; + if (flushedToDisk) + checkReleaseReaderCommitBlocked(); + checkReleaseQBlockReader(); + } + flushWaitSem.wait(); + } +}; + + + ISmartRowBuffer * createSmartBuffer(CActivityBase *activity, const char * tempname, size32_t buffsize, IThorRowInterfaces *rowif) { Owned file = createIFile(tempname); @@ -617,6 +1190,11 @@ ISmartRowBuffer * createSmartInMemoryBuffer(CActivityBase *activity, IThorRowInt return new CSmartRowInMemoryBuffer(activity, rowIf, buffsize); } +ISmartRowBuffer * createCompressedSpillingRowStream(CActivityBase *activity, const char * tempBaseName, bool grouped, IThorRowInterfaces *rowif, const LookAheadOptions &options, ICompressHandler *compressHandler) +{ + return new CCompressedSpillingRowStream(activity, tempBaseName, grouped, rowif, options, compressHandler); +} + class COverflowableBuffer : public CSimpleInterface, implements IRowWriterMultiReader { CActivityBase &activity; @@ -1844,6 +2422,7 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf input; + unsigned numOutputs = 0; Linked meta; Linked serializer; Linked deserializer; @@ -1863,7 +2442,6 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOfopen(IFOcreate)); // kept for stats purposes - Owned out = createSerialOutputStream(iFileIO); - outputStream.setown(createBufferedOutputStream(out, options.storageBlockSize)); //prefered plane block size - if (compressHandler) - { - const char *compressOptions = nullptr; - Owned compressor = compressHandler->getCompressor(compressOptions); - Owned compressed = createCompressingOutputStream(outputStream, compressor); - outputStream.setown(createBufferedOutputStream(compressed, compressionBlockSize)); - } + auto res = createSerialOutputStream(iFile, compressHandler, options, numOutputs + 1); + outputStream.setown(std::get<0>(res)); + iFileIO.setown(std::get<1>(res)); totalInputRowsRead = inMemTotalRows; } void writeRowsFromInput() @@ -1940,7 +2511,7 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf= options.spillWriteAheadSize) + if (serializedSz >= options.writeAheadSize) break; } } @@ -1957,8 +2528,8 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf(row)); } public: - explicit CSharedFullSpillingWriteAhead(CActivityBase *_activity, unsigned numOutputs, IRowStream *_input, bool _inputGrouped, const SharedRowStreamReaderOptions &_options, IThorRowInterfaces *rowIf, const char *tempFileName, ICompressHandler *_compressHandler) - : activity(*_activity), input(_input), inputGrouped(_inputGrouped), options(_options), compressHandler(_compressHandler), + explicit CSharedFullSpillingWriteAhead(CActivityBase *_activity, unsigned _numOutputs, IRowStream *_input, bool _inputGrouped, const SharedRowStreamReaderOptions &_options, IThorRowInterfaces *rowIf, const char *tempFileName, ICompressHandler *_compressHandler) + : activity(*_activity), numOutputs(_numOutputs), input(_input), inputGrouped(_inputGrouped), options(_options), compressHandler(_compressHandler), meta(rowIf->queryRowMetaData()), serializer(rowIf->queryRowSerializer()), allocator(rowIf->queryRowAllocator()), deserializer(rowIf->queryRowDeserializer()) { assertex(input); @@ -1968,17 +2539,6 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf options.inMemMaxMem) inMemReadAheadGranularity = options.inMemMaxMem; - constexpr size32_t minCompressionBlockSize = 256 * 1024; - memsize_t 
 class COverflowableBuffer : public CSimpleInterface, implements IRowWriterMultiReader
 {
     CActivityBase &activity;
@@ -1844,6 +2422,7 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf<ISharedRowStreamReader>
     Linked<IRowStream> input;
+    unsigned numOutputs = 0;
     Linked<IOutputMetaData> meta;
     Linked<IOutputRowSerializer> serializer;
     Linked<IOutputRowDeserializer> deserializer;
@@ -1863,7 +2442,6 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf<ISharedRowStreamReader>
-        iFileIO.setown(iFile->open(IFOcreate)); // kept for stats purposes
-        Owned<ISerialOutputStream> out = createSerialOutputStream(iFileIO);
-        outputStream.setown(createBufferedOutputStream(out, options.storageBlockSize)); // preferred plane block size
-        if (compressHandler)
-        {
-            const char *compressOptions = nullptr;
-            Owned<ICompressor> compressor = compressHandler->getCompressor(compressOptions);
-            Owned<ISerialOutputStream> compressed = createCompressingOutputStream(outputStream, compressor);
-            outputStream.setown(createBufferedOutputStream(compressed, compressionBlockSize));
-        }
+        auto res = createSerialOutputStream(iFile, compressHandler, options, numOutputs + 1);
+        outputStream.setown(std::get<0>(res));
+        iFileIO.setown(std::get<1>(res));
         totalInputRowsRead = inMemTotalRows;
     }
     void writeRowsFromInput()
@@ -1940,7 +2511,7 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf<ISharedRowStreamReader>
-                if (serializedSz >= options.spillWriteAheadSize)
+                if (serializedSz >= options.writeAheadSize)
                     break;
             }
         }
@@ -1957,8 +2528,8 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf<ISharedRowStreamReader>
(row));
     }
 public:
-    explicit CSharedFullSpillingWriteAhead(CActivityBase *_activity, unsigned numOutputs, IRowStream *_input, bool _inputGrouped, const SharedRowStreamReaderOptions &_options, IThorRowInterfaces *rowIf, const char *tempFileName, ICompressHandler *_compressHandler)
-        : activity(*_activity), input(_input), inputGrouped(_inputGrouped), options(_options), compressHandler(_compressHandler),
+    explicit CSharedFullSpillingWriteAhead(CActivityBase *_activity, unsigned _numOutputs, IRowStream *_input, bool _inputGrouped, const SharedRowStreamReaderOptions &_options, IThorRowInterfaces *rowIf, const char *tempFileName, ICompressHandler *_compressHandler)
+        : activity(*_activity), numOutputs(_numOutputs), input(_input), inputGrouped(_inputGrouped), options(_options), compressHandler(_compressHandler),
         meta(rowIf->queryRowMetaData()), serializer(rowIf->queryRowSerializer()), allocator(rowIf->queryRowAllocator()), deserializer(rowIf->queryRowDeserializer())
     {
         assertex(input);
@@ -1968,17 +2539,6 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf<ISharedRowStreamReader>
         if (inMemReadAheadGranularity > options.inMemMaxMem)
             inMemReadAheadGranularity = options.inMemMaxMem;
-        constexpr size32_t minCompressionBlockSize = 256 * 1024;
-        memsize_t totalCompressionBufferSize = options.totalCompressionBufferSize;
-        if (totalCompressionBufferSize)
-        {
-            compressionBlockSize = (size32_t)(totalCompressionBufferSize / (numOutputs + 1)); // +1 for writer
-            if (compressionBlockSize < minCompressionBlockSize)
-            {
-                WARNLOG("Shared totalCompressionBufferSize=%" I64F "u, too small for number of outputs(%u). Using minCompressionBlockSize(%u) for writer and each reader.", (unsigned __int64)totalCompressionBufferSize, numOutputs, minCompressionBlockSize);
-                compressionBlockSize = minCompressionBlockSize;
-            }
-        }
         for (unsigned o=0; o<numOutputs; o++)
             outputs.push_back(new COutputRowStream(*this, o));
     }
     std::tuple<IBufferedSerialInputStream *, IFileIO *> getReadStream() // also pass back IFileIO for stats purposes
     {
-        Owned<IFileIO> iFileIO = iFile->open(IFOread);
-        Owned<ISerialInputStream> in = createSerialInputStream(iFileIO);
-        Owned<IBufferedSerialInputStream> inputStream = createBufferedInputStream(in, options.storageBlockSize, 0);
-        if (compressHandler)
-        {
-            const char *decompressOptions = nullptr;
-            Owned<IExpander> decompressor = compressHandler->getExpander(decompressOptions);
-            Owned<ISerialInputStream> decompressed = createDecompressingInputStream(inputStream, decompressor);
-            inputStream.setown(createBufferedInputStream(decompressed, compressionBlockSize, 0));
-        }
-        return { inputStream.getClear(), iFileIO.getClear() };
+        return createSerialInputStream(iFile, compressHandler, options, numOutputs + 1); // +1 for writer
     }
     bool checkWriteAhead(rowcount_t &outputRowsAvailable)
     {
diff --git a/thorlcr/thorutil/thbuf.hpp b/thorlcr/thorutil/thbuf.hpp
index dc64aeb888d..1750f63b007 100644
--- a/thorlcr/thorutil/thbuf.hpp
+++ b/thorlcr/thorutil/thbuf.hpp
@@ -37,6 +37,25 @@
 typedef QueueOf<const void, true> ThorRowQueue;
 
+struct CommonBufferRowRWStreamOptions
+{
+    offset_t storageBlockSize = 256 * 1024; // block size of read/write streams
+    size32_t minCompressionBlockSize = 256 * 1024; // minimum block size for compression
+    memsize_t totalCompressionBufferSize = 3000 * 1024; // compression buffer size of read streams (split between writer and outputs)
+    memsize_t inMemMaxMem = 2000 * 1024; // before spilling begins.
+    offset_t writeAheadSize = 2000 * 1024; // once spilling, maximum size to write ahead
+    unsigned heapFlags = roxiemem::RHFunique|roxiemem::RHFblocked;
+};
+
+struct LookAheadOptions : CommonBufferRowRWStreamOptions
+{
+    LookAheadOptions()
+    {
+        // override defaults
+        totalCompressionBufferSize = 2000 * 1024; // compression buffer size of read streams (split between writer and outputs)
+    }
+    offset_t tempFileGranularity = 1000 * 0x100000; // 1GB
+};
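+// NB: writeAheadSize must stay below tempFileGranularity (the compressed spilling
+// stream asserts this). Illustrative override of the defaults (values hypothetical):
+//     LookAheadOptions opts;
+//     opts.writeAheadSize = 4000 * 1024;      // allow ~4MB write-ahead once spilling
+//     opts.tempFileGranularity = 0x40000000;  // roll to a new temp file every 1GB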
 
 interface ISmartRowBuffer: extends IRowStream
@@ -55,15 +74,13 @@ extern graph_decl ISmartRowBuffer * createSmartInMemoryBuffer(CActivityBase *act
                                                      IThorRowInterfaces *rowIf,
                                                      size32_t buffsize);
 
-struct SharedRowStreamReaderOptions
+
+extern graph_decl ISmartRowBuffer * createCompressedSpillingRowStream(CActivityBase *activity, const char * tempBasename, bool grouped, IThorRowInterfaces *rowif, const LookAheadOptions &options, ICompressHandler *compressHandler);
+
+struct SharedRowStreamReaderOptions : public CommonBufferRowRWStreamOptions
 {
-    offset_t storageBlockSize = 256 * 1024; // block size of read/write streams
-    memsize_t totalCompressionBufferSize = 3000 * 1024; // compression buffer size of read streams (split between writer and outputs)
-    memsize_t inMemMaxMem = 2000 * 1024; // before spilling begins.
     memsize_t inMemReadAheadGranularity = 128 * 1024; // granularity (K) of read ahead
     rowcount_t inMemReadAheadGranularityRows = 64; // granularity (rows) of read ahead. NB: whichever granularity is hit first
-    offset_t spillWriteAheadSize = 2000 * 1024; // once spilling, maximum size to write ahead
-    unsigned heapFlags = roxiemem::RHFunique|roxiemem::RHFblocked;
 };
 
 interface ISharedRowStreamReader : extends IInterface
 {
diff --git a/thorlcr/thorutil/thormisc.hpp b/thorlcr/thorutil/thormisc.hpp
index cb259a7053a..d760f3d06da 100644
--- a/thorlcr/thorutil/thormisc.hpp
+++ b/thorlcr/thorutil/thormisc.hpp
@@ -59,7 +59,7 @@
 #define THOROPT_SPLITTER_READAHEADGRANULARITYK "inMemReadAheadGranularityK" // Splitter in memory read ahead granularity (K) (default = 128K)
 #define THOROPT_SPLITTER_READAHEADGRANULARITYROWS "inMemReadAheadGranularityRows" // Splitter in memory read ahead granularity (# rows) (default = 64)
 #define THOROPT_SPLITTER_WRITEAHEADK "splitterWriteAheadK" // Splitter spilling write ahead size (K) (default = 2MB)
-#define THOROPT_SPLITTER_COMPRESSIONTOALK "splitterCompressionTotalK" // Splitter total compression buffer size (shared between writer and readers) (K) (default = 3MB)
+#define THOROPT_SPLITTER_COMPRESSIONTOTALK "splitterCompressionTotalK" // Splitter total compression buffer size (shared between writer and readers) (K) (default = 3MB)
 #define THOROPT_LOOP_MAX_EMPTY "loopMaxEmpty" // Max # of iterations that LOOP can cycle through with 0 results before errors (default = 1000)
 #define THOROPT_SMALLSORT "smallSortThreshold" // Use minisort approach, if estimate size of data to sort is below this setting (default = 0)
 #define THOROPT_PARALLEL_FUNNEL "parallelFunnel" // Use parallel funnel impl. if !ordered (default = true)
@@ -121,6 +121,11 @@
 #define THOROPT_SORT_ALGORITHM "sortAlgorithm" // The algorithm used to sort records (quicksort/mergesort)
 #define THOROPT_COMPRESS_ALLFILES "compressAllOutputs" // Compress all output files (default: bare-metal=off, cloud=on)
 #define THOROPT_AVOID_RENAME "avoidRename" // Avoid rename, write directly to target physical filenames (no temp file)
+#define THOROPT_LOOKAHEAD_MAXROWMEMK "readAheadRowMemK" // Splitter max memory (K) to use before spilling (default = 2MB)
+#define THOROPT_LOOKAHEAD_WRITEAHEADK "readAheadWriteAheadK" // Splitter spilling write ahead size (K) (default = 2MB)
+#define THOROPT_LOOKAHEAD_COMPRESSIONTOTALK "readAheadCompressionTotalK" // Splitter total compression buffer size (shared between writer and readers) (K) (default = 3MB)
+#define THOROPT_LOOKAHEAD_TEMPFILE_GRANULARITY "readAheadTempFileGranularity" // Splitter temp file granularity (default = 1GB)
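+// Illustrative ECL usage of the new lookahead tunables (a sketch with hypothetical
+// values), set per workunit like the other Thor options above:
+//   #OPTION('readAheadRowMemK', 4096);           // 4MB in memory before the lookahead spills
+//   #OPTION('readAheadCompressionTotalK', 8192); // 8MB compression buffer shared across readers/writer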
+
 
 #define INITIAL_SELFJOIN_MATCH_WARNING_LEVEL 20000 // max of row matches before selfjoin emits warning
 
From 3fc8be88960e949c4ff7fbc72e3566e9365c59f6 Mon Sep 17 00:00:00 2001
From: Attila Vamos
Date: Thu, 27 Jun 2024 10:52:10 +0100
Subject: [PATCH 115/151] HPCC-32142 Add SVM to platform-ml and platform-gnn Docker images

Add missing install instruction for SVM to the relevant Dockerfiles

Signed-off-by: Attila Vamos
---
 dockerfiles/platform-gnn-gpu/Dockerfile | 2 ++
 dockerfiles/platform-gnn/Dockerfile | 1 +
 dockerfiles/platform-ml/Dockerfile | 1 +
 3 files changed, 4 insertions(+)

diff --git a/dockerfiles/platform-gnn-gpu/Dockerfile b/dockerfiles/platform-gnn-gpu/Dockerfile
index 181580d225f..e6df170e921 100644
--- a/dockerfiles/platform-gnn-gpu/Dockerfile
+++ b/dockerfiles/platform-gnn-gpu/Dockerfile
@@ -85,6 +85,8 @@ RUN apt clean && \
     apt autoremove && \
     apt-get update
 
+RUN apt-get install -y libsvm-dev libsvm-tools
+
 RUN apt-get install -y python3-pip --fix-missing
 RUN python3 -m pip --no-cache-dir install \
     setuptools
diff --git a/dockerfiles/platform-gnn/Dockerfile b/dockerfiles/platform-gnn/Dockerfile
index 20b3faba41e..3a735fa88f3 100644
--- a/dockerfiles/platform-gnn/Dockerfile
+++ b/dockerfiles/platform-gnn/Dockerfile
@@ -25,6 +25,7 @@ FROM ${DOCKER_REPO}/platform-core:${BUILD_LABEL}
 USER root
 
 RUN apt-get update -y && apt-get install -y python3-pip --fix-missing
+RUN apt-get install -y libsvm-dev libsvm-tools
 
 RUN python3 -m pip install --upgrade pip
 RUN pip3 install \
diff --git a/dockerfiles/platform-ml/Dockerfile b/dockerfiles/platform-ml/Dockerfile
index d6bfe648a85..0e0d4d8ce72 100644
--- a/dockerfiles/platform-ml/Dockerfile
+++ b/dockerfiles/platform-ml/Dockerfile
@@ -26,6 +26,7 @@ USER root
 
 # Machine Learning Dependencies
 RUN apt-get update && apt-get install -y python3 python3-pip --fix-missing
+RUN apt-get install -y libsvm-dev libsvm-tools
 
 RUN python3 -m pip install --upgrade pip
 RUN pip3 install \
     scikit-learn

From d05ab65745fd6e3587eaa6632e6bb9f6f5c60e1 Mon Sep 17 00:00:00 2001
From: Gavin Halliday
Date: Thu, 27 Jun 2024 10:19:22 +0100
Subject: [PATCH 116/151] HPCC-32164 Minor improvements to new filename gathering code

Signed-off-by: Gavin Halliday
---
 common/workunit/workunit.cpp | 23 ++++++++++++++++-------
 ecl/hqlcpp/hqlcpp.ipp | 30 +++++++++++++++++-------------
 2 files changed, 33 insertions(+), 20 deletions(-)

diff --git a/common/workunit/workunit.cpp b/common/workunit/workunit.cpp
index 1337d27a276..4b5b7223a9d 100644
--- a/common/workunit/workunit.cpp
+++ b/common/workunit/workunit.cpp
@@ -8745,12 +8745,20 @@ static const char *summaryTypeName(SummaryType type)
 bool CLocalWorkUnit::getSummary(SummaryType type, SummaryMap &map) const
 {
     VStringBuffer xpath("Summaries/%s", summaryTypeName(type));
-    CriticalBlock block(crit);
-    const char *list = p->queryProp(xpath);
-    if (!list)
-        return false;
     StringArray s;
-    s.appendList(list, "\n");
+    {
+        CriticalBlock block(crit);
+        IPropertyTree * match = p->queryPropTree(xpath);
+        //If there is no entry then the information is not recorded in the workunit
+        if (!match)
+            return false;
+
+        const char *list = match->queryProp(nullptr);
+        //If the information was recorded, return true, even if there are no results
+        if (!list)
+            return true;
+        s.appendList(list, "\n");
+    }
     ForEachItemIn(idx, s)
     {
         const char *name = s.item(idx);
@@ -8761,10 +8769,11 @@ bool CLocalWorkUnit::getSummary(SummaryType type, SummaryMap &map) const
         if (*end!=':')
             return false; // unrecognized format
         name = end+1;
-        if (map.find(name) == map.end())
+        auto match = map.find(name);
+        if (match == map.end())
             map[name] = flags;
         else
-            map[name] = map[name] & flags;
+            match->second &= flags;
     }
     return true;
diff --git a/ecl/hqlcpp/hqlcpp.ipp b/ecl/hqlcpp/hqlcpp.ipp
index 0607c583892..ce13dba6eef 100644
--- a/ecl/hqlcpp/hqlcpp.ipp
+++ b/ecl/hqlcpp/hqlcpp.ipp
@@ -2146,19 +2146,23 @@ protected:
     SummaryMap summaries[(int) SummaryType::NumItems];
     void noteSummaryInfo(const char *name, SummaryType type, bool isOpt, bool isSigned)
     {
-        if (type != SummaryType::None)
-        {
-            SummaryMap &map = summaries[(int) type];
-            SummaryFlags flags = SummaryFlags::None;
-            if (isOpt)
-                flags |= SummaryFlags::IsOpt;
-            if (isSigned)
-                flags |= SummaryFlags::IsSigned;
-            if (map.find(name) == map.end())
-                map[name] = flags;
-            else
-                map[name] = map[name] & flags;
-        }
+        if (type == SummaryType::None)
+            return;
+        //Spill files are meaningless in roxie, and there is currently no benefit in recording them for hthor/thor
+        if (type == SummaryType::SpillFile)
+            return;
+
+        SummaryMap &map = summaries[(int) type];
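+        // Note: the find/merge below ANDs flags across repeated notes of the same name,
+        // e.g. (illustrative) a file noted once with IsOpt and once without any flags
+        // ends up recorded with SummaryFlags::None.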
SummaryFlags flags = SummaryFlags::None; + if (isOpt) + flags |= SummaryFlags::IsOpt; + if (isSigned) + flags |= SummaryFlags::IsSigned; + auto match = map.find(name); + if (match == map.end()) + map[name] = flags; + else + match->second &= flags; } }; From 9d2ef45da88d80a6bee63cdd7f7b50835bc15f7b Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Fri, 21 Jun 2024 08:06:16 +0100 Subject: [PATCH 117/151] HPCC-32158 Add SQL Driven OLAP engine for WU Metrics Signed-off-by: Gordon Smith --- esp/src/package-lock.json | 381 ++++++++++++------ esp/src/package.json | 7 +- esp/src/src-react/components/Metrics.tsx | 68 ++-- .../src-react/components/MetricsOptions.tsx | 19 +- esp/src/src-react/components/MetricsSQL.tsx | 174 ++++++++ esp/src/src-react/components/SourceEditor.tsx | 107 ++++- .../src-react/components/WorkunitDetails.tsx | 16 +- esp/src/src-react/hooks/duckdb.ts | 59 +++ esp/src/src-react/hooks/metrics.ts | 40 +- esp/src/src-react/layouts/DockPanel.tsx | 9 - esp/src/src/nls/hpcc.ts | 2 + esp/src/tsconfig.json | 14 +- esp/src/webpack.config.js | 5 + 13 files changed, 683 insertions(+), 218 deletions(-) create mode 100644 esp/src/src-react/components/MetricsSQL.tsx create mode 100644 esp/src/src-react/hooks/duckdb.ts diff --git a/esp/src/package-lock.json b/esp/src/package-lock.json index 1a1d9ac0fbf..06de90bbe29 100644 --- a/esp/src/package-lock.json +++ b/esp/src/package-lock.json @@ -16,11 +16,11 @@ "@fluentui/react-icons-mdl2": "1.3.59", "@fluentui/react-migration-v8-v9": "9.6.3", "@hpcc-js/chart": "2.83.3", - "@hpcc-js/codemirror": "2.61.4", + "@hpcc-js/codemirror": "2.62.0", "@hpcc-js/common": "2.71.17", "@hpcc-js/comms": "2.92.2", "@hpcc-js/dataflow": "8.1.6", - "@hpcc-js/eclwatch": "2.74.3", + "@hpcc-js/eclwatch": "2.74.5", "@hpcc-js/graph": "2.85.15", "@hpcc-js/html": "2.42.20", "@hpcc-js/layout": "2.49.22", @@ -30,6 +30,7 @@ "@hpcc-js/react": "2.53.16", "@hpcc-js/tree": "2.40.17", "@hpcc-js/util": "2.51.0", + "@hpcc-js/wasm": "2.17.1", "@kubernetes/client-node": "0.20.0", "clipboard": "2.0.11", "d3-dsv": "3.0.1", @@ -170,6 +171,7 @@ "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", "dev": true, + "license": "MIT", "dependencies": { "ajv": "^6.12.4", "debug": "^4.3.2", @@ -193,6 +195,7 @@ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.0.tgz", "integrity": "sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==", "dev": true, + "license": "MIT", "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } @@ -1824,9 +1827,10 @@ } }, "node_modules/@hpcc-js/codemirror": { - "version": "2.61.4", - "resolved": "https://registry.npmjs.org/@hpcc-js/codemirror/-/codemirror-2.61.4.tgz", - "integrity": "sha512-rscy1L5EcRhRtldjjwdurxC8RLWW8KY+B8EYj/XXH25blpvlt3P05Bdd6kotBIG18sV33sezaydhM7dqs+iltg==", + "version": "2.62.0", + "resolved": "https://registry.npmjs.org/@hpcc-js/codemirror/-/codemirror-2.62.0.tgz", + "integrity": "sha512-KgVvmPKVJWS6nG3pLsGxRApLRo259Tpf0EEIHQtbqFQHbFHQLr9r2T6aAMtoh4eehqvkqUedsorCCnmlfZCx7A==", + "license": "Apache-2.0", "dependencies": { "@hpcc-js/common": "^2.71.17" } @@ -1932,13 +1936,14 @@ } }, "node_modules/@hpcc-js/eclwatch": { - "version": "2.74.3", - "resolved": "https://registry.npmjs.org/@hpcc-js/eclwatch/-/eclwatch-2.74.3.tgz", - "integrity": "sha512-tsJfXAbREXNXAzui8Mc7Vb9J2xmc1A40I2+pTTOFnVeHPv8bzDvc5sGQXgRrkqqOkeMwzGsnlpbVmC7zTZ33UA==", + "version": "2.74.5", + 
"resolved": "https://registry.npmjs.org/@hpcc-js/eclwatch/-/eclwatch-2.74.5.tgz", + "integrity": "sha512-KGpefRbFD0ZIOq7eV3kF6Of2uG7wFA8C2o/vUUUy5+E+eG46qZRGqo2G4jLYqXbbCQ1RO8XHVcnXfeWR1XB4AQ==", + "license": "Apache-2.0", "dependencies": { - "@hpcc-js/codemirror": "^2.61.4", + "@hpcc-js/codemirror": "^2.62.0", "@hpcc-js/common": "^2.71.17", - "@hpcc-js/comms": "^2.92.1", + "@hpcc-js/comms": "^2.92.2", "@hpcc-js/dgrid": "^2.32.20", "@hpcc-js/graph": "^2.85.15", "@hpcc-js/layout": "^2.49.22", @@ -2083,11 +2088,66 @@ "tslib": "2.6.2" } }, + "node_modules/@hpcc-js/wasm": { + "version": "2.17.1", + "resolved": "https://registry.npmjs.org/@hpcc-js/wasm/-/wasm-2.17.1.tgz", + "integrity": "sha512-IeQFVcRir9vRbJqG/Eje2S/sIHatw/cx7Mp62S+J5VKiglc56kNUe8CxuZIeJaIo6YEuhIio/KnE3XN9oPI1Pg==", + "license": "Apache-2.0", + "dependencies": { + "yargs": "17.7.2" + }, + "bin": { + "dot-wasm": "bin/dot-wasm.js" + } + }, + "node_modules/@hpcc-js/wasm/node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@hpcc-js/wasm/node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@hpcc-js/wasm/node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, "node_modules/@humanwhocodes/config-array": { "version": "0.11.14", "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", + "deprecated": "Use @eslint/config-array instead", "dev": true, + "license": "Apache-2.0", "dependencies": { "@humanwhocodes/object-schema": "^2.0.2", "debug": "^4.3.1", @@ -2102,6 +2162,7 @@ "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", "dev": true, + "license": "Apache-2.0", "engines": { "node": ">=12.22" }, @@ -2111,10 +2172,12 @@ } }, "node_modules/@humanwhocodes/object-schema": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.2.tgz", - "integrity": "sha512-6EwiSjwWYP7pTckG6I5eyFANjPhmPjUX9JRLUSfNPC7FX7zK9gyZAfUEaECL6ALTpGX5AjnBq3C9XmVWPitNpw==", - "dev": true + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "deprecated": "Use 
@eslint/object-schema instead", + "dev": true, + "license": "BSD-3-Clause" }, "node_modules/@isaacs/cliui": { "version": "8.0.2", @@ -2213,14 +2276,15 @@ } }, "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", - "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", "dev": true, + "license": "MIT", "dependencies": { - "@jridgewell/set-array": "^1.0.1", + "@jridgewell/set-array": "^1.2.1", "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.9" + "@jridgewell/trace-mapping": "^0.3.24" }, "engines": { "node": ">=6.0.0" @@ -2236,10 +2300,11 @@ } }, "node_modules/@jridgewell/set-array": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", - "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", "dev": true, + "license": "MIT", "engines": { "node": ">=6.0.0" } @@ -2261,10 +2326,11 @@ "dev": true }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.22", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.22.tgz", - "integrity": "sha512-Wf963MzWtA2sjrNt+g18IAln9lKnlRp+K2eH4jjIoF1wYeq3aMREpG09xhlhdzS0EjwU7qmUJYangWa+151vZw==", + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", "dev": true, + "license": "MIT", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" @@ -2307,10 +2373,11 @@ } }, "node_modules/@leichtgewicht/ip-codec": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz", - "integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A==", - "dev": true + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz", + "integrity": "sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw==", + "dev": true, + "license": "MIT" }, "node_modules/@lumino/algorithm": { "version": "1.9.2", @@ -3168,7 +3235,8 @@ "version": "0.12.0", "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/@types/scheduler": { "version": "0.16.2", @@ -3451,7 +3519,8 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", - "dev": true + "dev": true, + "license": "ISC" }, "node_modules/@webassemblyjs/ast": { "version": "1.12.1", @@ -3704,6 +3773,7 @@ 
"resolved": "https://registry.npmjs.org/acorn-import-assertions/-/acorn-import-assertions-1.9.0.tgz", "integrity": "sha512-cmMwop9x+8KFhxvKrKfPYmN6/pKTYYHBqLa0DfvVZcKMJWNyWLnaqND7dx/qn66R7ewM1UX5XMaDVP5wlVTaVA==", "dev": true, + "license": "MIT", "peerDependencies": { "acorn": "^8" } @@ -3713,6 +3783,7 @@ "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", "dev": true, + "license": "MIT", "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } @@ -3832,7 +3903,6 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, "engines": { "node": ">=8" } @@ -3841,7 +3911,6 @@ "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, "dependencies": { "color-convert": "^2.0.1" }, @@ -3863,6 +3932,7 @@ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", "dev": true, + "license": "ISC", "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" @@ -3885,12 +3955,6 @@ "node": ">=12.17" } }, - "node_modules/array-flatten": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz", - "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ==", - "dev": true - }, "node_modules/array-union": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", @@ -3986,12 +4050,16 @@ } }, "node_modules/binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/body-parser": { @@ -4059,13 +4127,12 @@ } }, "node_modules/bonjour-service": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.1.1.tgz", - "integrity": "sha512-Z/5lQRMOG9k7W+FkeGTNjh7htqn/2LMnfOvBZ8pynNZCM9MwkQkI3zeI4oz09uWdcgmgHugVvBqxGg4VQJ5PCg==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.2.1.tgz", + "integrity": "sha512-oSzCS2zV14bh2kji6vNe7vrpJYCHGvcZnlffFQ1MEoX/WOeQ/teD8SYWKR942OI3INjq8OMNJlbPK5LLLUxFDw==", "dev": true, + "license": "MIT", "dependencies": { - "array-flatten": "^2.1.2", - "dns-equal": "^1.0.0", "fast-deep-equal": "^3.1.3", "multicast-dns": "^7.2.5" } @@ -4086,21 +4153,22 @@ } }, "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + 
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dev": true, + "license": "MIT", "dependencies": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" }, "engines": { "node": ">=8" } }, "node_modules/browserslist": { - "version": "4.22.2", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.22.2.tgz", - "integrity": "sha512-0UgcrvQmBDvZHFGdYUehrCNIazki7/lUP3kkoi/r3YB2amZbFM9J43ZRkJTXBUZK4gmx56+Sqk9+Vs9mwZx9+A==", + "version": "4.23.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.1.tgz", + "integrity": "sha512-TUfofFo/KsK/bWZ9TWQ5O26tsWW4Uhmt8IYklbnUa70udB6P2wA7w7o4PY4muaEPBQaAX+CEnmmIA41NVHtPVw==", "dev": true, "funding": [ { @@ -4116,11 +4184,12 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { - "caniuse-lite": "^1.0.30001565", - "electron-to-chromium": "^1.4.601", + "caniuse-lite": "^1.0.30001629", + "electron-to-chromium": "^1.4.796", "node-releases": "^2.0.14", - "update-browserslist-db": "^1.0.13" + "update-browserslist-db": "^1.0.16" }, "bin": { "browserslist": "cli.js" @@ -4202,14 +4271,15 @@ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/caniuse-lite": { - "version": "1.0.30001579", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001579.tgz", - "integrity": "sha512-u5AUVkixruKHJjw/pj9wISlcMpgFWzSrczLZbrqBSxukQixmg0SJ5sZTpvaFvxU0HoQKd4yoyAogyrAz9pzJnA==", + "version": "1.0.30001637", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001637.tgz", + "integrity": "sha512-1x0qRI1mD1o9e+7mBI7XtzFAP4XszbHaVWsMiGbSPLYekKTJF7K+FNk6AsXH4sUpc+qrsI3pVgf1Jdl/uGkuSQ==", "dev": true, "funding": [ { @@ -4224,7 +4294,8 @@ "type": "github", "url": "https://github.com/sponsors/ai" } - ] + ], + "license": "CC-BY-4.0" }, "node_modules/caseless": { "version": "0.12.0", @@ -4248,16 +4319,11 @@ } }, "node_modules/chokidar": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", - "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", "dev": true, - "funding": [ - { - "type": "individual", - "url": "https://paulmillr.com/funding/" - } - ], + "license": "MIT", "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", @@ -4270,6 +4336,9 @@ "engines": { "node": ">= 8.10.0" }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, "optionalDependencies": { "fsevents": "~2.3.2" } @@ -4279,6 +4348,7 @@ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", "dev": true, + "license": "ISC", "dependencies": { "is-glob": "^4.0.1" }, @@ -4372,7 +4442,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, 
"dependencies": { "color-name": "~1.1.4" }, @@ -4383,8 +4452,7 @@ "node_modules/color-name": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" }, "node_modules/colorette": { "version": "2.0.16", @@ -5048,17 +5116,12 @@ "node": ">=8" } }, - "node_modules/dns-equal": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz", - "integrity": "sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg==", - "dev": true - }, "node_modules/dns-packet": { "version": "5.6.1", "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", "dev": true, + "license": "MIT", "dependencies": { "@leichtgewicht/ip-codec": "^2.0.1" }, @@ -5165,10 +5228,11 @@ "dev": true }, "node_modules/electron-to-chromium": { - "version": "1.4.643", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.643.tgz", - "integrity": "sha512-QHscvvS7gt155PtoRC0dR2ilhL8E9LHhfTQEq1uD5AL0524rBLAwpAREFH06f87/e45B9XkR6Ki5dbhbCsVEIg==", - "dev": true + "version": "1.4.812", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.812.tgz", + "integrity": "sha512-7L8fC2Ey/b6SePDFKR2zHAy4mbdp1/38Yk5TsARO66W3hC5KEaeKMMHoxwtuH+jcu2AYLSn9QX04i95t6Fl1Hg==", + "dev": true, + "license": "ISC" }, "node_modules/element-resize-detector": { "version": "1.2.4", @@ -5181,8 +5245,7 @@ "node_modules/emoji-regex": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" }, "node_modules/emojis-list": { "version": "3.0.0", @@ -5203,10 +5266,11 @@ } }, "node_modules/enhanced-resolve": { - "version": "5.16.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.16.0.tgz", - "integrity": "sha512-O+QWCviPNSSLAD9Ucn8Awv+poAkqn3T1XY5/N7kR7rQO9yfSGWkYZDwpJ+iKF7B8rxaQKWngSqACpgzeapSyoA==", + "version": "5.17.0", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.0.tgz", + "integrity": "sha512-dwDPwZL0dmye8Txp2gzFmA6sxALaSvdRDjPH0viLcKrtlOL3tw62nWWweVD1SdILDTJrbrL6tdWVN58Wo6U3eA==", "dev": true, + "license": "MIT", "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" @@ -5299,10 +5363,10 @@ "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" }, "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", - "dev": true, + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "license": "MIT", "engines": { "node": ">=6" } @@ -5330,6 +5394,7 @@ "resolved": 
"https://registry.npmjs.org/eslint/-/eslint-8.57.0.tgz", "integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==", "dev": true, + "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.6.1", @@ -5389,6 +5454,7 @@ "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz", "integrity": "sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" }, @@ -5451,6 +5517,7 @@ "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", "dev": true, + "license": "MIT", "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" @@ -5467,6 +5534,7 @@ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", "dev": true, + "license": "MIT", "dependencies": { "p-locate": "^5.0.0" }, @@ -5482,6 +5550,7 @@ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "dev": true, + "license": "MIT", "dependencies": { "yocto-queue": "^0.1.0" }, @@ -5497,6 +5566,7 @@ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", "dev": true, + "license": "MIT", "dependencies": { "p-limit": "^3.0.2" }, @@ -5512,6 +5582,7 @@ "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", "dev": true, + "license": "BSD-2-Clause", "dependencies": { "acorn": "^8.9.0", "acorn-jsx": "^5.3.2", @@ -5870,10 +5941,11 @@ } }, "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dev": true, + "license": "MIT", "dependencies": { "to-regex-range": "^5.0.1" }, @@ -6130,10 +6202,11 @@ } }, "node_modules/fs-monkey": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.5.tgz", - "integrity": "sha512-8uMbBjrhzW76TYgEV27Y5E//W2f/lTFmx78P2w19FZSxarhI/798APGQyuGCwmkNxgwGRhrLfvWyLBvNtuOmew==", - "dev": true + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.6.tgz", + "integrity": "sha512-b1FMfwetIKymC0eioW7mTywihSQE4oLzQn1dB6rZB5fx/3NpNEdAWeCSMB+60/AeT0TCXsxzAlcYVEFCTAksWg==", + "dev": true, + "license": "Unlicense" }, "node_modules/fs.realpath": { "version": "1.0.0", @@ -6141,6 +6214,21 @@ "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", "dev": true }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": 
true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, "node_modules/function-bind": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", @@ -6151,7 +6239,6 @@ "version": "2.0.5", "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "dev": true, "engines": { "node": "6.* || 8.* || >= 10.*" } @@ -6254,6 +6341,7 @@ "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", "dev": true, + "license": "MIT", "dependencies": { "type-fest": "^0.20.2" }, @@ -6644,6 +6732,7 @@ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", "dev": true, + "license": "MIT", "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" @@ -6779,6 +6868,7 @@ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", "dev": true, + "license": "MIT", "dependencies": { "binary-extensions": "^2.0.0" }, @@ -6869,7 +6959,6 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true, "engines": { "node": ">=8" } @@ -6918,6 +7007,7 @@ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.12.0" } @@ -6950,6 +7040,7 @@ "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" } @@ -8010,6 +8101,7 @@ "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", "dev": true, + "license": "Unlicense", "dependencies": { "fs-monkey": "^1.0.4" }, @@ -8223,6 +8315,7 @@ "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz", "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==", "dev": true, + "license": "MIT", "dependencies": { "dns-packet": "^5.2.2", "thunky": "^1.0.2" @@ -8366,6 +8459,7 @@ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", "dev": true, + "license": "MIT", "engines": { "node": ">=0.10.0" } @@ -8781,6 +8875,7 @@ "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", "dev": true, + "license": "MIT", "dependencies": { "@types/retry": "0.12.0", "retry": "^0.13.1" @@ -8803,6 
+8898,7 @@ "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", "dev": true, + "license": "MIT", "dependencies": { "callsites": "^3.0.0" }, @@ -8919,10 +9015,11 @@ "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" }, "node_modules/picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", - "dev": true + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", + "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==", + "dev": true, + "license": "ISC" }, "node_modules/picomatch": { "version": "2.3.1", @@ -9437,6 +9534,7 @@ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", "dev": true, + "license": "MIT", "dependencies": { "picomatch": "^2.2.1" }, @@ -9526,7 +9624,6 @@ "version": "2.1.1", "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", - "dev": true, "engines": { "node": ">=0.10.0" } @@ -9594,6 +9691,7 @@ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", "dev": true, + "license": "MIT", "engines": { "node": ">=4" } @@ -9652,6 +9750,7 @@ "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", "dev": true, + "license": "MIT", "engines": { "node": ">= 4" } @@ -10540,7 +10639,6 @@ "version": "4.2.3", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", @@ -10612,7 +10710,6 @@ "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, "dependencies": { "ansi-regex": "^5.0.1" }, @@ -10656,6 +10753,7 @@ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", "dev": true, + "license": "MIT", "engines": { "node": ">=8" }, @@ -10760,19 +10858,29 @@ } }, "node_modules/tar": { - "version": "6.1.11", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.11.tgz", - "integrity": "sha512-an/KZQzQUkZCkuoAA64hM92X0Urb6VpRhAFllDzz44U2mcD5scmT3zBc4VgVpkugF580+DQn8eAFSyoQt0tznA==", + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "license": "ISC", "dependencies": { "chownr": "^2.0.0", "fs-minipass": "^2.0.0", - "minipass": "^3.0.0", + "minipass": "^5.0.0", "minizlib": "^2.1.1", "mkdirp": "^1.0.3", "yallist": "^4.0.0" }, "engines": { - "node": 
">= 10" + "node": ">=10" + } + }, + "node_modules/tar/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "license": "ISC", + "engines": { + "node": ">=8" } }, "node_modules/terser": { @@ -10912,7 +11020,8 @@ "version": "1.1.0", "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz", "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/tiny-emitter": { "version": "2.1.0", @@ -10937,6 +11046,7 @@ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", "dev": true, + "license": "MIT", "dependencies": { "is-number": "^7.0.0" }, @@ -11029,6 +11139,7 @@ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", "dev": true, + "license": "(MIT OR CC0-1.0)", "engines": { "node": ">=10" }, @@ -11143,9 +11254,9 @@ } }, "node_modules/update-browserslist-db": { - "version": "1.0.13", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", - "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==", + "version": "1.0.16", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.16.tgz", + "integrity": "sha512-KVbTxlBYlckhF5wgfyZXTWnMn7MMZjMu9XG8bPlliUOP9ThaF4QnhP8qrjrH7DRzHfSk0oQv1wToW+iA5GajEQ==", "dev": true, "funding": [ { @@ -11161,9 +11272,10 @@ "url": "https://github.com/sponsors/ai" } ], + "license": "MIT", "dependencies": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" + "escalade": "^3.1.2", + "picocolors": "^1.0.1" }, "bin": { "update-browserslist-db": "cli.js" @@ -11320,6 +11432,7 @@ "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.91.0.tgz", "integrity": "sha512-rzVwlLeBWHJbmgTC/8TvAcu5vpJNII+MelQpylD4jNERPwpBJOE2lEcko1zJX3QJeLjTTAnQxn/OJ8bjDzVQaw==", "dev": true, + "license": "MIT", "dependencies": { "@types/eslint-scope": "^3.7.3", "@types/estree": "^1.0.5", @@ -11421,6 +11534,7 @@ "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz", "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==", "dev": true, + "license": "MIT", "dependencies": { "colorette": "^2.0.10", "memfs": "^3.4.3", @@ -11440,15 +11554,16 @@ } }, "node_modules/webpack-dev-middleware/node_modules/ajv": { - "version": "8.12.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", - "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "version": "8.16.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.16.0.tgz", + "integrity": "sha512-F0twR8U1ZU67JIEtekUcLkXkoO5mMMmgGD8sK/xUFzJ805jxHQl92hImFAqqXMyMYjSPOyUPAwHYhB72g5sTXw==", "dev": true, + "license": "MIT", "dependencies": { - "fast-deep-equal": "^3.1.1", + "fast-deep-equal": "^3.1.3", "json-schema-traverse": "^1.0.0", "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" + "uri-js": "^4.4.1" }, "funding": { "type": "github", @@ -11460,6 +11575,7 @@ "resolved": 
"https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz", "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", "dev": true, + "license": "MIT", "dependencies": { "fast-deep-equal": "^3.1.3" }, @@ -11471,13 +11587,15 @@ "version": "1.0.0", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", - "dev": true + "dev": true, + "license": "MIT" }, "node_modules/webpack-dev-middleware/node_modules/schema-utils": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.2.0.tgz", "integrity": "sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw==", "dev": true, + "license": "MIT", "dependencies": { "@types/json-schema": "^7.0.9", "ajv": "^8.9.0", @@ -11497,6 +11615,7 @@ "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.2.tgz", "integrity": "sha512-0XavAZbNJ5sDrCbkpWL8mia0o5WPOd2YGtxrEiZkBK9FjLppIUK2TgxK6qGD2P3hUXTJNNPVibrerKcx5WkR1g==", "dev": true, + "license": "MIT", "dependencies": { "@types/bonjour": "^3.5.9", "@types/connect-history-api-fallback": "^1.3.5", @@ -11589,7 +11708,9 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", "dev": true, + "license": "ISC", "dependencies": { "glob": "^7.1.3" }, @@ -11736,7 +11857,6 @@ "version": "7.0.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", @@ -11773,9 +11893,10 @@ "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" }, "node_modules/ws": { - "version": "8.14.2", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.14.2.tgz", - "integrity": "sha512-wEBG1ftX4jcglPxgFCMJmZ2PLtSbJ2Peg6TmpJFTbe9GZYOQCDPdMYu/Tm0/bGZkw8paZnJY45J4K2PZrLYq8g==", + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", + "license": "MIT", "engines": { "node": ">=10.0.0" }, @@ -11813,7 +11934,6 @@ "version": "5.0.8", "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "dev": true, "engines": { "node": ">=10" } @@ -11864,6 +11984,7 @@ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" }, diff --git a/esp/src/package.json b/esp/src/package.json index b104c55aed4..9f825f692bf 100644 --- a/esp/src/package.json +++ b/esp/src/package.json @@ -42,11 +42,11 @@ "@fluentui/react-icons-mdl2": "1.3.59", "@fluentui/react-migration-v8-v9": "9.6.3", "@hpcc-js/chart": "2.83.3", - "@hpcc-js/codemirror": "2.61.4", + "@hpcc-js/codemirror": "2.62.0", "@hpcc-js/common": "2.71.17", "@hpcc-js/comms": "2.92.2", "@hpcc-js/dataflow": "8.1.6", - "@hpcc-js/eclwatch": "2.74.3", + 
"@hpcc-js/eclwatch": "2.74.5", "@hpcc-js/graph": "2.85.15", "@hpcc-js/html": "2.42.20", "@hpcc-js/layout": "2.49.22", @@ -56,6 +56,7 @@ "@hpcc-js/react": "2.53.16", "@hpcc-js/tree": "2.40.17", "@hpcc-js/util": "2.51.0", + "@hpcc-js/wasm": "2.17.1", "@kubernetes/client-node": "0.20.0", "clipboard": "2.0.11", "d3-dsv": "3.0.1", @@ -67,9 +68,9 @@ "es6-promise": "4.2.8", "font-awesome": "4.7.0", "formik": "2.4.5", + "octokit": "3.1.2", "put-selector": "0.3.6", "query-string": "7.1.3", - "octokit": "3.1.2", "react": "17.0.2", "react-dom": "17.0.2", "react-hook-form": "7.51.2", diff --git a/esp/src/src-react/components/Metrics.tsx b/esp/src/src-react/components/Metrics.tsx index 5373bb6ab59..ee05e3055e3 100644 --- a/esp/src/src-react/components/Metrics.tsx +++ b/esp/src/src-react/components/Metrics.tsx @@ -23,6 +23,7 @@ import { ShortVerticalDivider } from "./Common"; import { MetricsOptions } from "./MetricsOptions"; import { BreadcrumbInfo, OverflowBreadcrumb } from "./controls/OverflowBreadcrumb"; import { MetricsPropertiesTables } from "./MetricsPropertiesTables"; +import { MetricsSQL } from "./MetricsSQL"; const logger = scopedLogger("src-react/components/Metrics.tsx"); @@ -77,29 +78,34 @@ class TableEx extends Table { _rawDataMap: { [id: number]: string } = {}; metrics(metrics: any[], options: MetricsOptionsT, timelineFilter: string, scopeFilter: string): this { - this.columns(["##", nlsHPCC.Type, nlsHPCC.Scope, ...options.properties]); - this.data(metrics.filter(m => this.scopeFilterFunc(m, scopeFilter)).filter(row => { - return (timelineFilter === "" || row.name?.indexOf(timelineFilter) === 0) && - (options.scopeTypes.indexOf(row.type) >= 0); - }).map((row, idx) => { - if (idx === 0) { - this._rawDataMap = { - 0: "##", 1: "type", 2: "name" - }; - options.properties.forEach((p, idx2) => { - this._rawDataMap[3 + idx2] = p; - }); - } - row.__hpcc_id = row.name; - return [idx, row.type, row.name, ...options.properties.map(p => { - return row.__groupedProps[p]?.Value ?? - row.__groupedProps[p]?.Max ?? - row.__groupedProps[p]?.Avg ?? - row.__formattedProps[p] ?? - row[p] ?? - ""; - }), row]; - })); + this + .columns(["##"]) // Reset hash to force recalculation of default widths + .columns(["##", nlsHPCC.Type, nlsHPCC.Scope, ...options.properties]) + .data(metrics + .filter(m => this.scopeFilterFunc(m, scopeFilter)) + .filter(row => { + return (timelineFilter === "" || row.name?.indexOf(timelineFilter) === 0) && + (options.scopeTypes.indexOf(row.type) >= 0); + }).map((row, idx) => { + if (idx === 0) { + this._rawDataMap = { + 0: "##", 1: "type", 2: "name" + }; + options.properties.forEach((p, idx2) => { + this._rawDataMap[3 + idx2] = p; + }); + } + row.__hpcc_id = row.name; + return [idx, row.type, row.name, ...options.properties.map(p => { + return row.__groupedProps[p]?.Value ?? + row.__groupedProps[p]?.Max ?? + row.__groupedProps[p]?.Avg ?? + row.__formattedProps[p] ?? + row[p] ?? 
+ ""; + }), row]; + })) + ; return this; } @@ -129,6 +135,8 @@ class TableEx extends Table { } } +type SelectedMetricsSource = "" | "scopesTable" | "scopesSqlTable" | "metricGraphWidget" | "hotspot" | "reset"; + interface MetricsProps { wuid: string; querySet?: string; @@ -146,7 +154,7 @@ export const Metrics: React.FunctionComponent = ({ }) => { const [_uiState, _setUIState] = React.useState({ ...defaultUIState }); const [timelineFilter, setTimelineFilter] = React.useState(""); - const [selectedMetricsSource, setSelectedMetricsSource] = React.useState<"" | "scopesTable" | "metricGraphWidget" | "hotspot" | "reset">(""); + const [selectedMetricsSource, setSelectedMetricsSource] = React.useState(""); const [selectedMetrics, setSelectedMetrics] = React.useState([]); const [selectedMetricsPtr, setSelectedMetricsPtr] = React.useState(-1); const [metrics, columns, _activities, _properties, _measures, _scopeTypes, fetchStatus, refresh] = useWUQueryMetrics(wuid, querySet, queryId); @@ -243,15 +251,18 @@ export const Metrics: React.FunctionComponent = ({ setScopeFilter(newValue || ""); }, []); + const scopesSelectionChanged = React.useCallback((source: SelectedMetricsSource, selection: IScope[]) => { + setSelectedMetricsSource(source); + pushUrl(`${parentUrl}/${selection.map(row => row.__lparam?.id ?? row.id).join(",")}`); + }, [parentUrl]); + const scopesTable = useConst(() => new TableEx() .multiSelect(true) .metrics([], options, timelineFilter, scopeFilter) .sortable(true) .on("click", debounce((row, col, sel) => { if (sel) { - const selection = scopesTable.selection(); - setSelectedMetricsSource("scopesTable"); - pushUrl(`${parentUrl}/${selection.map(row => row.__lparam.id).join(",")}`); + scopesSelectionChanged("scopesTable", scopesTable.selection()); } }, 100)) ); @@ -617,6 +628,9 @@ export const Metrics: React.FunctionComponent = ({ main={} /> + + scopesSelectionChanged("scopesSqlTable", selection)}> + diff --git a/esp/src/src-react/components/MetricsOptions.tsx b/esp/src/src-react/components/MetricsOptions.tsx index 549b644ae2b..db72c14579d 100644 --- a/esp/src/src-react/components/MetricsOptions.tsx +++ b/esp/src/src-react/components/MetricsOptions.tsx @@ -3,7 +3,7 @@ import { DefaultButton, PrimaryButton, Checkbox, Pivot, PivotItem, TextField } f import nlsHPCC from "src/nlsHPCC"; import { useMetricMeta, useMetricsOptions } from "../hooks/metrics"; import { MessageBox } from "../layouts/MessageBox"; -import { JSONSourceEditor } from "./SourceEditor"; +import { JSONSourceEditor, SourceEditor } from "./SourceEditor"; const width = 640; const innerHeight = 400; @@ -51,7 +51,7 @@ export const MetricsOptions: React.FunctionComponent = ({ /> } > - +
{ if (checked) { @@ -71,7 +71,7 @@ export const MetricsOptions: React.FunctionComponent = ({ })}
- +
{properties.map(p => { return = 0} onChange={(ev, checked) => { @@ -84,7 +84,14 @@ export const MetricsOptions: React.FunctionComponent = ({ })}
- + +
+ { + setOptions({ ...options, sql }); + }} /> +
+
+
{ setOptions({ ...options, ignoreGlobalStoreOutEdges: !!checked }); @@ -100,7 +107,7 @@ export const MetricsOptions: React.FunctionComponent = ({ }} />
- +
{ if (obj) { @@ -110,5 +117,5 @@ export const MetricsOptions: React.FunctionComponent = ({
- ; + ; }; \ No newline at end of file diff --git a/esp/src/src-react/components/MetricsSQL.tsx b/esp/src/src-react/components/MetricsSQL.tsx new file mode 100644 index 00000000000..76d202ac20e --- /dev/null +++ b/esp/src/src-react/components/MetricsSQL.tsx @@ -0,0 +1,174 @@ +import * as React from "react"; +import { CommandBarButton, Stack } from "@fluentui/react"; +import { useConst } from "@fluentui/react-hooks"; +import { IScope } from "@hpcc-js/comms"; +import { ICompletion } from "@hpcc-js/codemirror"; +import { Table } from "@hpcc-js/dgrid"; +import * as Utility from "src/Utility"; +import { useDuckDBConnection } from "../hooks/duckdb"; +import { HolyGrail } from "../layouts/HolyGrail"; +import { AutosizeHpccJSComponent } from "../layouts/HpccJSAdapter"; +import { debounce } from "../util/throttle"; +import { SQLSourceEditor } from "./SourceEditor"; +import nlsHPCC from "src/nlsHPCC"; + +const spaceRegex = new RegExp("\\s", "g"); + +interface MetricsDataProps { + defaultSql: string; + scopes: IScope[]; + onSelectionChanged: (selection: IScope[]) => void; +} + +export const MetricsSQL: React.FunctionComponent = ({ + defaultSql, + scopes, + onSelectionChanged +}) => { + + const cleanScopes = React.useMemo(() => { + return scopes.map(scope => { + const retVal = { ...scope }; + delete retVal.__children; + return retVal; + }); + }, [scopes]); + + const connection = useDuckDBConnection(cleanScopes, "metrics"); + const [schema, setSchema] = React.useState([]); + const [sql, setSql] = React.useState(defaultSql); + const [sqlError, setSqlError] = React.useState(); + const [dirtySql, setDirtySql] = React.useState(sql); + const [data, setData] = React.useState([]); + + // Grid --- + const columns = React.useMemo((): string[] => { + const retVal: string[] = []; + schema.forEach(col => { + retVal.push(col.column_name); + }); + return retVal; + }, [schema]); + + const scopesTable = useConst(() => new Table() + .multiSelect(true) + .sortable(true) + .noDataMessage(nlsHPCC.loadingMessage) + .on("click", debounce((row, col, sel) => { + if (sel) { + onSelectionChanged(scopesTable.selection()); + } + }, 100)) + ); + + React.useEffect(() => { + if (columns.length === 0 && data.length === 0 && sqlError) { + scopesTable + .columns(["Error"]) + .data(sqlError.message.split("\n").map(line => { + if (line.indexOf("LINE") === 0) { + } else if (line.includes("^")) { + line = line.replace(spaceRegex, " "); + } + return [line]; + })) + .lazyRender() + ; + } else { + scopesTable + .columns(["##"]) // Reset hash to force recalculation of default widths + .columns(["##", ...columns]) + .data(data.map((row, idx) => [idx + 1, ...row])) + .lazyRender() + ; + } + }, [columns, data, sqlError, scopesTable]); + + // Query --- + React.useEffect(() => { + if (cleanScopes.length === 0) { + setSchema([]); + setData([]); + } else if (connection) { + connection.query(`DESCRIBE ${sql}`).then(result => { + if (connection) { + setSchema(result.toArray().map((row) => row.toJSON())); + } + }).catch(e => { + setSchema([]); + }); + + setSqlError(undefined); + connection.query(sql).then(result => { + if (connection) { + setData(result.toArray().map((row) => { + return row.toArray(); + })); + } + }).catch(e => { + setSqlError(e); + setData([]); + }).finally(() => { + scopesTable.noDataMessage(nlsHPCC.noDataMessage); + }); + } + }, [cleanScopes.length, connection, scopesTable, sql]); + + // Selection --- + const onChange = React.useCallback((newSql: string) => { + setDirtySql(newSql); + }, []); + + const onFetchHints = 
React.useCallback((cm, option): Promise => { + const cursor = cm.getCursor(); + const lineStr = cm.getLine(cursor.line); + let lineEnd = cursor.ch; + let end = cm.indexFromPos({ line: cursor.line, ch: lineEnd }); + if (connection) { + return connection.query(`SELECT * FROM sql_auto_complete("${dirtySql.substring(0, end)}")`).then(result => { + if (connection) { + const hints = result.toArray().map((row) => row.toJSON()); + while (lineEnd < lineStr.length && /\w/.test(lineStr.charAt(lineEnd))) ++lineEnd; + end = cm.indexFromPos({ line: cursor.line, ch: lineEnd }); + const suggestion_start = hints.length ? hints[0].suggestion_start : end; + return { + list: hints.map(row => row.suggestion), + from: cm.posFromIndex(suggestion_start), + to: cm.posFromIndex(end) + }; + } + }).catch(e => { + return Promise.resolve(null); + }); + } + return Promise.resolve(null); + }, [connection, dirtySql]); + + const onSubmit = React.useCallback(() => { + setSql(dirtySql); + }, [dirtySql]); + + const onCopy = React.useCallback(() => { + const tsv = scopesTable.export("TSV"); + navigator?.clipboard?.writeText(tsv); + }, [scopesTable]); + + const onDownload = React.useCallback(() => { + const csv = scopesTable.export("CSV"); + Utility.downloadCSV(csv, "metrics.csv"); + }, [scopesTable]); + + return +
+ +
+ setSql(dirtySql)} /> + + + + } + main={} + />; +}; diff --git a/esp/src/src-react/components/SourceEditor.tsx b/esp/src/src-react/components/SourceEditor.tsx index 6a367c39c71..5e070c6f419 100644 --- a/esp/src/src-react/components/SourceEditor.tsx +++ b/esp/src/src-react/components/SourceEditor.tsx @@ -1,7 +1,7 @@ import * as React from "react"; import { CommandBar, ContextualMenuItemType, ICommandBarItemProps } from "@fluentui/react"; import { useConst, useOnEvent } from "@fluentui/react-hooks"; -import { Editor, ECLEditor, XMLEditor, JSONEditor } from "@hpcc-js/codemirror"; +import { Editor, ECLEditor, XMLEditor, JSONEditor, SQLEditor, ICompletion } from "@hpcc-js/codemirror"; import { Workunit } from "@hpcc-js/comms"; import nlsHPCC from "src/nlsHPCC"; import { HolyGrail } from "../layouts/HolyGrail"; @@ -12,7 +12,30 @@ import { ShortVerticalDivider } from "./Common"; import "eclwatch/css/cmDarcula.css"; -type ModeT = "ecl" | "xml" | "json" | "text"; +type ModeT = "ecl" | "xml" | "json" | "text" | "sql"; + +class SQLEditorEx extends SQLEditor { + + constructor() { + super(); + } + + enter(domNode, element) { + super.enter(domNode, element); + this.option("extraKeys", { + "Ctrl-Enter": cm => { + this.submit(); + }, + "Ctrl-S": cm => { + this.submit(); + } + + } as any); + } + + submit() { + } +} function newEditor(mode: ModeT) { switch (mode) { @@ -22,6 +45,8 @@ function newEditor(mode: ModeT) { return new XMLEditor(); case "json": return new JSONEditor(); + case "sql": + return new SQLEditorEx(); case "text": default: return new Editor(); @@ -32,14 +57,20 @@ interface SourceEditorProps { mode?: ModeT; text?: string; readonly?: boolean; - onChange?: (text: string) => void; + toolbar?: boolean; + onTextChange?: (text: string) => void; + onFetchHints?: (cm: any, option: any) => Promise; + onSubmit?: () => void; } export const SourceEditor: React.FunctionComponent = ({ mode = "text", text = "", readonly = false, - onChange = (text: string) => { } + toolbar = true, + onTextChange = (text: string) => { }, + onFetchHints, + onSubmit }) => { const { isDark } = useUserTheme(); @@ -55,23 +86,33 @@ export const SourceEditor: React.FunctionComponent = ({ { key: "divider_1", itemType: ContextualMenuItemType.Divider, onRender: () => }, ]; - const editor = useConst(() => newEditor(mode) - .on("changes", () => { - onChange(editor.text()); - }) - ); + const editor = useConst(() => newEditor(mode)); React.useEffect(() => { - editor.option("theme", isDark ? "darcula" : "default"); - if (editor.text() !== text) { - editor.text(text); - } + editor + .on("changes", onTextChange ? () => onTextChange(editor.text()) : undefined, true) + ; + }, [editor, onTextChange]); + React.useEffect(() => { editor - .readOnly(readonly) - .lazyRender() + .showHints(onFetchHints !== undefined) + .on("fetchHints", (cm, option) => { + if (onFetchHints) { + return onFetchHints(cm, option); + } + return Promise.resolve(null); + }, true) ; - }, [editor, text, readonly, isDark]); + }, [editor, onFetchHints]); + + React.useEffect(() => { + if (onSubmit) { + editor + .on("submit", onSubmit ? () => onSubmit() : undefined, true) + ; + } + }, [editor, onSubmit]); const handleThemeToggle = React.useCallback((evt) => { if (!editor) return; @@ -83,8 +124,20 @@ export const SourceEditor: React.FunctionComponent = ({ }, [editor]); useOnEvent(document, "eclwatch-theme-toggle", handleThemeToggle); + React.useEffect(() => { + editor.option("theme", isDark ? 
"darcula" : "default"); + if (editor.text() !== text) { + editor.text(text); + } + + editor + .readOnly(readonly) + .lazyRender() + ; + }, [editor, text, readonly, isDark]); + return } + header={toolbar && } main={ } @@ -144,7 +197,7 @@ export const JSONSourceEditor: React.FunctionComponent = } }, [onChange]); - return ; + return ; }; export interface WUXMLSourceEditorProps { @@ -243,3 +296,21 @@ export const FetchEditor: React.FunctionComponent = ({ return ; }; +interface SQLSourceEditorProps { + sql: string; + toolbar?: boolean; + onSqlChange?: (sql: string) => void; + onFetchHints?: (cm: any, option: any) => Promise; + onSubmit?: () => void; +} + +export const SQLSourceEditor: React.FunctionComponent = ({ + sql, + toolbar, + onSqlChange, + onFetchHints, + onSubmit +}) => { + return ; +}; + diff --git a/esp/src/src-react/components/WorkunitDetails.tsx b/esp/src/src-react/components/WorkunitDetails.tsx index 6d790b184a8..d7e84120d13 100644 --- a/esp/src/src-react/components/WorkunitDetails.tsx +++ b/esp/src/src-react/components/WorkunitDetails.tsx @@ -1,5 +1,5 @@ import * as React from "react"; -import { Icon } from "@fluentui/react"; +import { Icon, Shimmer } from "@fluentui/react"; import { WsWorkunits, WorkunitsService } from "@hpcc-js/comms"; import { scopedLogger } from "@hpcc-js/util"; import { SizeMe } from "react-sizeme"; @@ -16,7 +16,6 @@ import { Helpers } from "./Helpers"; import { IFrame } from "./IFrame"; import { Logs } from "./Logs"; import { useNextPrev } from "./Menu"; -import { Metrics } from "./Metrics"; import { Queries } from "./Queries"; import { Resources } from "./Resources"; import { Result } from "./Result"; @@ -29,6 +28,8 @@ import { WorkunitSummary } from "./WorkunitSummary"; import { TabInfo, DelayLoadedPanel, OverflowTabList } from "./controls/TabbedPanes/index"; import { ECLArchive } from "./ECLArchive"; +const Metrics = React.lazy(() => import("./Metrics").then(mod => ({ default: mod.Metrics }))); + const logger = scopedLogger("src-react/components/WorkunitDetails.tsx"); const workunitService = new WorkunitsService({ baseUrl: "" }); @@ -197,7 +198,16 @@ export const WorkunitDetails: React.FunctionComponent = ({ - + + + + + + + }> + + diff --git a/esp/src/src-react/hooks/duckdb.ts b/esp/src/src-react/hooks/duckdb.ts new file mode 100644 index 00000000000..75bfa8dd5cd --- /dev/null +++ b/esp/src/src-react/hooks/duckdb.ts @@ -0,0 +1,59 @@ +import * as React from "react"; +import { DuckDB } from "@hpcc-js/wasm/dist/duckdb"; + +type AsyncDuckDB = any; +type AsyncDuckDBConnection = any; + +export function useDuckDB(): [AsyncDuckDB] { + + const [db, setDb] = React.useState(); + + React.useEffect(() => { + const duckdb = DuckDB.load().then(duckdb => { + setDb(duckdb.db); + return duckdb; + }); + + return () => { + duckdb?.db?.close(); + }; + }, []); + + return [db]; +} + +export function useDuckDBConnection(scopes: T, name: string): AsyncDuckDBConnection | undefined { + + const [db] = useDuckDB(); + const [connection, setConnection] = React.useState(undefined); + + React.useEffect(() => { + let c: AsyncDuckDBConnection | undefined; + if (db) { + db.connect().then(async connection => { + await db.registerFileText(`${name}.json`, JSON.stringify(scopes)); + await connection.insertJSONFromPath(`${name}.json`, { name }); + await connection.close(); + c = await db.connect(); + try { // TODO: Move to @hpcc-js/wasm + await c.query("LOAD autocomplete").catch(e => { + console.log(e.message); + }); + } catch (e) { + console.log(e.message); + } + setConnection(c); + }); + } + 
return () => { + try { + c?.query(`DROP TABLE ${name}`); + } finally { + c?.close(); + } + + }; + }, [db, name, scopes]); + + return connection; +} diff --git a/esp/src/src-react/hooks/metrics.ts b/esp/src/src-react/hooks/metrics.ts index d377997ac92..24d4a5341e4 100644 --- a/esp/src/src-react/hooks/metrics.ts +++ b/esp/src/src-react/hooks/metrics.ts @@ -7,19 +7,35 @@ import { useWorkunit } from "./workunit"; import { useQuery } from "./query"; import { useCounter } from "./util"; -const logger = scopedLogger("src-react\hooks\metrics.ts"); +const logger = scopedLogger("src-react/hooks/metrics.ts"); -const defaults = { - scopeTypes: ["graph", "subgraph", "activity", "edge"], +const MetricOptionsVersion = 2; + +export interface MetricsOptions { + scopeTypes: string[]; + properties: string[]; + ignoreGlobalStoreOutEdges: boolean; + subgraphTpl; + activityTpl; + edgeTpl; + sql: string; + layout?: object; + showTimeline: boolean; +} + +const defaults: MetricsOptions = { + scopeTypes: ["graph", "subgraph", "activity", "operation", "workflow"], properties: ["TimeElapsed"], ignoreGlobalStoreOutEdges: true, subgraphTpl: "%id% - %TimeElapsed%", activityTpl: "%Label%", edgeTpl: "%Label%\n%NumRowsProcessed%\n%SkewMinRowsProcessed% / %SkewMaxRowsProcessed%", - layout: undefined + sql: "SELECT type, name, TimeElapsed, id\n FROM metrics\n WHERE TimeElapsed IS NOT NULL", + layout: undefined, + showTimeline: true }; -const options = { ...defaults }; +const options: MetricsOptions = { ...defaults }; function checkLayout(options: MetricsOptions): boolean { if (options?.layout && !options?.layout?.["main"]) { @@ -28,16 +44,6 @@ function checkLayout(options: MetricsOptions): boolean { return !!options?.layout; } -export interface MetricsOptions { - scopeTypes: string[]; - properties: string[]; - ignoreGlobalStoreOutEdges: boolean; - subgraphTpl; - activityTpl; - edgeTpl; - layout?: object -} - export function useMetricsOptions(): [MetricsOptions, (opts: MetricsOptions) => void, () => void, (toDefaults?: boolean) => void] { const store = useConst(() => userKeyValStore()); @@ -52,7 +58,7 @@ export function useMetricsOptions(): [MetricsOptions, (opts: MetricsOptions) => const save = React.useCallback(() => { if (checkLayout(options)) { - store?.set("MetricOptions", JSON.stringify(options), true); + store?.set(`MetricOptions-${MetricOptionsVersion}`, JSON.stringify(options), true); } }, [store]); @@ -60,7 +66,7 @@ export function useMetricsOptions(): [MetricsOptions, (opts: MetricsOptions) => if (toDefaults) { setOptions({ ...defaults }); } else { - store?.get("MetricOptions").then(opts => { + store?.get(`MetricOptions-${MetricOptionsVersion}`).then(opts => { const options = JSON.parse(opts); checkLayout(options); setOptions({ ...defaults, ...options }); diff --git a/esp/src/src-react/layouts/DockPanel.tsx b/esp/src/src-react/layouts/DockPanel.tsx index af586ee5919..8ebea0e1c74 100644 --- a/esp/src/src-react/layouts/DockPanel.tsx +++ b/esp/src/src-react/layouts/DockPanel.tsx @@ -51,8 +51,6 @@ export class ReactWidget extends HTMLWidget { this._div = element.append("div"); } - private _prevWidth; - private _prevHeight; update(domNode, element) { super.update(domNode, element); this._div @@ -66,13 +64,6 @@ export class ReactWidget extends HTMLWidget { , this._div.node() ); - - // TODO: Hack to make command bar resize... 
- if (this._prevWidth !== this.width() || this._prevHeight !== this.height()) { - this._prevWidth = this.width(); - this._prevHeight = this.height(); - window.dispatchEvent(new Event("resize")); - } } exit(domNode, element) { diff --git a/esp/src/src/nls/hpcc.ts b/esp/src/src/nls/hpcc.ts index 8588cf18c54..d6b2c3aad70 100644 --- a/esp/src/src/nls/hpcc.ts +++ b/esp/src/src/nls/hpcc.ts @@ -547,6 +547,7 @@ export = { Methods: "Methods", Metrics: "Metrics", MetricsGraph: "Metrics/Graph", + MetricsSQL: "Metrics (SQL)", Min: "Min", Mine: "Mine", MinNode: "Min Node", @@ -884,6 +885,7 @@ export = { Spill: "Spill", SplitPrefix: "Split Prefix", Spray: "Spray", + SQL: "SQL", Start: "Start", Started: "Started", Starting: "Starting", diff --git a/esp/src/tsconfig.json b/esp/src/tsconfig.json index dc6b41ab0ae..55fd90ef44b 100644 --- a/esp/src/tsconfig.json +++ b/esp/src/tsconfig.json @@ -3,9 +3,9 @@ "baseUrl": ".", "outDir": "./lib", "declarationDir": "./types", - "target": "es5", - "module": "amd", - "moduleResolution": "node", + "target": "ES5", + "module": "AMD", + "moduleResolution": "Node", "allowSyntheticDefaultImports": true, "sourceMap": true, "declaration": true, @@ -22,12 +22,16 @@ "downlevelIteration": true, "jsx": "react", "lib": [ - "dom", - "es2019" + "DOM", + "ES2019" ], "typeRoots": [], "types": [], "paths": { + "@hpcc-js/wasm": [ + "./node_modules/@hpcc-js/wasm", + "../../../hpcc-js-wasm" + ], "@hpcc-js/*": [ "./node_modules/@hpcc-js/*", "../../../hpcc-js/packages/*", diff --git a/esp/src/webpack.config.js b/esp/src/webpack.config.js index 3d15d932e6a..9365d0d16e8 100644 --- a/esp/src/webpack.config.js +++ b/esp/src/webpack.config.js @@ -88,8 +88,12 @@ module.exports = function (env) { }, resolve: { alias: { + "@hpcc-js/wasm/dist/duckdb": path.resolve(__dirname, "node_modules/@hpcc-js/wasm/dist/duckdb.js"), }, fallback: { + "@hpcc-js/wasm": [ + path.resolve(__dirname, "../../../hpcc-js-wasm"), + ], "@hpcc-js": [ path.resolve(__dirname, "../../../hpcc-js/packages"), path.resolve(__dirname, "../../../Visualization/packages") @@ -101,6 +105,7 @@ module.exports = function (env) { modules: ["node_modules"] }, + target: "web", mode: isProduction ? "production" : "development", devtool: isProduction ? undefined : "cheap-module-source-map", From 5187b4edb6fb57e55872ea6e30c175fafb451672 Mon Sep 17 00:00:00 2001 From: Gavin Halliday Date: Thu, 20 Jun 2024 16:09:30 +0100 Subject: [PATCH 118/151] HPCC-30433 Add match statistics to join activities in thor Signed-off-by: Gavin Halliday --- system/jlib/jstatcodes.h | 4 + system/jlib/jstats.cpp | 7 ++ .../hashdistrib/thhashdistribslave.cpp | 2 + thorlcr/activities/join/thjoinslave.cpp | 2 + .../lookupjoin/thlookupjoinslave.cpp | 19 ++++- thorlcr/activities/msort/thsortu.cpp | 78 ++++++++++++++++++- thorlcr/activities/msort/thsortu.hpp | 41 ++++++++-- .../activities/selfjoin/thselfjoinslave.cpp | 9 ++- thorlcr/thorutil/thormisc.cpp | 6 +- thorlcr/thorutil/thormisc.hpp | 1 + 10 files changed, 155 insertions(+), 14 deletions(-) diff --git a/system/jlib/jstatcodes.h b/system/jlib/jstatcodes.h index 9995b70ddfd..aa133ad5ba2 100644 --- a/system/jlib/jstatcodes.h +++ b/system/jlib/jstatcodes.h @@ -307,6 +307,10 @@ enum StatisticKind StSizeRemoteWrite, StSizePeakTempDisk, StSizePeakEphemeralDisk, + StNumMatchLeftRowsMax, + StNumMatchRightRowsMax, + StNumMatchCandidates, + StNumMatchCandidatesMax, StMax, //For any quantity there is potentially the following variants. 
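The four new kinds are derived per join group. A minimal sketch of the intended bookkeeping, mirroring the JoinMatchStats helper this patch adds to thsortu.hpp (addStatistic is the real CRuntimeStatisticCollection call; the local variable names here are illustrative):

    // For a completed join group of numLeft x numRight matching rows:
    rowcount_t candidates = numLeft * numRight;   // combinations evaluated for this group
    if (numLeft > maxLeftRows)
        maxLeftRows = numLeft;                    // feeds StNumMatchLeftRowsMax
    if (numRight > maxRightRows)
        maxRightRows = numRight;                  // feeds StNumMatchRightRowsMax
    totalCandidates += candidates;                // feeds StNumMatchCandidates
    if (candidates > maxCandidates)
        maxCandidates = candidates;               // feeds StNumMatchCandidatesMax
    // ...later, when the activity gathers stats:
    stats.addStatistic(StNumMatchCandidates, totalCandidates);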
diff --git a/system/jlib/jstats.cpp b/system/jlib/jstats.cpp index 57733e6c259..92f02d1bdec 100644 --- a/system/jlib/jstats.cpp +++ b/system/jlib/jstats.cpp @@ -979,6 +979,10 @@ static const constexpr StatisticMeta statsMetaData[StMax] = { { SIZESTAT(RemoteWrite), "Size of data sent to remote workers"}, { PEAKSIZESTAT(PeakTempDisk), "High water mark for temporary files"}, { PEAKSIZESTAT(PeakEphemeralDisk), "High water mark for emphemeral storage use"}, + { NUMSTAT(MatchLeftRowsMax), "The largest number of left rows in a join group" }, + { NUMSTAT(MatchRightRowsMax), "The largest number of right rows in a join group" }, + { NUMSTAT(MatchCandidates), "The number of candidate combinations of left and right rows forming join groups" }, + { NUMSTAT(MatchCandidatesMax), "The largest number of candidate combinations of left and right rows in a single group" }, }; static MapStringTo statisticNameMap(true); @@ -3105,6 +3109,9 @@ static bool isWorthReportingMergedValue(StatisticKind kind) { case StSizePeakMemory: case StSizePeakRowMemory: + case StNumMatchLeftRowsMax: + case StNumMatchRightRowsMax: + case StNumMatchCandidatesMax: //These only make sense for individual nodes, the aggregated value is meaningless return false; } diff --git a/thorlcr/activities/hashdistrib/thhashdistribslave.cpp b/thorlcr/activities/hashdistrib/thhashdistribslave.cpp index 0b814f94b63..61a49ced9a9 100644 --- a/thorlcr/activities/hashdistrib/thhashdistribslave.cpp +++ b/thorlcr/activities/hashdistrib/thhashdistribslave.cpp @@ -4042,6 +4042,7 @@ class HashJoinSlaveActivity : public CSlaveActivity, implements IStopInput strmR.clear(); { CriticalBlock b(joinHelperCrit); + joinhelper->gatherStats(inactiveStats); joinhelper.clear(); } PARENT::stop(); @@ -4087,6 +4088,7 @@ class HashJoinSlaveActivity : public CSlaveActivity, implements IStopInput } else { + joinhelper->gatherStats(activeStats); activeStats.setStatistic(StNumLeftRows, joinhelper->getLhsProgress()); activeStats.setStatistic(StNumRightRows, joinhelper->getRhsProgress()); } diff --git a/thorlcr/activities/join/thjoinslave.cpp b/thorlcr/activities/join/thjoinslave.cpp index f7a07ee9f69..f2262256ab7 100644 --- a/thorlcr/activities/join/thjoinslave.cpp +++ b/thorlcr/activities/join/thjoinslave.cpp @@ -378,6 +378,7 @@ class JoinSlaveActivity : public CSlaveActivity, implements ILookAheadStopNotify rhsProgressCount = joinhelper->getRhsProgress(); { CriticalBlock b(joinHelperCrit); + joinhelper->gatherStats(inactiveStats); joinhelper.clear(); } ActPrintLog("SortJoinSlaveActivity::stop"); @@ -627,6 +628,7 @@ class JoinSlaveActivity : public CSlaveActivity, implements ILookAheadStopNotify } else { + joinhelper->gatherStats(activeStats); activeStats.setStatistic(StNumLeftRows, joinhelper->getLhsProgress()); if (!isSelfJoin) activeStats.setStatistic(StNumRightRows, joinhelper->getRhsProgress()); diff --git a/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp b/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp index 09662742d09..3a0da40d7f3 100644 --- a/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp +++ b/thorlcr/activities/lookupjoin/thlookupjoinslave.cpp @@ -806,6 +806,7 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, { typedef CSlaveActivity PARENT; + JoinMatchStats matchStats; Owned leftexception; bool eos, eog, someSinceEog; @@ -949,6 +950,7 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, unsigned keepLimit; unsigned joined; unsigned joinCounter; + unsigned candidateCounter; OwnedConstThorRow defaultLeft; 
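+    // NB: candidateCounter counts the RHS candidates considered for the current LHS row; it feeds matchStats.noteGroup() when the group completes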
bool leftMatch, grouped; @@ -1165,10 +1167,12 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, inline const void *denormalizeNextRow() { ConstPointerArray filteredRhs; + unsigned candidates = 0; while (rhsNext) { if (abortSoon) return NULL; + candidates++; if (!fuzzyMatch || (HELPERBASE::match(leftRow, rhsNext))) { leftMatch = true; @@ -1187,6 +1191,7 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, } rhsNext = tableProxy->getNextRHS(currentHashEntry); // NB: currentHashEntry only used for Lookup,Many case } + matchStats.noteGroup(1, candidates); if (filteredRhs.ordinality() || (!leftMatch && 0!=(flags & JFleftouter))) { unsigned rcCount = 0; @@ -1238,6 +1243,7 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, { leftRow.setown(left->nextRow()); joinCounter = 0; + candidateCounter = 0; if (leftRow) { eog = false; @@ -1273,6 +1279,7 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, RtlDynamicRowBuilder rowBuilder(allocator); while (rhsNext) { + candidateCounter++; if (!fuzzyMatch || HELPERBASE::match(leftRow, rhsNext)) { leftMatch = true; @@ -1289,12 +1296,15 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, rhsNext = NULL; else rhsNext = tableProxy->getNextRHS(currentHashEntry); // NB: currentHashEntry only used for Lookup,Many case + if (!rhsNext) + matchStats.noteGroup(1, candidateCounter); return row.getClear(); } } } rhsNext = tableProxy->getNextRHS(currentHashEntry); // NB: currentHashEntry used for Lookup,Many or All cases } + matchStats.noteGroup(1, candidateCounter); if (!leftMatch && NULL == rhsNext && 0!=(flags & JFleftouter)) { size32_t sz = HELPERBASE::joinTransform(rowBuilder, leftRow, defaultRight, 0, JTFmatchedleft); @@ -1330,6 +1340,7 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, joined = 0; joinCounter = 0; + candidateCounter = 0; leftMatch = false; returnMany = false; @@ -1472,6 +1483,7 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, { joined = 0; joinCounter = 0; + candidateCounter = 0; leftMatch = false; rhsNext = NULL; @@ -1631,6 +1643,11 @@ class CInMemJoinBase : public CSlaveActivity, public CAllOrLookupHelper, { ActPrintLog("LHS input finished, %" RCPF "d rows read", count); } + virtual void gatherActiveStats(CRuntimeStatisticCollection &activeStats) const override + { + PARENT::gatherActiveStats(activeStats); + matchStats.gatherStats(activeStats); + } }; @@ -3359,7 +3376,7 @@ class CAllJoinSlaveActivity : public CInMemJoinBase } } public: - CAllJoinSlaveActivity(CGraphElementBase *_container) : PARENT(_container) + CAllJoinSlaveActivity(CGraphElementBase *_container) : PARENT(_container, allJoinActivityStatistics) { returnMany = true; } diff --git a/thorlcr/activities/msort/thsortu.cpp b/thorlcr/activities/msort/thsortu.cpp index 0caf778d494..0e2aafc0033 100644 --- a/thorlcr/activities/msort/thsortu.cpp +++ b/thorlcr/activities/msort/thsortu.cpp @@ -276,6 +276,7 @@ void swapRows(RtlDynamicRowBuilder &row1, RtlDynamicRowBuilder &row2) row1.swapWith(row2); } + class CJoinHelper : implements IJoinHelper, public CSimpleInterface { CActivityBase &activity; @@ -314,11 +315,13 @@ class CJoinHelper : implements IJoinHelper, public CSimpleInterface OwnedConstThorRow defaultRight; Linked strmL; Linked strmR; + JoinMatchStats matchStats; bool abort = false; bool nextleftgot = false; bool nextrightgot = false; unsigned atmost = (unsigned)-1; rowcount_t lhsProgressCount = 0, rhsProgressCount 
= 0; + rowcount_t startMatchLhsProgressCount = 0; unsigned keepmax = (unsigned)-1; unsigned abortlimit = (unsigned)-1; unsigned keepremaining = (unsigned)-1; @@ -819,8 +822,16 @@ class CJoinHelper : implements IJoinHelper, public CSimpleInterface } } while (state == JSonfail); + //We have read a row that does not match, so decrement by 1 to get the count for the row that mismatched + { + //Nested scope to avoid problems with variable leaking into the following case + rowcount_t nextStartMatchLhsProgressCount = lhsProgressCount - 1; + matchStats.noteGroup(nextStartMatchLhsProgressCount - startMatchLhsProgressCount, 0); + startMatchLhsProgressCount = nextStartMatchLhsProgressCount; + } // fall through case JScompare: + //Need to create a new match group when the right has been completely processed if (getL()) { rightidx = 0; rightgroupmatched = NULL; @@ -896,14 +907,29 @@ class CJoinHelper : implements IJoinHelper, public CSimpleInterface if (!hitatmost&&rightgroup.ordinality()) state = JSmatch; else if (cmp<0) + { + //Left row and no match right row + matchStats.noteGroup(1, 0); // This will not spot large left groups + startMatchLhsProgressCount = lhsProgressCount; ret.setown(outrow(Onext,Oouter)); + } else + { + //Right row with no matching left rows. + //This will not spot large right groups since it processes a row at a time + matchStats.noteGroup(0, 1); ret.setown(outrow(Oouter,Onext)); + } } } - else if (getR()) + else if (getR()) + { + //We would miss tracking a very large trailing right group, but it is not worth + //the extra work to spot it + //FUTURE: if (!rightouter) we could return null and stop reading the rhs. ret.setown(outrow(Oouter,Onext)); + } else return NULL; break; @@ -920,6 +946,7 @@ class CJoinHelper : implements IJoinHelper, public CSimpleInterface nextL(); } mcoreintercept->addWork(&leftgroup,&rightgroup); + startMatchLhsProgressCount = (lhsProgressCount - 1); // Never used, but keep consistent with other cases state = JScompare; } else if (rightidxdocompare(nextleft,prevleft); - if (cmp>0) + if (cmp>0) + { + //Finished processing this group -> gather the stats for the number of join candidates. + //lhsProgressCount is one higher than the the row count that follows the end of group + rowcount_t numLeftRows = (lhsProgressCount - 1) - startMatchLhsProgressCount; + matchStats.noteGroup(numLeftRows, rightgroup.ordinality()); + startMatchLhsProgressCount = (lhsProgressCount - 1); state = JSrightgrouponly; + } else if (cmp<0) { activity.logRow("prev: ", *allocatorL->queryOutputMeta(), prevleft); @@ -942,10 +976,17 @@ class CJoinHelper : implements IJoinHelper, public CSimpleInterface } } else + { + //Finished processing this group -> gather the stats for the number of join candidates. + rowcount_t numLeftRows = lhsProgressCount - startMatchLhsProgressCount; + matchStats.noteGroup(numLeftRows, rightgroup.ordinality()); + startMatchLhsProgressCount = lhsProgressCount; state = JSrightgrouponly; + } } break; - case JSrightgrouponly: + case JSrightgrouponly: + //FUTURE: Avoid walking the right group if it is an inner/left only join. 
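+            //Rows remaining in the right group at this point had no left match, so the walk only matters for right-outer output.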
            // right group
            if (rightidx INITIAL_SELFJOIN_MATCH_WARNING_LEVEL) {
                Owned e = MakeActivityWarning(&activity, TE_SelfJoinMatchWarning, "Exceeded initial match limit");
                e->queryData().append((unsigned)curgroup.ordinality());
@@ -1299,6 +1348,12 @@ class SelfJoinHelper: implements IJoinHelper, public CSimpleInterface
     virtual void stop() { abort = true; }
     virtual rowcount_t getLhsProgress() const { return progressCount; }
     virtual rowcount_t getRhsProgress() const { return progressCount; }
+    virtual void gatherStats(CRuntimeStatisticCollection & stats) const override
+    {
+        //Left and right progress could be added here.
+        matchStats.gatherStats(stats);
+    }
+
 };
 
 IJoinHelper *createDenormalizeHelper(CActivityBase &activity, IHThorDenormalizeArg *helper, IThorRowInterfaces *rowIf)
@@ -1464,7 +1519,7 @@ class CMultiCoreJoinHelperBase: implements IJoinHelper, implements IMulticoreInt
     Owned exc;
     CriticalSection sect;
     bool eos, selfJoin;
-
+    JoinMatchStats matchStats;
 
     void setException(IException *e,const char *title)
     {
@@ -1561,6 +1616,18 @@ class CMultiCoreJoinHelperBase: implements IJoinHelper, implements IMulticoreInt
         }
     }
 
+    void noteGroupSizes(CThorExpandingRowArray *lgroup,CThorExpandingRowArray *rgroup)
+    {
+        rowidx_t numLeft = lgroup ? lgroup->ordinality() : 0;
+        rowidx_t numRight = rgroup ? rgroup->ordinality() : 0;
+        matchStats.noteGroup(numLeft, numRight);
+    }
+
+    virtual void gatherStats(CRuntimeStatisticCollection & stats) const override
+    {
+        matchStats.gatherStats(stats);
+    }
+
     CMultiCoreJoinHelperBase(CActivityBase &_activity, unsigned numthreads, bool _selfJoin, IJoinHelper *_jhelper, IHThorJoinArg *_helper, IThorRowInterfaces *_rowIf)
         : activity(_activity), rowIf(_rowIf)
     {
@@ -1804,6 +1871,8 @@ class CMultiCoreJoinHelper: public CMultiCoreJoinHelperBase
      * The pull side, also pulls from the workers in sequence
      * This ensures the output is return in input order.
      */
+        noteGroupSizes(lgroup, rgroup);
+
         cWorker *worker = workers[curin];
         worker->workready.wait();
         workers[curin]->work.set(lgroup,rgroup);
@@ -1987,6 +2056,7 @@ class CMultiCoreUnorderedJoinHelper: public CMultiCoreJoinHelperBase
     // IMulticoreIntercept impl.
     virtual void addWork(CThorExpandingRowArray *lgroup,CThorExpandingRowArray *rgroup)
     {
+        noteGroupSizes(lgroup, rgroup);
         cWorkItem *item = new cWorkItem(activity, lgroup, rgroup);
         workqueue.enqueue(item);
     }
diff --git a/thorlcr/activities/msort/thsortu.hpp b/thorlcr/activities/msort/thsortu.hpp
index eef6aa1c929..006b3f4c7b6 100644
--- a/thorlcr/activities/msort/thsortu.hpp
+++ b/thorlcr/activities/msort/thsortu.hpp
@@ -60,18 +60,49 @@ interface IJoinHelper: public IRowStream
     virtual rowcount_t getRhsProgress() const = 0;
     virtual const void *nextRow() = 0;
     virtual void stop() = 0;
+    virtual void gatherStats(CRuntimeStatisticCollection & stats) const = 0;
 };
 
 IJoinHelper *createJoinHelper(CActivityBase &activity, IHThorJoinArg *helper, IThorRowInterfaces *rowIf, bool parallelmatch, bool unsortedoutput);
 IJoinHelper *createSelfJoinHelper(CActivityBase &activity, IHThorJoinArg *helper, IThorRowInterfaces *rowIf, bool parallelmatch, bool unsortedoutput);
 IJoinHelper *createDenormalizeHelper(CActivityBase &activity, IHThorDenormalizeArg *helper, IThorRowInterfaces *rowIf);
-
-
 ILimitedCompareHelper *createLimitedCompareHelper();
-
-
-
+//Included here so this can be shared between join and lookup join.
+class JoinMatchStats
+{
+public:
+    void gatherStats(CRuntimeStatisticCollection & stats) const
+    {
+        //Left and right progress could be added here.
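+        //Only add statistics that were actually observed, so joins that never matched do not emit zero-valued entries.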
+ if (maxLeftGroupSize) + stats.addStatistic(StNumMatchLeftRowsMax, maxLeftGroupSize); + if (maxRightGroupSize) + stats.addStatistic(StNumMatchRightRowsMax, maxRightGroupSize); + if (numMatchCandidates) + stats.addStatistic(StNumMatchCandidates, numMatchCandidates); + if (maxMatchCandidates) + stats.addStatistic(StNumMatchCandidatesMax, maxMatchCandidates); + } + + void noteGroup(rowcount_t numLeft, rowcount_t numRight) + { + rowcount_t numCandidates = numLeft * numRight; + if (numLeft > maxLeftGroupSize) + maxLeftGroupSize = numLeft; + if (numRight > maxRightGroupSize) + maxRightGroupSize = numRight; + numMatchCandidates += numCandidates; + if (numCandidates > maxMatchCandidates) + maxMatchCandidates = numCandidates; + } + +public: + stat_type maxLeftGroupSize = 0; + stat_type maxRightGroupSize = 0; + stat_type numMatchCandidates = 0; + stat_type maxMatchCandidates = 0; +}; #endif diff --git a/thorlcr/activities/selfjoin/thselfjoinslave.cpp b/thorlcr/activities/selfjoin/thselfjoinslave.cpp index 8a951a8fe06..5b76eba370a 100644 --- a/thorlcr/activities/selfjoin/thselfjoinslave.cpp +++ b/thorlcr/activities/selfjoin/thselfjoinslave.cpp @@ -195,6 +195,7 @@ class SelfJoinSlaveActivity : public CSlaveActivity } { CriticalBlock b(joinHelperCrit); + joinhelper->gatherStats(inactiveStats); joinhelper.clear(); } if (strm) @@ -231,8 +232,12 @@ class SelfJoinSlaveActivity : public CSlaveActivity { PARENT::gatherActiveStats(activeStats); CriticalBlock b(joinHelperCrit); - rowcount_t p = joinhelper?joinhelper->getLhsProgress():0; - activeStats.setStatistic(StNumLeftRows, p); + if (joinhelper) + { + joinhelper->gatherStats(activeStats); + rowcount_t p = joinhelper->getLhsProgress(); + activeStats.setStatistic(StNumLeftRows, p); + } mergeStats(activeStats, sorter, spillStatistics); // No danger of a race with reset() because that never replaces a valid sorter } }; diff --git a/thorlcr/thorutil/thormisc.cpp b/thorlcr/thorutil/thormisc.cpp index 48dc1231ee1..9bed2fdc36b 100644 --- a/thorlcr/thorutil/thormisc.cpp +++ b/thorlcr/thorutil/thormisc.cpp @@ -83,8 +83,10 @@ const StatisticsMapping indexReadActivityStatistics({StNumRowsProcessed}, indexR const StatisticsMapping indexWriteActivityStatistics({StPerReplicated, StNumLeafCacheAdds, StNumNodeCacheAdds, StNumBlobCacheAdds }, basicActivityStatistics, diskWriteRemoteStatistics); const StatisticsMapping keyedJoinActivityStatistics({ StNumIndexAccepted, StNumPreFiltered, StNumDiskSeeks, StNumDiskAccepted, StNumDiskRejected}, basicActivityStatistics, indexReadFileStatistics); const StatisticsMapping loopActivityStatistics({StNumIterations}, basicActivityStatistics); -const StatisticsMapping lookupJoinActivityStatistics({StNumSmartJoinSlavesDegradedToStd, StNumSmartJoinDegradedToLocal}, spillStatistics, basicActivityStatistics); -const StatisticsMapping joinActivityStatistics({StNumLeftRows, StNumRightRows}, basicActivityStatistics, spillStatistics); +const StatisticsMapping commonJoinActivityStatistics({StNumMatchLeftRowsMax, StNumMatchRightRowsMax, StNumMatchCandidates, StNumMatchCandidatesMax}, basicActivityStatistics); +const StatisticsMapping allJoinActivityStatistics({}, commonJoinActivityStatistics); +const StatisticsMapping lookupJoinActivityStatistics({StNumSmartJoinSlavesDegradedToStd, StNumSmartJoinDegradedToLocal}, spillStatistics, commonJoinActivityStatistics); +const StatisticsMapping joinActivityStatistics({StNumLeftRows, StNumRightRows}, commonJoinActivityStatistics, spillStatistics); const StatisticsMapping 
diskReadActivityStatistics({StNumDiskRowsRead, }, basicActivityStatistics, diskReadRemoteStatistics); const StatisticsMapping diskWriteActivityStatistics({StPerReplicated}, basicActivityStatistics, diskWriteRemoteStatistics); const StatisticsMapping sortActivityStatistics({}, basicActivityStatistics, spillStatistics); diff --git a/thorlcr/thorutil/thormisc.hpp b/thorlcr/thorutil/thormisc.hpp index d760f3d06da..51d3a11043e 100644 --- a/thorlcr/thorutil/thormisc.hpp +++ b/thorlcr/thorutil/thormisc.hpp @@ -152,6 +152,7 @@ extern graph_decl const StatisticsMapping indexReadActivityStatistics; extern graph_decl const StatisticsMapping indexWriteActivityStatistics; extern graph_decl const StatisticsMapping joinActivityStatistics; extern graph_decl const StatisticsMapping keyedJoinActivityStatistics; +extern graph_decl const StatisticsMapping allJoinActivityStatistics; extern graph_decl const StatisticsMapping lookupJoinActivityStatistics; extern graph_decl const StatisticsMapping loopActivityStatistics; extern graph_decl const StatisticsMapping diskReadActivityStatistics; From 88d27cbd564967c7ccc5ae46e602e0d5a3e294df Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Thu, 27 Jun 2024 14:49:43 +0100 Subject: [PATCH 119/151] HPCC-32169 Remove stale removePhysicalFiles code Signed-off-by: Jake Smith --- dali/base/dadfs.cpp | 91 +-------------------------------------------- dali/base/dadfs.hpp | 3 -- 2 files changed, 2 insertions(+), 92 deletions(-) diff --git a/dali/base/dadfs.cpp b/dali/base/dadfs.cpp index f64f40c9ca2..48ec2a1bb63 100644 --- a/dali/base/dadfs.cpp +++ b/dali/base/dadfs.cpp @@ -232,6 +232,8 @@ extern da_decl cost_type calcDiskWriteCost(const StringArray & clusters, stat_ty return writeCost; } +// JCSMORE - I suspect this function should be removed/deprecated. It does not deal with dirPerPart or striping. 
+// makePhysicalPartName supports both, but does not deal with groups/endpoints) RemoteFilename &constructPartFilename(IGroup *grp,unsigned partno,unsigned partmax,const char *name,const char *partmask,const char *partdir,unsigned copy,ClusterPartDiskMapSpec &mspec,RemoteFilename &rfn) { partno--; @@ -12008,95 +12010,6 @@ void CDistributedFileDirectory::setDefaultPreferredClusters(const char *clusters defprefclusters.set(clusters); } -bool removePhysicalFiles(IGroup *grp,const char *_filemask,unsigned short port,ClusterPartDiskMapSpec &mspec,IMultiException *mexcept) -{ - // TBD this won't remove repeated parts - - - PROGLOG("removePhysicalFiles(%s)",_filemask); - if (!isAbsolutePath(_filemask)) - throw MakeStringException(-1,"removePhysicalFiles: Filename %s must be complete path",_filemask); - - size32_t l = strlen(_filemask); - while (l&&isdigit(_filemask[l-1])) - l--; - unsigned width=0; - if (l&&(_filemask[l-1]=='_')) - width = atoi(_filemask+l); - if (!width) - width = grp->ordinality(); - - CriticalSection errcrit; - class casyncfor: public CAsyncFor - { - unsigned short port; - CriticalSection &errcrit; - IMultiException *mexcept; - unsigned width; - StringAttr filemask; - IGroup *grp; - ClusterPartDiskMapSpec &mspec; - public: - bool ok; - casyncfor(IGroup *_grp,const char *_filemask,unsigned _width,unsigned short _port,ClusterPartDiskMapSpec &_mspec,IMultiException *_mexcept,CriticalSection &_errcrit) - : mspec(_mspec),filemask(_filemask),errcrit(_errcrit) - { - grp = _grp; - port = _port; - ok = true; - mexcept = _mexcept; - width = _width; - } - void Do(unsigned i) - { - for (unsigned copy = 0; copy < 2; copy++) // ** TBD - { - RemoteFilename rfn; - constructPartFilename(grp,i+1,width,NULL,filemask,"",copy>0,mspec,rfn); - if (port) - rfn.setPort(port); // if daliservix - Owned partfile = createIFile(rfn); - StringBuffer eps; - try - { - unsigned start = msTick(); -#if 1 - if (partfile->remove()) { - PROGLOG("Removed '%s'",partfile->queryFilename()); - unsigned t = msTick()-start; - if (t>5*1000) - DBGLOG("Removing %s from %s took %ds", partfile->queryFilename(), rfn.queryEndpoint().getEndpointHostText(eps).str(), t/1000); - } - else - IWARNLOG("Failed to remove file part %s from %s", partfile->queryFilename(),rfn.queryEndpoint().getEndpointHostText(eps).str()); -#else - if (partfile->exists()) - PROGLOG("Would remove '%s'",partfile->queryFilename()); -#endif - - } - catch (IException *e) - { - CriticalBlock block(errcrit); - if (mexcept) - mexcept->append(*e); - else { - StringBuffer s("Failed to remove file part "); - s.append(partfile->queryFilename()).append(" from "); - rfn.queryEndpoint().getEndpointHostText(s); - EXCLOG(e, s.str()); - e->Release(); - } - ok = false; - } - } - } - } afor(grp,_filemask,width,port,mspec,mexcept,errcrit); - afor.For(width,10,false,true); - return afor.ok; -} - - IDaliServer *createDaliDFSServer(IPropertyTree *config) { assertex(!daliDFSServer); // initialization problem diff --git a/dali/base/dadfs.hpp b/dali/base/dadfs.hpp index 003fca751c9..6cde9f34875 100644 --- a/dali/base/dadfs.hpp +++ b/dali/base/dadfs.hpp @@ -828,9 +828,6 @@ extern da_decl IDFPartFilter *createPartFilter(const char *filter); | '-' */ -extern da_decl bool removePhysicalFiles(IGroup *grp,const char *_filemask,unsigned short port, ClusterPartDiskMapSpec &mspec,IMultiException *mexcept); -// for removing orphaned files - // for server use interface IDaliServer; extern da_decl IDaliServer *createDaliDFSServer(IPropertyTree *config); // called for coven members From 
626baa0ee89a06aa797e453bf029f0dcf8d2644d Mon Sep 17 00:00:00 2001 From: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> Date: Thu, 27 Jun 2024 10:05:39 -0400 Subject: [PATCH 120/151] HPCC-32170 ECL Watch v9 fix ZAP dialog relative time param corrects the RelativeTimeRangeBuffer request param on the ZAP dialog Signed-off-by: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> --- esp/src/src-react/components/forms/ZAPDialog.tsx | 10 +++++----- esp/src/src/nls/hpcc.ts | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/esp/src/src-react/components/forms/ZAPDialog.tsx b/esp/src/src-react/components/forms/ZAPDialog.tsx index 750e997efba..d43147c8f3c 100644 --- a/esp/src/src-react/components/forms/ZAPDialog.tsx +++ b/esp/src/src-react/components/forms/ZAPDialog.tsx @@ -67,7 +67,7 @@ interface ZAPDialogValues { StartDate?: string; EndDate?: string; }; - RelativeLogTimeRangeBuffer?: string; + RelativeTimeRangeBuffer?: string; LineLimit?: string; LineStartFrom?: string; SelectColumnMode?: ColumnMode; @@ -103,7 +103,7 @@ const defaultValues: ZAPDialogValues = { StartDate: "", EndDate: "", }, - RelativeLogTimeRangeBuffer: "", + RelativeTimeRangeBuffer: "", LineLimit: "10000", LineStartFrom: "0", SelectColumnMode: ColumnMode.DEFAULT, @@ -464,7 +464,7 @@ export const ZAPDialog: React.FunctionComponent = ({ rules={{ validate: { hasValue: (value, formValues) => { - if (value === "" && formValues.LogFilter.RelativeLogTimeRangeBuffer === "") { + if (value === "" && formValues.LogFilter.RelativeTimeRangeBuffer === "") { return nlsHPCC.LogFilterTimeRequired; } return true; @@ -496,14 +496,14 @@ export const ZAPDialog: React.FunctionComponent = ({ } /> Date: Fri, 24 May 2024 15:27:45 -0400 Subject: [PATCH 121/151] HPCC-31920 ECL Watch v9 fix error deleting last subfile Fixes an issue in ECL Watch v9 where attempting to delete the last subfile of a superfile would cause the subfile to not be properly removed and an exception to be logged. 
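The subfile list is now sourced from a dedicated useSubfiles hook and is
explicitly refreshed once the superfile action completes; roughly (calls as
introduced by this patch):

    const [subfiles, refreshSubfiles] = useSubfiles(cluster, logicalFile);
    WsDfu.SuperfileAction("remove", file.Name, selection, false)
        .then(() => refreshSubfiles());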
Signed-off-by: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> --- esp/src/src-react/components/FileDetails.tsx | 4 +-- .../components/LogicalFileSummary.tsx | 2 +- esp/src/src-react/components/SubFiles.tsx | 27 +++++++++++-------- esp/src/src-react/components/SuperFiles.tsx | 4 +-- esp/src/src-react/hooks/file.ts | 22 ++++++++++++++- 5 files changed, 42 insertions(+), 17 deletions(-) diff --git a/esp/src/src-react/components/FileDetails.tsx b/esp/src/src-react/components/FileDetails.tsx index 599339bc7da..c936e617142 100644 --- a/esp/src/src-react/components/FileDetails.tsx +++ b/esp/src/src-react/components/FileDetails.tsx @@ -41,10 +41,10 @@ export const FileDetails: React.FunctionComponent = ({ }) => { const [file] = useFile(cluster, logicalFile); React.useEffect(() => { - if (file?.NodeGroup && cluster === undefined) { + if (file?.NodeGroup && cluster === undefined && !file?.isSuperfile) { replaceUrl(`/files/${file.NodeGroup}/${logicalFile}`); } - }, [cluster, file?.NodeGroup, logicalFile]); + }, [cluster, file?.NodeGroup, file?.isSuperfile, logicalFile]); const [defFile] = useDefFile(cluster, logicalFile, WsDfu.DFUDefFileFormat.def); const [xmlFile] = useDefFile(cluster, logicalFile, WsDfu.DFUDefFileFormat.xml); diff --git a/esp/src/src-react/components/LogicalFileSummary.tsx b/esp/src/src-react/components/LogicalFileSummary.tsx index 469f2c57b83..a2551783357 100644 --- a/esp/src/src-react/components/LogicalFileSummary.tsx +++ b/esp/src/src-react/components/LogicalFileSummary.tsx @@ -181,7 +181,7 @@ export const LogicalFileSummary: React.FunctionComponent ({ label: "", type: "link", value: row.Name, href: `#/files/${row.Name}` })) }, + "SuperOwner": { label: nlsHPCC.SuperFile, type: "links", links: file?.Superfiles?.DFULogicalFile?.map(row => ({ label: "", type: "link", value: row.Name, href: `#/files/${row.NodeGroup !== null ? 
row.NodeGroup : undefined}/${row.Name}` })) }, "NodeGroup": { label: nlsHPCC.ClusterName, type: "string", value: file?.NodeGroup, readonly: true }, "Description": { label: nlsHPCC.Description, type: "string", value: description }, "JobName": { label: nlsHPCC.JobName, type: "string", value: file?.JobName, readonly: true }, diff --git a/esp/src/src-react/components/SubFiles.tsx b/esp/src/src-react/components/SubFiles.tsx index 7337454931f..e7088869472 100644 --- a/esp/src/src-react/components/SubFiles.tsx +++ b/esp/src/src-react/components/SubFiles.tsx @@ -5,7 +5,7 @@ import nlsHPCC from "src/nlsHPCC"; import { QuerySortItem } from "src/store/Store"; import * as WsDfu from "src/WsDfu"; import { useConfirm } from "../hooks/confirm"; -import { useFile } from "../hooks/file"; +import { useFile, useSubfiles } from "../hooks/file"; import { FluentGrid, useCopyButtons, useFluentStoreState, FluentColumns } from "./controls/Grid"; import { ShortVerticalDivider } from "./Common"; import { pushUrl } from "../util/history"; @@ -28,7 +28,8 @@ export const SubFiles: React.FunctionComponent = ({ sort = defaultSort }) => { - const [file, , , refresh] = useFile(cluster, logicalFile); + const [file] = useFile(cluster, logicalFile); + const [subfiles, refreshSubfiles] = useSubfiles(cluster, logicalFile); const [uiState, setUIState] = React.useState({ ...defaultUIState }); const [data, setData] = React.useState([]); const { @@ -101,26 +102,30 @@ export const SubFiles: React.FunctionComponent = ({ message: nlsHPCC.RemoveSubfiles2, items: selection.map(item => item.Name), onSubmit: React.useCallback(() => { - WsDfu.SuperfileAction("remove", file.Name, selection, false).then(() => refresh()); - }, [file, refresh, selection]) + WsDfu.SuperfileAction("remove", file.Name, selection, false).then(() => refreshSubfiles()); + }, [file, refreshSubfiles, selection]) }); React.useEffect(() => { - const subfiles = []; + const files = []; const promises = []; - file?.subfiles?.Item.forEach(item => { + subfiles?.Item.forEach(item => { const logicalFile = ESPLogicalFile.Get("", item); promises.push(logicalFile.getInfo2({ onAfterSend: function (response) { } })); - subfiles.push(logicalFile); + files.push(logicalFile); }); - Promise.all(promises).then(logicalFiles => { - setData(subfiles); - }); - }, [file?.subfiles]); + if (promises.length) { + Promise.all(promises).then(logicalFiles => { + setData(files); + }); + } else { + setData(files); + } + }, [file, subfiles]); const buttons = React.useMemo((): ICommandBarItemProps[] => [ { diff --git a/esp/src/src-react/components/SuperFiles.tsx b/esp/src/src-react/components/SuperFiles.tsx index b1cbe3bdd0b..b9cb69e99e4 100644 --- a/esp/src/src-react/components/SuperFiles.tsx +++ b/esp/src/src-react/components/SuperFiles.tsx @@ -43,11 +43,11 @@ export const SuperFiles: React.FunctionComponent = ({ label: nlsHPCC.Name, sortable: true, formatter: (name, row) => { - return {name}; + return {name}; } } }; - }, [cluster]); + }, []); // Command Bar --- const buttons = React.useMemo((): ICommandBarItemProps[] => [ diff --git a/esp/src/src-react/hooks/file.ts b/esp/src/src-react/hooks/file.ts index 7210998f5cf..7ea78a2cc70 100644 --- a/esp/src/src-react/hooks/file.ts +++ b/esp/src/src-react/hooks/file.ts @@ -14,7 +14,7 @@ export function useFile(cluster: string, name: string): [LogicalFile, boolean, n const [count, increment] = useCounter(); React.useEffect(() => { - const file = LogicalFile.attach({ baseUrl: "" }, cluster, name); + const file = LogicalFile.attach({ baseUrl: "" }, cluster === 
"undefined" ? undefined : cluster, name); let active = true; let handle; const fetchInfo = singletonDebounce(file, "fetchInfo"); @@ -87,3 +87,23 @@ export function useFileHistory(cluster: string, name: string): [WsDfu.Origin[], return [history, eraseHistory, increment]; } + +export function useSubfiles(cluster: string, name: string): [WsDfu.subfiles, () => void] { + + const [file] = useFile(cluster, name); + const [subfiles, setSubfiles] = React.useState({ Item: [] }); + const [count, increment] = useCounter(); + + React.useEffect(() => { + if (file) { + file.fetchInfo() + .then(response => { + setSubfiles(response.subfiles ?? { Item: [] }); + }) + .catch(err => logger.error(err)) + ; + } + }, [file, count]); + + return [subfiles, increment]; +} From 40b49e5cc4ed465cacba3e719d895d81751902bc Mon Sep 17 00:00:00 2001 From: Shamser Ahmed Date: Fri, 7 Jun 2024 14:38:56 +0100 Subject: [PATCH 122/151] HPCC-32000 Spill stats for nsplitter StSizePeakEphemeralDisk, StSizePeakTempDisk, StNumSpills, and StSizeSpillFile for nsplitter implemented. Signed-off-by: Shamser Ahmed --- .../activities/nsplitter/thnsplitterslave.cpp | 8 +- thorlcr/master/thactivitymaster.cpp | 4 +- thorlcr/thorutil/thbuf.cpp | 96 ++++++++++++++----- thorlcr/thorutil/thbuf.hpp | 1 + thorlcr/thorutil/thormisc.cpp | 4 +- thorlcr/thorutil/thormisc.hpp | 2 + 6 files changed, 89 insertions(+), 26 deletions(-) diff --git a/thorlcr/activities/nsplitter/thnsplitterslave.cpp b/thorlcr/activities/nsplitter/thnsplitterslave.cpp index de22da08908..ca9ccd0d6df 100644 --- a/thorlcr/activities/nsplitter/thnsplitterslave.cpp +++ b/thorlcr/activities/nsplitter/thnsplitterslave.cpp @@ -152,7 +152,7 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf } } public: - NSplitterSlaveActivity(CGraphElementBase *_container) : CSlaveActivity(_container), writer(*this) + NSplitterSlaveActivity(CGraphElementBase *_container) : CSlaveActivity(_container, nsplitterActivityStatistics), writer(*this) { numOutputs = container.getOutputs(); connectedOutputSet.setown(createBitSet()); @@ -401,6 +401,12 @@ class NSplitterSlaveActivity : public CSlaveActivity, implements ISharedSmartBuf if (sharedRowStream) sharedRowStream->cancel(); } + virtual void gatherActiveStats(CRuntimeStatisticCollection &activeStats) const override + { + PARENT::gatherActiveStats(activeStats); + if (sharedRowStream) + ::mergeStats(activeStats, sharedRowStream); + } // ISharedSmartBufferCallback impl. 
virtual void paged() { pagedOut = true; } virtual void blocked() diff --git a/thorlcr/master/thactivitymaster.cpp b/thorlcr/master/thactivitymaster.cpp index 82ca2d647a9..f80ee0bd258 100644 --- a/thorlcr/master/thactivitymaster.cpp +++ b/thorlcr/master/thactivitymaster.cpp @@ -170,7 +170,6 @@ class CGenericMasterGraphElement : public CMasterGraphElement case TAKcase: case TAKchildcase: case TAKdegroup: - case TAKsplit: case TAKproject: case TAKprefetchproject: case TAKprefetchcountproject: @@ -210,6 +209,9 @@ class CGenericMasterGraphElement : public CMasterGraphElement case TAKemptyaction: ret = new CMasterActivity(this); break; + case TAKsplit: + ret = new CMasterActivity(this, nsplitterActivityStatistics); + break; case TAKsoap_rowdataset: case TAKsoap_rowaction: case TAKsoap_datasetdataset: diff --git a/thorlcr/thorutil/thbuf.cpp b/thorlcr/thorutil/thbuf.cpp index b1377f6db00..5ab144e8ee9 100644 --- a/thorlcr/thorutil/thbuf.cpp +++ b/thorlcr/thorutil/thbuf.cpp @@ -1753,6 +1753,10 @@ class CSharedWriteAheadBase : public CSimpleInterface, implements ISharedSmartBu queryCOutput(c).reset(); inMemRows->reset(0); } + virtual unsigned __int64 getStatistic(StatisticKind kind) const override + { + return 0; + } friend class COutput; friend class CRowSet; }; @@ -2145,6 +2149,24 @@ class CSharedWriteAheadDisk : public CSharedWriteAheadBase tempFileIO->setSize(0); tempFileOwner->noteSize(0); } + virtual unsigned __int64 getStatistic(StatisticKind kind) const override + { + switch (kind) + { + case StSizeSpillFile: + return tempFileIO->getStatistic(StSizeDiskWrite); + case StCycleDiskWriteIOCycles: + case StTimeDiskWriteIO: + case StSizeDiskWrite: + return 0; + case StNumSpills: + return 1; + case StTimeSpillElapsed: + return tempFileIO->getStatistic(StCycleDiskWriteIOCycles); + default: + return tempFileIO->getStatistic(kind); + } + } }; ISharedSmartBuffer *createSharedSmartDiskBuffer(CActivityBase *activity, const char *spillname, unsigned outputs, IThorRowInterfaces *rowIf) @@ -2433,7 +2455,7 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf totalInputRowsRead = 0; // not used until spilling begins, represents count of all rows read rowcount_t inMemTotalRows = 0; // whilst in memory, represents count of all rows seen CriticalSection readAheadCS; // ensure single reader (leader), reads ahead (updates rows/totalInputRowsRead/inMemTotalRows) - Owned iFile; + Owned tempFileOwner; Owned iFileIO; Owned outputStream; Linked compressHandler; @@ -2442,6 +2464,9 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOfflush(); + tempFileOwner->noteSize(iFileIO->getStatistic(StSizeDiskWrite)); + ::mergeStats(inactiveStats, iFileIO); + iFileIO.clear(); + outputStream.clear(); + } } void createOutputStream() { // NB: Called once, when spilling starts. - auto res = createSerialOutputStream(iFile, compressHandler, options, numOutputs + 1); + tempFileOwner.setown(activity.createOwnedTempFile(baseTmpFilename)); + auto res = createSerialOutputStream(&(tempFileOwner->queryIFile()), compressHandler, options, numOutputs + 1); outputStream.setown(std::get<0>(res)); iFileIO.setown(std::get<1>(res)); totalInputRowsRead = inMemTotalRows; @@ -2517,7 +2549,7 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOfflush(); totalInputRowsRead.fetch_add(newRowsWritten); - + tempFileOwner->noteSize(iFileIO->getStatistic(StSizeDiskWrite)); // JCSMORE - could track size written, and start new file at this point (e.g. 
every 100MB), // and track their starting points (by row #) in a vector // We could then tell if/when the readers catch up, and remove consumed files as they do. @@ -2528,9 +2560,10 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf(row)); } public: - explicit CSharedFullSpillingWriteAhead(CActivityBase *_activity, unsigned _numOutputs, IRowStream *_input, bool _inputGrouped, const SharedRowStreamReaderOptions &_options, IThorRowInterfaces *rowIf, const char *tempFileName, ICompressHandler *_compressHandler) - : activity(*_activity), numOutputs(_numOutputs), input(_input), inputGrouped(_inputGrouped), options(_options), compressHandler(_compressHandler), - meta(rowIf->queryRowMetaData()), serializer(rowIf->queryRowSerializer()), allocator(rowIf->queryRowAllocator()), deserializer(rowIf->queryRowDeserializer()) + explicit CSharedFullSpillingWriteAhead(CActivityBase *_activity, unsigned _numOutputs, IRowStream *_input, bool _inputGrouped, const SharedRowStreamReaderOptions &_options, IThorRowInterfaces *rowIf, const char *_baseTmpFilename, ICompressHandler *_compressHandler) + : activity(*_activity), numOutputs(_numOutputs), input(_input), inputGrouped(_inputGrouped), options(_options), compressHandler(_compressHandler), baseTmpFilename(_baseTmpFilename), + meta(rowIf->queryRowMetaData()), serializer(rowIf->queryRowSerializer()), allocator(rowIf->queryRowAllocator()), deserializer(rowIf->queryRowDeserializer()), + inactiveStats(spillingWriteAheadStatistics) { assertex(input); @@ -2541,15 +2574,10 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOfremove(); - } + closeWriter(); freeRows(); } void outputStopped(unsigned output) @@ -2568,15 +2596,15 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOfqueryFilename(), tracing.str()); + activity.ActPrintLog("CSharedFullSpillingWriteAhead::outputStopped closing tempfile writer: %s %s", tempFileOwner->queryIFile().queryFilename(), tracing.str()); closeWriter(); - iFile->remove(); + tempFileOwner.clear(); } } } std::tuple getReadStream() // also pass back IFileIO for stats purposes { - return createSerialInputStream(iFile, compressHandler, options, numOutputs + 1); // +1 for writer + return createSerialInputStream(&(tempFileOwner->queryIFile()), compressHandler, options, numOutputs + 1); // +1 for writer } bool checkWriteAhead(rowcount_t &outputRowsAvailable) { @@ -2623,8 +2651,8 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOf= options.inMemMaxMem) // too much in memory, spill { // NB: this will reset rowMemUsage, however, each reader will continue to consume rows until they catch up (or stop) - ActPrintLog(&activity, "Spilling to temp storage [file = %s, outputRowsAvailable = %" I64F "u, start = %" I64F "u, end = %" I64F "u, count = %u]", iFile->queryFilename(), outputRowsAvailable, inMemTotalRows - rows.size(), inMemTotalRows, (unsigned)rows.size()); createOutputStream(); + ActPrintLog(&activity, "Spilling to temp storage [file = %s, outputRowsAvailable = %" I64F "u, start = %" I64F "u, end = %" I64F "u, count = %u]", tempFileOwner->queryIFile().queryFilename(), outputRowsAvailable, inMemTotalRows - rows.size(), inMemTotalRows, (unsigned)rows.size()); return false; } @@ -2686,11 +2714,7 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOfremove(); - } + closeWriter(); for (auto &output: outputs) output->reset(); freeRows(); @@ -2701,6 +2725,32 @@ class CSharedFullSpillingWriteAhead : public CInterfaceOfgetStatistic(useKind); + v += inactiveStats.getStatisticValue(useKind); + return v; + } }; 
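// NB: consumers of the shared stream merge these per-stream stats into activity stats via
// ISharedRowStreamReader::getStatistic - see NSplitterSlaveActivity::gatherActiveStats above,
// which calls ::mergeStats(activeStats, sharedRowStream).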
ISharedRowStreamReader *createSharedFullSpillingWriteAhead(CActivityBase *_activity, unsigned numOutputs, IRowStream *_input, bool _inputGrouped, const SharedRowStreamReaderOptions &options, IThorRowInterfaces *_rowIf, const char *tempFileName, ICompressHandler *compressHandler) diff --git a/thorlcr/thorutil/thbuf.hpp b/thorlcr/thorutil/thbuf.hpp index 1750f63b007..fb5a66af8fa 100644 --- a/thorlcr/thorutil/thbuf.hpp +++ b/thorlcr/thorutil/thbuf.hpp @@ -87,6 +87,7 @@ interface ISharedRowStreamReader : extends IInterface virtual IRowStream *queryOutput(unsigned output) = 0; virtual void cancel()=0; virtual void reset() = 0; + virtual unsigned __int64 getStatistic(StatisticKind kind) const = 0; }; diff --git a/thorlcr/thorutil/thormisc.cpp b/thorlcr/thorutil/thormisc.cpp index 48dc1231ee1..4d36f2e1686 100644 --- a/thorlcr/thorutil/thormisc.cpp +++ b/thorlcr/thorutil/thormisc.cpp @@ -92,8 +92,10 @@ const StatisticsMapping graphStatistics({StNumExecutions, StSizeSpillFile, StSiz const StatisticsMapping diskReadPartStatistics({StNumDiskRowsRead}, diskReadRemoteStatistics); const StatisticsMapping indexDistribActivityStatistics({}, basicActivityStatistics, jhtreeCacheStatistics); const StatisticsMapping soapcallActivityStatistics({}, basicActivityStatistics, soapcallStatistics); -const StatisticsMapping hashDedupActivityStatistics({StNumSpills, StSizeSpillFile, StTimeSortElapsed, StSizePeakTempDisk}, diskWriteRemoteStatistics, basicActivityStatistics); +const StatisticsMapping hashDedupActivityStatistics({}, spillStatistics, diskWriteRemoteStatistics, basicActivityStatistics); const StatisticsMapping hashDistribActivityStatistics({StNumLocalRows, StNumRemoteRows, StSizeRemoteWrite}, basicActivityStatistics); +const StatisticsMapping nsplitterActivityStatistics({}, spillStatistics, basicActivityStatistics); +const StatisticsMapping spillingWriteAheadStatistics({}, spillStatistics); MODULE_INIT(INIT_PRIORITY_STANDARD) { diff --git a/thorlcr/thorutil/thormisc.hpp b/thorlcr/thorutil/thormisc.hpp index d760f3d06da..4ba1cc18664 100644 --- a/thorlcr/thorutil/thormisc.hpp +++ b/thorlcr/thorutil/thormisc.hpp @@ -166,6 +166,8 @@ extern graph_decl const StatisticsMapping soapcallActivityStatistics; extern graph_decl const StatisticsMapping indexReadFileStatistics; extern graph_decl const StatisticsMapping hashDedupActivityStatistics; extern graph_decl const StatisticsMapping hashDistribActivityStatistics; +extern graph_decl const StatisticsMapping nsplitterActivityStatistics; +extern graph_decl const StatisticsMapping spillingWriteAheadStatistics; class BooleanOnOff { From 881d5e2cf955d5f44763fc895b9d0d1508946562 Mon Sep 17 00:00:00 2001 From: Rodrigo Pastrana Date: Fri, 3 May 2024 17:28:52 -0400 Subject: [PATCH 123/151] HPCC-29546 Grafana/loki logaccess plugin - Provides Grafana/loki curl based logaccess plugin - Updates helm/managed/loggin/loki-stack/README - Provides mechanism to create grafana-hpcc logaccess secret - Adds encodeCSV jstring logic - Adds encodeCSV unittest - Attempts to minimize StringBuffer resizes - Adds sortby support - Enables csv header reporting Signed-off-by: Rodrigo Pastrana --- helm/managed/logging/loki-stack/README.md | 63 +- .../create-grafana-logaccess-secret.sh | 69 ++ .../loki-stack/grafana-hpcc-logaccess.yaml | 43 + .../loki-stack/secrets-templates/password | 1 + .../loki-stack/secrets-templates/username | 1 + system/jlib/jlog.cpp | 35 +- system/jlib/jstring.cpp | 36 + system/jlib/jstring.hpp | 5 + system/logaccess/CMakeLists.txt | 1 + 
system/logaccess/Grafana/CMakeLists.txt | 19 + .../Grafana/CurlClient/CMakeLists.txt | 45 + .../Grafana/CurlClient/GrafanaCurlClient.cpp | 867 ++++++++++++++++++ .../Grafana/CurlClient/GrafanaCurlClient.hpp | 108 +++ testing/unittests/jlibtests.cpp | 25 +- 14 files changed, 1293 insertions(+), 25 deletions(-) create mode 100755 helm/managed/logging/loki-stack/create-grafana-logaccess-secret.sh create mode 100644 helm/managed/logging/loki-stack/grafana-hpcc-logaccess.yaml create mode 100644 helm/managed/logging/loki-stack/secrets-templates/password create mode 100644 helm/managed/logging/loki-stack/secrets-templates/username create mode 100644 system/logaccess/Grafana/CMakeLists.txt create mode 100644 system/logaccess/Grafana/CurlClient/CMakeLists.txt create mode 100644 system/logaccess/Grafana/CurlClient/GrafanaCurlClient.cpp create mode 100644 system/logaccess/Grafana/CurlClient/GrafanaCurlClient.hpp diff --git a/helm/managed/logging/loki-stack/README.md b/helm/managed/logging/loki-stack/README.md index 266288393c1..6c4714e898c 100644 --- a/helm/managed/logging/loki-stack/README.md +++ b/helm/managed/logging/loki-stack/README.md @@ -78,4 +78,65 @@ The default Loki-Stack chart will not declare permanent storage and therefore lo loki: persistence: enabled: true -``` \ No newline at end of file +``` + +## Configure HPCC logAccess +The logAccess feature allows HPCC to query and package relevant logs for various features such as ZAP report, WorkUnit helper logs, ECLWatch log viewer, etc. + +### Provide target Grafana/Loki access information + +HPCC logAccess requires access to the Grafana username/password. Those values must be provided via a secure secret object. + +The secret is expected to be in the 'esp' category, and be named 'grafana-logaccess'. The following key-value pairs are required (key names must be spelled exactly as shown here): + + username - This should contain the Grafana username + password - This should contain the Grafana password + +The included 'create-grafana-logaccess-secret.sh' helper can be used to create the necessary secret. + +Example scripted secret creation command (assuming ./secrets-templates contains files named exactly as the above keys): + +``` + create-grafana-logaccess-secret.sh -d HPCC-Platform/helm/managed/logging/loki-stack/secrets-templates/ -n hpcc +``` + +Otherwise, users can create the secret manually. + +Example manual secret creation command (assuming ./secrets-templates contains files named exactly as the above keys): + +``` + kubectl create secret generic grafana-logaccess --from-file=HPCC-Platform/helm/managed/logging/loki-stack/secrets-templates/ -n hpcc +``` + +### Configure HPCC logAccess + +The target HPCC deployment should be directed to use the desired Grafana endpoint with the Loki datasource, and the newly created secret, by providing appropriate logAccess values (such as ./grafana-hpcc-logaccess.yaml).
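For reference, the plugin added later in this patch consumes this secret through jlib's secrets API; a trimmed sketch of that lookup, using the helper calls visible in GrafanaCurlClient.cpp (error handling elided; the exact getSecret return type is an assumption):

```cpp
#include "jptree.hpp"
#include "jsecrets.hpp"
#include "jstring.hpp"

// Trimmed sketch of the 'esp'/'grafana-logaccess' lookup performed by the plugin;
// when the secret is absent, the plugin falls back to connection/@username and
// connection/@password from the logAccess configuration.
static bool loadGrafanaCredentials(StringBuffer &userName, StringBuffer &password)
{
    Owned<const IPropertyTree> secretTree = getSecret("esp", "grafana-logaccess");
    if (!secretTree)
        return false;
    getSecretKeyValue(userName.clear(), secretTree, "username");
    getSecretKeyValue(password.clear(), secretTree, "password");
    return userName.length() && password.length();
}
```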
+ +Example use: + +``` + helm install myhpcc hpcc/hpcc -f HPCC-Platform/helm/managed/logging/loki-stack/grafana-hpcc-logaccess.yaml +``` + +The grafana hpcc logaccess values should provide Grafana connection information, such as the host and port; the Loki datasource where the logs reside; the k8s namespace under which the logs were created (non-default namespace highly recommended); and the hpcc component log format (table|json|xml). + +Example use: + +``` + global: + logAccess: + name: "Grafana/loki stack log access" + type: "GrafanaCurl" + connection: + protocol: "http" + host: "myloki4hpcclogs-grafana.default.svc.cluster.local" + port: 3000 + datasource: + id: "1" + name: "Loki" + namespace: + name: "hpcc" + logFormat: + type: "json" +``` diff --git a/helm/managed/logging/loki-stack/create-grafana-logaccess-secret.sh b/helm/managed/logging/loki-stack/create-grafana-logaccess-secret.sh new file mode 100755 index 00000000000..f4c7efbed09 --- /dev/null +++ b/helm/managed/logging/loki-stack/create-grafana-logaccess-secret.sh @@ -0,0 +1,69 @@ +#!/bin/bash +WORK_DIR=$(dirname $0) +source ${WORK_DIR}/env-loganalytics + +k8scommand="kubectl" +secretname="grafana-logaccess" +secretsdir="${WORK_DIR}/secrets-templates" +namespace="default" + +usage() +{ + echo "Creates necessary k8s secret used by HPCC's logAccess to access Loki data source through Grafana" + echo "> create-grafana-logaccess-secret.sh [Options]" + echo "" + echo "Options:" + echo "-d Specifies directory containing required secret values in self-named files." + echo " Defaults to ${secretsdir}" + echo "-h Print Usage message" + echo "-n Specifies namespace for secret" + echo "" + echo "Requires directory containing secret values in dedicated files." + echo "Defaults to ${secretsdir} if not specified via -d option." + echo "" + echo "Expected directory structure:" + echo "${secretsdir}/" + echo " password - Should contain Grafana password" + echo " username - Should contain Grafana user name" +} + +while [ "$#" -gt 0 ]; do + arg=$1 + case "${arg}" in + -h) + usage + exit + ;; + -d) shift + secretsdir=$1 + ;; + -n) shift + namespace=$1 + ;; + esac + shift +done + +echo "Creating '${namespace}/${secretname}' secret." + +command -v ${k8scommand} >/dev/null 2>&1 || { echo >&2 "Aborting - '${k8scommand}' not found!"; exit 1; } + +errormessage=$(${k8scommand} get secret ${secretname} -n ${namespace} 2>&1) +if [[ $? -eq 0 ]] +then + echo "WARNING: Target secret '${namespace}/${secretname}' already exists! Delete it and re-run if secret update desired." + echo "${errormessage}" + exit 1 +fi + +errormessage=$(${k8scommand} create secret generic ${secretname} --from-file=${secretsdir} -n ${namespace} ) +if [[ $? -ne 0 ]] +then + echo "Error creating target secret '${namespace}/${secretname}'!" + echo >&2 + usage + exit 1 +else + echo "Target secret '${namespace}/${secretname}' successfully created!"
+ ${k8scommand} get secret ${secretname} -n ${namespace} +fi diff --git a/helm/managed/logging/loki-stack/grafana-hpcc-logaccess.yaml b/helm/managed/logging/loki-stack/grafana-hpcc-logaccess.yaml new file mode 100644 index 00000000000..70d09058960 --- /dev/null +++ b/helm/managed/logging/loki-stack/grafana-hpcc-logaccess.yaml @@ -0,0 +1,43 @@ +# Configures HPCC logAccess to target grafana/loki +global: + logAccess: + name: "Grafana/loki stack log access" + type: "GrafanaCurl" + connection: + protocol: "http" + host: "myloki4hpcclogs-grafana.default.svc.cluster.local" + port: 3000 + datasource: + id: "1" + name: "Loki" + namespace: + name: "hpcc" + logFormat: + type: "json" + logMaps: + - type: "global" + searchColumn: "log" + columnMode: "DEFAULT" + - type: "components" + storeName: "stream" + searchColumn: "component" + columnMode: "MIN" + columnType: "string" + - type: "timestamp" + storeName: "values" + searchColumn: "time" + columnMode: "ALL" + columnType: "datetime" + - type: "pod" + storeName: "stream" + searchColumn: "pod" + columnMode: "ALL" + columnType: "string" +secrets: + esp: + grafana-logaccess: "grafana-logaccess" +vaults: + esp: + - name: my-grafana-logaccess-vault + url: http://${env.VAULT_SERVICE_HOST}:${env.VAULT_SERVICE_PORT}/v1/secret/data/esp/${secret} + kind: kv-v2 diff --git a/helm/managed/logging/loki-stack/secrets-templates/password b/helm/managed/logging/loki-stack/secrets-templates/password new file mode 100644 index 00000000000..6b3a9a39380 --- /dev/null +++ b/helm/managed/logging/loki-stack/secrets-templates/password @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/helm/managed/logging/loki-stack/secrets-templates/username b/helm/managed/logging/loki-stack/secrets-templates/username new file mode 100644 index 00000000000..f77b00407e0 --- /dev/null +++ b/helm/managed/logging/loki-stack/secrets-templates/username @@ -0,0 +1 @@ +admin \ No newline at end of file diff --git a/system/jlib/jlog.cpp b/system/jlib/jlog.cpp index 4b7fe5b3ec0..2d8bf718e3e 100644 --- a/system/jlib/jlog.cpp +++ b/system/jlib/jlog.cpp @@ -3213,32 +3213,21 @@ IRemoteLogAccess *queryRemoteLogAccessor() { const char * simulatedGlobalYaml = R"!!(global: logAccess: - name: "Azure LogAnalytics LogAccess" - type: "AzureLogAnalyticsCurl" + name: "Grafana/loki stack log access" + type: "GrafanaCurl" connection: #workspaceID: "ef060646-ef24-48a5-b88c-b1f3fbe40271" - workspaceID: "XYZ" #ID of the Azure LogAnalytics workspace to query logs from + #workspaceID: "XYZ" #ID of the Azure LogAnalytics workspace to query logs from #tenantID: "ABC" #The Tenant ID, required for KQL API access - clientID: "DEF" #ID of Azure Active Directory registered application with api.loganalytics.io access - logMaps: - - type: "global" - storeName: "ContainerLog" - searchColumn: "LogEntry" - timeStampColumn: "hpcc_log_timestamp" - - type: "workunits" - storeName: "ContainerLog" - searchColumn: "hpcc_log_jobid" - - type: "components" - searchColumn: "ContainerID" - - type: "audience" - searchColumn: "hpcc_log_audience" - - type: "class" - searchColumn: "hpcc_log_class" - - type: "instance" - storeName: "ContainerInventory" - searchColumn: "Name" - - type: "host" - searchColumn: "Computer" + #clientID: "DEF" #ID of Azure Active Directory registered application with api.loganalytics.io access + protocol: "http" + host: "localhost" + port: "3000" + datasource: + id: "1" + name: "Loki" + namespace: + name: "hpcc" )!!"; Owned<IPropertyTree> testTree = createPTreeFromYAMLString(simulatedGlobalYaml, ipt_none, ptr_ignoreWhiteSpace,
nullptr); logAccessPluginConfig.setown(testTree->getPropTree("global/logAccess")); diff --git a/system/jlib/jstring.cpp b/system/jlib/jstring.cpp index 5a1be75faf6..50951938ae5 100644 --- a/system/jlib/jstring.cpp +++ b/system/jlib/jstring.cpp @@ -2367,6 +2367,42 @@ StringBuffer &encodeJSON(StringBuffer &s, const char *value) return encodeJSON(s, strlen(value), value); } +inline StringBuffer & encodeCSVChar(StringBuffer & encodedCSV, char ch) +{ + byte next = ch; + switch (next) + { + case '\"': + encodedCSV.append("\""); + encodedCSV.append(next); + break; + //Any other character that needs to be escaped? + default: + encodedCSV.append(next); + break; + } + return encodedCSV; +} + +StringBuffer & encodeCSVColumn(StringBuffer & encodedCSV, unsigned size, const char *rawCSVCol) +{ + if (!rawCSVCol) + return encodedCSV; + encodedCSV.ensureCapacity(size+2); // Minimum size that will be written + encodedCSV.append("\""); + for (size32_t i = 0; i < size; i++) + encodeCSVChar(encodedCSV, rawCSVCol[i]); + encodedCSV.append("\""); + return encodedCSV; +} + +StringBuffer & encodeCSVColumn(StringBuffer & encodedCSV, const char *rawCSVCol) +{ + if (!rawCSVCol) + return encodedCSV; + return encodeCSVColumn(encodedCSV, strlen(rawCSVCol), rawCSVCol); +} + bool checkUnicodeLiteral(char const * str, unsigned length, unsigned & ep, StringBuffer & msg) { unsigned i; diff --git a/system/jlib/jstring.hpp b/system/jlib/jstring.hpp index 5a153555041..b3fe7651daf 100644 --- a/system/jlib/jstring.hpp +++ b/system/jlib/jstring.hpp @@ -479,6 +479,11 @@ inline StringBuffer &delimitJSON(StringBuffer &s, bool addNewline=false, bool es return s; } +/* +* Encodes a CSV column, not an entire CSV record +*/ +jlib_decl StringBuffer &encodeCSVColumn(StringBuffer &s, const char *value); + jlib_decl StringBuffer &encodeJSON(StringBuffer &s, const char *value); jlib_decl StringBuffer &encodeJSON(StringBuffer &s, unsigned len, const char *value); diff --git a/system/logaccess/CMakeLists.txt b/system/logaccess/CMakeLists.txt index 80ea08d0281..51c349ebf34 100644 --- a/system/logaccess/CMakeLists.txt +++ b/system/logaccess/CMakeLists.txt @@ -19,4 +19,5 @@ IF(NOT CLIENTTOOLS_ONLY) HPCC_ADD_SUBDIRECTORY (ElasticStack) ENDIF() HPCC_ADD_SUBDIRECTORY (Azure) + HPCC_ADD_SUBDIRECTORY (Grafana) ENDIF() diff --git a/system/logaccess/Grafana/CMakeLists.txt b/system/logaccess/Grafana/CMakeLists.txt new file mode 100644 index 00000000000..2a6ea152a52 --- /dev/null +++ b/system/logaccess/Grafana/CMakeLists.txt @@ -0,0 +1,19 @@ +############################################################################### +# HPCC SYSTEMS software Copyright (C) 2022 HPCC Systems®. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+################################################################################ + +IF(NOT CLIENTTOOLS_ONLY) + HPCC_ADD_SUBDIRECTORY (CurlClient) +ENDIF() diff --git a/system/logaccess/Grafana/CurlClient/CMakeLists.txt b/system/logaccess/Grafana/CurlClient/CMakeLists.txt new file mode 100644 index 00000000000..a749dacd715 --- /dev/null +++ b/system/logaccess/Grafana/CurlClient/CMakeLists.txt @@ -0,0 +1,45 @@ +############################################################################### +# HPCC SYSTEMS software Copyright (C) 2022 HPCC Systems®. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +################################################################################ + +project(GrafanaCurllogaccess) + +# Required installed libraries +find_package(CURL REQUIRED) + +set(srcs + ${CMAKE_CURRENT_SOURCE_DIR}/GrafanaCurlClient.cpp +) + +include_directories( + ${HPCC_SOURCE_DIR}/system/include + ${HPCC_SOURCE_DIR}/system/jlib + ${CURL_INCLUDE_DIR} +) + +add_definitions(-DGRAFANA_CURL_LOGACCESS_EXPORTS) + +HPCC_ADD_LIBRARY(${PROJECT_NAME} SHARED ${srcs}) + +target_link_libraries(${PROJECT_NAME} + PRIVATE jlib + PRIVATE ${CURL_LIBRARIES} +) + +install(TARGETS ${PROJECT_NAME} + RUNTIME DESTINATION ${EXEC_DIR} + LIBRARY DESTINATION ${LIB_DIR} + CALC_DEPS +) diff --git a/system/logaccess/Grafana/CurlClient/GrafanaCurlClient.cpp b/system/logaccess/Grafana/CurlClient/GrafanaCurlClient.cpp new file mode 100644 index 00000000000..5ada0237838 --- /dev/null +++ b/system/logaccess/Grafana/CurlClient/GrafanaCurlClient.cpp @@ -0,0 +1,867 @@ +/*############################################################################## + + HPCC SYSTEMS software Copyright (C) 2024 HPCC Systems®. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+############################################################################## */ + +#include "GrafanaCurlClient.hpp" + +#include "platform.h" +#include +#include +#include + +#include +#include +#include + +#ifdef _CONTAINERIZED +//In containerized world, most likely localhost is not the target grafana host +static constexpr const char * DEFAULT_GRAFANA_HOST = "mycluster-grafana.default.svc.cluster.local"; +#else +//In bare metal, localhost is as good a guess as any +static constexpr const char * DEFAULT_GRAFANA_HOST = "localhost"; +#endif + +static constexpr const char * DEFAULT_GRAFANA_PROTOCOL = "http"; +static constexpr const char * DEFAULT_GRAFANA_PORT = "3000"; +static constexpr const char * DEFAULT_DATASOURCE_ID = "1"; + +static constexpr const char * defaultNamespaceStream = "default"; +static constexpr const char * defaultExpectedLogFormat = "table"; //"json"; + +static constexpr const char * logMapIndexPatternAtt = "@storeName"; +static constexpr const char * logMapSearchColAtt = "@searchColumn"; +static constexpr const char * logMapTimeStampColAtt = "@timeStampColumn"; +static constexpr const char * logMapKeyColAtt = "@keyColumn"; +static constexpr const char * logMapDisableJoinsAtt = "@disableJoins"; + +static constexpr std::size_t defaultMaxRecordsPerFetch = 100; + +/* +* To be used as a callback for curl_easy_setopt to capture the response from a curl request +*/ +size_t stringCallback(char *contents, size_t size, size_t nmemb, void *userp) +{ + ((std::string*)userp)->append((char*)contents, size * nmemb); + return size * nmemb; +} + +/* +* Constructs a curl-based client request based on the provided connection string and targetURI +* The response is reported in the readBuffer +* Uses stringCallback to handle successful curl requests +*/ +void GrafanaLogAccessCurlClient::submitQuery(std::string & readBuffer, const char * targetURI) +{ + if (isEmptyString(m_grafanaConnectionStr.str())) + throw makeStringExceptionV(-1, "%s Cannot submit query, empty connection string detected!", COMPONENT_NAME); + + if (isEmptyString(targetURI)) + throw makeStringExceptionV(-1, "%s Cannot submit query, empty request URI detected!", COMPONENT_NAME); + + OwnedPtrCustomFree<CURL, curl_easy_cleanup> curlHandle = curl_easy_init(); + if (curlHandle) + { + CURLcode curlResponseCode; + OwnedPtrCustomFree<curl_slist, curl_slist_free_all> headers = nullptr; + char curlErrBuffer[CURL_ERROR_SIZE]; + curlErrBuffer[0] = '\0'; + + VStringBuffer requestURL("%s%s%s", m_grafanaConnectionStr.str(), m_dataSourcesAPIURI.str(), targetURI); + + if (curl_easy_setopt(curlHandle, CURLOPT_URL, requestURL.str()) != CURLE_OK) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_URL' (%s)!", COMPONENT_NAME, requestURL.str()); + + int curloptretcode = curl_easy_setopt(curlHandle, CURLOPT_HTTPAUTH, (long)CURLAUTH_BASIC); + if (curloptretcode != CURLE_OK) + { + if (curloptretcode == CURLE_UNKNOWN_OPTION) + throw makeStringExceptionV(-1, "%s: Log query request: UNKNOWN option 'CURLOPT_HTTPAUTH'!", COMPONENT_NAME); + if (curloptretcode == CURLE_NOT_BUILT_IN) + throw makeStringExceptionV(-1, "%s: Log query request: bitmask specified not built-in! 'CURLOPT_HTTPAUTH'/'CURLAUTH_BASIC'!", COMPONENT_NAME); + + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_HTTPAUTH':'CURLAUTH_BASIC'!", COMPONENT_NAME); + } + + //allow anonymous connections?? + if (isEmptyString(m_grafanaUserName.str())) + throw makeStringExceptionV(-1, "%s: Log query request: Empty user name detected!", COMPONENT_NAME); + + //allow non-secure connections??
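+ //NB: credentials are effectively mandatory here: CURLAUTH_BASIC is always configured above, and empty username/password values throw before the request is issued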
+ if (isEmptyString(m_grafanaPassword.str())) + throw makeStringExceptionV(-1, "%s: Log query request: Empty password detected!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_USERNAME, m_grafanaUserName.str())) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_USERNAME' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_PASSWORD, m_grafanaPassword.str())) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_PASSWORD' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_POST, 0) != CURLE_OK) + throw makeStringExceptionV(-1, "%s: Log query request: Could not disable 'CURLOPT_POST' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_HTTPGET, 1) != CURLE_OK) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_HTTPGET' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_NOPROGRESS, 1) != CURLE_OK) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_NOPROGRESS' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_WRITEFUNCTION, stringCallback) != CURLE_OK) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_WRITEFUNCTION' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_WRITEDATA, &readBuffer) != CURLE_OK) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_WRITEDATA' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_USERAGENT, "HPCC Systems LogAccess client") != CURLE_OK) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_USERAGENT' option!", COMPONENT_NAME); + + if (curl_easy_setopt(curlHandle, CURLOPT_ERRORBUFFER, curlErrBuffer) != CURLE_OK) + throw makeStringExceptionV(-1, "%s: Log query request: Could not set 'CURLOPT_ERRORBUFFER' option!", COMPONENT_NAME); + + //If we set CURLOPT_FAILONERROR, we'll miss the actual error message returned in the response + //(curl_easy_setopt(curlHandle, CURLOPT_FAILONERROR, 1L) != CURLE_OK) // non HTTP Success treated as error + + try + { + curlResponseCode = curl_easy_perform(curlHandle); + } + catch (...) + { + throw makeStringExceptionV(-1, "%s LogQL request: Unknown libcurl error", COMPONENT_NAME); + } + + long response_code; + curl_easy_getinfo(curlHandle, CURLINFO_RESPONSE_CODE, &response_code); + + if (curlResponseCode != CURLE_OK || response_code != 200) + { + throw makeStringExceptionV(-1,"%s Error (%d): '%s'", COMPONENT_NAME, curlResponseCode, (readBuffer.length() != 0 ? readBuffer.c_str() : curlErrBuffer[0] ? 
curlErrBuffer : "Unknown Error")); + } + else if (readBuffer.length() == 0) + throw makeStringExceptionV(-1, "%s LogQL request: Empty response!", COMPONENT_NAME); + } +} + +/* + * This method consumes a JSON formatted data source response from a successful Grafana Loki query + * It extracts the data source information, populates the m_targetDataSource structure, and constructs + * the URI used to access the Loki API + * + * If this operation fails, an exception is thrown + */ +void GrafanaLogAccessCurlClient::processDatasourceJsonResp(const std::string & retrievedDocument) +{ + Owned<IPropertyTree> tree = createPTreeFromJSONString(retrievedDocument.c_str()); + if (!tree) + throw makeStringExceptionV(-1, "%s: Could not parse data source query response!", COMPONENT_NAME); + + if (tree->hasProp("uid")) + m_targetDataSource.uid.set(tree->queryProp("uid")); + if (tree->hasProp("name")) + m_targetDataSource.name.set(tree->queryProp("name")); + if (tree->hasProp("type")) + m_targetDataSource.type.set(tree->queryProp("type")); + if (tree->hasProp("id")) + m_targetDataSource.id.set(tree->queryProp("id")); + + //Other elements that could be extracted from the data source response: + //basicAuthPassword, version, basicAuthUser, access=proxy, isDefault, withCredentials, readOnly, database + //url=http://myloki4hpcclogs:3100, secureJsonFields, user, password, basicAuth, jsonData, typeLogoUrl + + if (isEmptyString(m_targetDataSource.id.get())) + throw makeStringExceptionV(-1, "%s: DataSource query response does not include 'id'", COMPONENT_NAME); + if (isEmptyString(m_targetDataSource.type.get())) + throw makeStringExceptionV(-1, "%s: DataSource query response does not include 'type'", COMPONENT_NAME); + + //This URI is used to access the Loki API, if not properly populated, nothing will work!
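+ //For example, with the default datasource (id "1", type "loki") this resolves to "/api/datasources/proxy/1/loki/api/v1"; submitQuery() then appends endpoints such as "/query_range" or "/label" to it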
+ m_dataSourcesAPIURI.setf("/api/datasources/proxy/%s/%s/api/v1", m_targetDataSource.id.get(), m_targetDataSource.type.get()); +} + +/* + * This method consumes a logLine string from a successful Grafana Loki query + * The LogLine is wrapped in the desired output format + */ +void formatResultLine(StringBuffer & returnbuf, const char * resultLine, const char * resultLineName, LogAccessLogFormat format, bool & isFirstLine) +{ + switch (format) + { + case LOGACCESS_LOGFORMAT_xml: + { + returnbuf.appendf("<%s>", resultLineName); + encodeXML(resultLine, returnbuf); + returnbuf.appendf("</%s>", resultLineName); + isFirstLine = false; + break; + } + case LOGACCESS_LOGFORMAT_json: + { + if (!isFirstLine) + returnbuf.append(", "); + + returnbuf.append("\""); + encodeJSON(returnbuf, resultLine); + returnbuf.append("\""); + isFirstLine = false; + break; + } + case LOGACCESS_LOGFORMAT_csv: + { + encodeCSVColumn(returnbuf, resultLine); //Currently treating entire log line as a single CSV column + returnbuf.newline(); + isFirstLine = false; + break; + } + default: + break; + } +} + +/* + * This method consumes an Iterator of values elements from a successful Grafana Loki query + * It ignores the 1st child (ingest timestamp in ns), and formats the 2nd child (log line) into the desired format + */ +void processValues(StringBuffer & returnbuf, IPropertyTreeIterator * valuesIter, LogAccessLogFormat format, bool & isFirstLine) +{ + ForEach(*valuesIter) + { + IPropertyTree & values = valuesIter->query(); + int numofvalues = values.getCount("values"); + if (numofvalues == 2) + { + //const char * insertTimeStamp = values.queryProp("values[1]"); + formatResultLine(returnbuf, values.queryProp("values[2]"), "line", format, isFirstLine); + } + else + { + throw makeStringExceptionV(-1, "%s: Detected unexpected Grafana/Loki values response format!: %s", COMPONENT_NAME, values.queryProp(".")); + } + } +} + +/* + * This starts the encapsulation of the logaccess response in the desired format + */ +inline void resultsWrapStart(StringBuffer & returnbuf, LogAccessLogFormat format, bool reportHeader) +{ + switch (format) + { + case LOGACCESS_LOGFORMAT_xml: + { + returnbuf.append("<lines>"); + break; + } + case LOGACCESS_LOGFORMAT_json: + { + returnbuf.append("{\"lines\": ["); + break; + } + case LOGACCESS_LOGFORMAT_csv: + { + if (reportHeader) + { + returnbuf.append("line"); // this is the entire header for CSV if we're only reporting the line + returnbuf.newline(); + } + break; + } + default: + break; + } +} + +/* + * This finishes the encapsulation of the logaccess response in the desired format + */ +inline void resultsWrapEnd(StringBuffer & returnbuf, LogAccessLogFormat format) +{ + switch (format) + { + case LOGACCESS_LOGFORMAT_xml: + { + returnbuf.append("</lines>"); + break; + } + case LOGACCESS_LOGFORMAT_json: + { + returnbuf.append("]}"); + break; + } + case LOGACCESS_LOGFORMAT_csv: + break; + default: + break; + } +} + +/* + * This method consumes JSON formatted elements from a successful Grafana Loki query + * It extracts all values elements and processes them into the desired format + */ +void wrapResult(StringBuffer & returnbuf, IPropertyTree * result, LogAccessLogFormat format, bool & isFirstLine) +{ + Owned<IPropertyTreeIterator> logLineIter; + + if (result->hasProp("values")) + { + logLineIter.setown(result->getElements("values")); + processValues(returnbuf, logLineIter, format, isFirstLine); + } +} + +/* + * This method consumes the JSON response from a Grafana Loki query + * It attempts to unwrap the response and extract the log
payload, and reports it in the desired format + */ +void GrafanaLogAccessCurlClient::processQueryJsonResp(LogQueryResultDetails & resultDetails, const std::string & retrievedDocument, StringBuffer & returnbuf, LogAccessLogFormat format, bool reportHeader) +{ + resultDetails.totalReceived = 0; + resultDetails.totalAvailable = 0; + + Owned<IPropertyTree> tree = createPTreeFromJSONString(retrievedDocument.c_str()); + if (!tree) + throw makeStringExceptionV(-1, "%s: Could not parse log query response", COMPONENT_NAME); + + if (!tree->hasProp("data")) + throw makeStringExceptionV(-1, "%s: Query response did not contain data element!", COMPONENT_NAME); + + IPropertyTree * data = tree->queryPropTree("data"); + if (!data) + throw makeStringExceptionV(-1, "%s: Could not parse data element!", COMPONENT_NAME); + + //process stats first, in case reported entries returned can help preallocate return buffer? + if (data->hasProp("stats")) + { + if (data->hasProp("stats/summary/totalEntriesReturned")) + { + resultDetails.totalReceived = data->getPropInt64("stats/summary/totalEntriesReturned"); + } + } + //should any of these query stats be reported? + /*"stats": {"summary": { "bytesProcessedPerSecond": 7187731, "linesProcessedPerSecond": 14201, + "totalBytesProcessed": 49601, "totalLinesProcessed": 98, "execTime": 0.006900786, "queueTime": 0.000045301, + "subqueries": 1, "totalEntriesReturned": 98}, + "querier": { "store": { "totalChunksRef": 1, "totalChunksDownloaded": 1, + "chunksDownloadTime": 916811, "chunk": {"headChunkBytes": 0, + "headChunkLines": 0, "decompressedBytes": 49601, + "decompressedLines": 98, "compressedBytes": 6571,"totalDuplicates": 0 }}}, + "ingester": {"totalReached": 0, "totalChunksMatched": 0, "totalBatches": 0, "totalLinesSent": 0, + "store": {"totalChunksRef": 0, "totalChunksDownloaded": 0, "chunksDownloadTime": 0, + "chunk": {"headChunkBytes": 0,"headChunkLines": 0,"decompressedBytes": 0, + "decompressedLines": 0,"compressedBytes": 0, "totalDuplicates": 0 }}}*/ + + if (data->hasProp("result")) //if no result element, the query response is empty + { + returnbuf.ensureCapacity(retrievedDocument.length());// hard to predict the required size; the raw response length is a reasonable lower bound
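+ /* For orientation, an abridged (illustrative, not captured) Loki query_range response consumed here looks like:
+ { "data": { "resultType": "streams",
+ "result": [ { "stream": { "namespace": "hpcc", "pod": "eclwatch-0" },
+ "values": [ [ "1717711380382410602", "00000174 USR PRO ..." ] ] } ],
+ "stats": { "summary": { "totalEntriesReturned": 1 } } } }
+ Each result element is walked below, and the second entry of each values pair (the log line) is formatted. */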
+ //Adds the format prefix to the return buffer + resultsWrapStart(returnbuf, format, reportHeader); + + bool isFirstLine = true; + Owned<IPropertyTreeIterator> resultIter = data->getElements("result"); + //many result elements can be returned, each with a unique set of labels + ForEach(*resultIter) + { + IPropertyTree & result = resultIter->query(); + wrapResult(returnbuf, &result, format, isFirstLine); + } + + //Adds the format postfix to the return buffer + resultsWrapEnd(returnbuf, format); + } +} + +/* + * This method constructs a query string for Grafana to provide all info for a given data source + * The method attempts to populate the m_targetDataSource structure with the data source information + */ +void GrafanaLogAccessCurlClient::fetchDatasourceByName(const char * targetDataSourceName) +{ + DBGLOG("%s: Fetching data source by name: '%s'", COMPONENT_NAME, targetDataSourceName); + if (isEmptyString(targetDataSourceName)) + throw makeStringExceptionV(-1, "%s: fetchDatasourceByName: Empty data source name!", COMPONENT_NAME); + + std::string readBuffer; + VStringBuffer targetURI("/api/datasources/name/%s", targetDataSourceName); + submitQuery(readBuffer, targetURI.str()); + processDatasourceJsonResp(readBuffer); +} + +/* +* submits a Grafana Loki query to fetch all available datasources +* The response is expected to be a JSON formatted list of datasources +*/ +void GrafanaLogAccessCurlClient::fetchDatasources(std::string & readBuffer) +{ + submitQuery(readBuffer, "/"); +} + +/* +* submits a Grafana Loki query to fetch all labels +* The response is expected to be a JSON formatted list of labels +*/ +void GrafanaLogAccessCurlClient::fetchLabels(std::string & readBuffer) +{ + submitQuery(readBuffer, "/label"); +} + +/* + * Creates query filter and stream selector strings for the LogQL query based on the filter options provided +*/ +void GrafanaLogAccessCurlClient::populateQueryFilterAndStreamSelector(StringBuffer & queryString, StringBuffer & streamSelector, const ILogAccessFilter * filter) +{ + if (filter == nullptr) + throw makeStringExceptionV(-1, "%s: Null filter detected while creating LogQL query string", COMPONENT_NAME); + + const char * queryOperator = " |~ "; + StringBuffer queryValue; + StringBuffer streamField; + StringBuffer queryField; + + filter->toString(queryValue); + switch (filter->filterType()) + { + case LOGACCESS_FILTER_jobid: + { + DBGLOG("%s: Searching log entries by jobid: '%s'...", COMPONENT_NAME, queryValue.str()); + break; + } + case LOGACCESS_FILTER_class: + { + DBGLOG("%s: Searching log entries by class: '%s'...", COMPONENT_NAME, queryValue.str()); + break; + } + case LOGACCESS_FILTER_audience: + { + DBGLOG("%s: Searching log entries by target audience: '%s'...", COMPONENT_NAME, queryValue.str()); + break; + } + case LOGACCESS_FILTER_component: + { + if (m_componentsColumn.isStream) + streamField = m_componentsColumn.name; + + DBGLOG("%s: Searching '%s' component log entries...", COMPONENT_NAME, queryValue.str()); + break; + } + case LOGACCESS_FILTER_instance: + { + if (m_instanceColumn.isStream) + streamField = m_instanceColumn.name; + + DBGLOG("%s: Searching log entries by HPCC component instance: '%s'", COMPONENT_NAME, queryValue.str() ); + break; + } + case LOGACCESS_FILTER_wildcard: + { + if (queryValue.isEmpty()) + throw makeStringExceptionV(-1, "%s: Wildcard filter cannot be empty!", COMPONENT_NAME); + + DBGLOG("%s: Searching log entries by wildcard filter: '%s %s %s'...", COMPONENT_NAME, queryField.str(), queryOperator, queryValue.str()); + break; + } + case
LOGACCESS_FILTER_or: + case LOGACCESS_FILTER_and: + { + StringBuffer op(logAccessFilterTypeToString(filter->filterType())); + queryString.append(" ( "); + populateQueryFilterAndStreamSelector(queryString, streamSelector, filter->leftFilterClause()); + queryString.append(" "); + queryString.append(op.toLowerCase()); //LogQL or | and + queryString.append(" "); + populateQueryFilterAndStreamSelector(queryString, streamSelector, filter->rightFilterClause()); + queryString.append(" ) "); + return; // queryString populated, need to break out + } + case LOGACCESS_FILTER_pod: + { + if (m_podColumn.isStream) + streamField = m_podColumn.name; + + DBGLOG("%s: Searching log entries by Pod: '%s'", COMPONENT_NAME, queryValue.str() ); + break; + } + case LOGACCESS_FILTER_column: + { + if (filter->getFieldName() == nullptr) + throw makeStringExceptionV(-1, "%s: empty field name detected in filter by column!", COMPONENT_NAME); + break; + } + //case LOGACCESS_FILTER_trace: + //case LOGACCESS_FILTER_span: + default: + throw makeStringExceptionV(-1, "%s: Unknown query criteria type encountered: '%s'", COMPONENT_NAME, queryValue.str()); + } + + //We're constructing two clauses, the stream selector and the query filter + //the streamSelector is a comma separated list of key value pairs + if (!streamField.isEmpty()) + { + if (!streamSelector.isEmpty()) + streamSelector.append(", "); + + streamSelector.appendf(" %s=\"%s\" ", streamField.str(), queryValue.str()); + } + else + { + //the query filter is a sequence of expressions separated by a logical operator + queryString.append(" ").append(queryField.str()).append(queryOperator); + if (strcmp(m_expectedLogFormat, "table")==0) + queryString.append(" \"").append(queryValue.str()).append("\" "); + else + queryString.append("\"").append(queryValue.str()).append("\""); + } +} + +/* +Translates LogAccess defined SortBy direction enum value to +the LogQL/Loki counterpart +*/ +const char * sortByDirection(SortByDirection direction) +{ + switch (direction) + { + case SORTBY_DIRECTION_ascending: + return "FORWARD"; + case SORTBY_DIRECTION_descending: + case SORTBY_DIRECTION_none: + default: + return "BACKWARD"; + } +} + +/* +* Constructs a LogQL query based on filter options, and sets Loki-specific query parameters, + submits the query, processes the response, and returns the log entries in the desired format +*/ +bool GrafanaLogAccessCurlClient::fetchLog(LogQueryResultDetails & resultDetails, const LogAccessConditions & options, StringBuffer & returnbuf, LogAccessLogFormat format) +{ + try + { + resultDetails.totalReceived = 0; + resultDetails.totalAvailable = 0; + + const LogAccessTimeRange & trange = options.getTimeRange(); + if (trange.getStartt().isNull()) + throw makeStringExceptionV(-1, "%s: start time must be provided!", COMPONENT_NAME); + + StringBuffer fullQuery; + fullQuery.set("/query_range?"); + + if (options.getSortByConditions().length() > 0) + { + if (options.getSortByConditions().length() > 1) + UWARNLOG("%s: LogQL sorting is only supported by one field!", COMPONENT_NAME); + + SortByCondition condition = options.getSortByConditions().item(0); + switch (condition.byKnownField) + { + case LOGACCESS_MAPPEDFIELD_timestamp: + break; + case LOGACCESS_MAPPEDFIELD_jobid: + case LOGACCESS_MAPPEDFIELD_component: + case LOGACCESS_MAPPEDFIELD_class: + case LOGACCESS_MAPPEDFIELD_audience: + case LOGACCESS_MAPPEDFIELD_instance: + case LOGACCESS_MAPPEDFIELD_host: + case LOGACCESS_MAPPEDFIELD_unmapped: + default: + throw makeStringExceptionV(-1, "%s: LogQL sorting is only supported
by ingest timestamp!", COMPONENT_NAME); + } + + const char * direction = sortByDirection(condition.direction); + if (!isEmptyString(direction)) + fullQuery.appendf("direction=%s", direction); + } + + fullQuery.append("&limit=").append(std::to_string(options.getLimit()).c_str()); + fullQuery.append("&query="); + //At this point the log field appears as a detected field and is not formatted + // Detected fields + //if output is json: + // log "{ \"MSG\": \"QueryFilesInUse.unsubscribe() called\", \"MID\": \"104\", \"AUD\": \"USR\", \"CLS\": \"PRO\", \"DATE\": \"2024-06-06\", \"TIME\": \"22:03:00.229\", \"PID\": \"8\", \"TID\": \"8\", \"JOBID\": \"UNK\" }\n" + //if output is table: + // log "00000174 USR PRO 2024-06-19 19:20:58.089 8 160 UNK \"WUUpdate: W20240619-192058\"\n" + // stream "stderr" + // time "2024-06-06T22:03:00.230759942Z" + // ts 2024-06-06T22:03:00.382Z + // tsNs 1717711380382410602 + + StringBuffer logLineParser; + //from https://grafana.com/docs/loki/latest/query/log_queries/ + //Adding | json to your pipeline will extract all json properties as labels if the log line is a valid json document. Nested properties are flattened into label keys using the _ separator. + logLineParser.set(" | json log"); //this parses the log entry and extracts the log field into a label + logLineParser.append(" | line_format \"{{.log}}\""); //Formats output line to only contain log label + //This drops the stream, and various insert timestamps + + //we're always going to get a stream container, and the log line... + //the stream container contains unnecessary and redundant lines + //there's documentation of a 'drop' command which doesn't work in practice + //online recommendation is to clear those stream entries... + logLineParser.append(" | label_format log=\"\", filename=\"\", namespace=\"\", node_name=\"\", job=\"\"");// app=\"\", component=\"\", container=\"\", instance=\"\"); + + /* we're not going to attempt to parse the log line for now, + return the entire log line in raw format + if (strcmp(m_expectedLogFormat.get(), "json") == 0) + { + logLineParser.append( " | json "); + //at this point, the stream "log" looks like this: + // { "MSG": "ESP server started.", "MID": "89", "AUD": "PRG", "CLS": "INF", "DATE": "2024-06-19", "TIME": "14:56:36.648", "PID": "8", "TID": "8", "JOBID": "UNK" } + //no need to format "log" into json + logLineParser.append(" | line_format \"{{.log}}\""); + } + else + { + //parses log into individual fields as labels + logLineParser.append(" | pattern \" \""); + //the "pattern" parser is not reliable, sensitive to number of spaces, and the order of the fields + + //do we want to manually format the return format at the server? + logLineParser.append(" | line_format \"{ \\\"MID\\\":\\\"{{.MID}}\\\", \\\"AUD\\\":\\\"{{.AUD}}\\\", \\\"MSG\\\":\\\"{{.MSG}}\\\" }\""); + } + */ + + //if we parse the logline as above, we could control the individual fields returned + //HPCC_LOG_TYPE="CLS", HPCC_LOG_MESSAGE="MSG", HPCC_LOG_JOBID="JOBID" | HPCC_LOG_JOBID="UNK" + + //"All LogQL queries contain a log stream selector."
- https://grafana.com/docs/loki/latest/query/log_queries/ + StringBuffer streamSelector; + StringBuffer queryFilter; + populateQueryFilterAndStreamSelector(queryFilter, streamSelector, options.queryFilter()); + if (!streamSelector.isEmpty()) + streamSelector.append(", "); + + streamSelector.appendf("namespace=\"%s\"", m_targetNamespace.get()); + + fullQuery.append("{"); + encodeURL(fullQuery, streamSelector.str()); + fullQuery.append("}"); + encodeURL(fullQuery, queryFilter.str()); + encodeURL(fullQuery, logLineParser.str()); + + fullQuery.appendf("&start=%s000000000", std::to_string(trange.getStartt().getSimple()).c_str()); + if (!trange.getEndt().isNull()) //aka 'to' has been initialized + { + fullQuery.appendf("&end=%s000000000", std::to_string(trange.getEndt().getSimple()).c_str()); + } + + DBGLOG("FetchLog query: %s", fullQuery.str()); + + std::string readBuffer; + submitQuery(readBuffer, fullQuery.str()); + + processQueryJsonResp(resultDetails, readBuffer, returnbuf, format, true); + //DBGLOG("Query fetchLog result: %s", readBuffer.c_str()); + } + catch(IException * e) + { + StringBuffer description; + IERRLOG("%s: query exception: (%d) - %s", COMPONENT_NAME, e->errorCode(), e->errorMessage(description).str()); + e->Release(); + } + return false; +} + +GrafanaLogAccessCurlClient::GrafanaLogAccessCurlClient(IPropertyTree & logAccessPluginConfig) +{ + m_pluginCfg.set(&logAccessPluginConfig); + + const char * protocol = logAccessPluginConfig.queryProp("connection/@protocol"); + const char * host = logAccessPluginConfig.queryProp("connection/@host"); + const char * port = logAccessPluginConfig.queryProp("connection/@port"); + + m_grafanaConnectionStr = isEmptyString(protocol) ? DEFAULT_GRAFANA_PROTOCOL : protocol; + m_grafanaConnectionStr.append("://"); + m_grafanaConnectionStr.append(isEmptyString(host) ? DEFAULT_GRAFANA_HOST : host); + m_grafanaConnectionStr.append(":").append((!port || !*port) ? DEFAULT_GRAFANA_PORT : port); + + m_targetDataSource.id.set(logAccessPluginConfig.hasProp("datasource/@id") ? logAccessPluginConfig.queryProp("datasource/@id") : DEFAULT_DATASOURCE_ID); + m_targetDataSource.name.set(logAccessPluginConfig.hasProp("datasource/@name") ? logAccessPluginConfig.queryProp("datasource/@name") : DEFAULT_DATASOURCE_NAME); + + if (logAccessPluginConfig.hasProp("namespace/@name")) + { + m_targetNamespace.set(logAccessPluginConfig.queryProp("namespace/@name")); + } + + if (isEmptyString(m_targetNamespace.get())) + { + m_targetNamespace.set(defaultNamespaceStream); + OWARNLOG("%s: No namespace specified!
Loki logaccess should target non-default namespaced logs!!!", COMPONENT_NAME); + } + + Owned<const IPropertyTree> secretTree = getSecret("esp", "grafana-logaccess"); + if (secretTree) + { + DBGLOG("Grafana LogAccess: loading esp/grafana-logaccess secret"); + + getSecretKeyValue(m_grafanaUserName.clear(), secretTree, "username"); + if (isEmptyString(m_grafanaUserName.str())) + throw makeStringExceptionV(-1, "%s: Empty Grafana user name detected!", COMPONENT_NAME); + + getSecretKeyValue(m_grafanaPassword.clear(), secretTree, "password"); + if (isEmptyString(m_grafanaPassword.str())) + throw makeStringExceptionV(-1, "%s: Empty Grafana password detected!", COMPONENT_NAME); + } + else + { + DBGLOG("%s: could not load esp/grafana-logaccess secret", COMPONENT_NAME); + } + + if (isEmptyString(m_grafanaUserName.str()) || isEmptyString(m_grafanaPassword.str())) + { + OWARNLOG("%s: Grafana credentials not found in secret, searching in grafana logaccess configuration", COMPONENT_NAME); + + if (logAccessPluginConfig.hasProp("connection/@username")) + m_grafanaUserName.set(logAccessPluginConfig.queryProp("connection/@username")); + + if (logAccessPluginConfig.hasProp("connection/@password")) + m_grafanaPassword.set(logAccessPluginConfig.queryProp("connection/@password")); + } + + //this is very important, without this, we can't target the correct datasource + fetchDatasourceByName(m_targetDataSource.name.get()); + + std::string availableLabels; + fetchLabels(availableLabels); + DBGLOG("%s: Available labels on target loki/grafana: %s", COMPONENT_NAME, availableLabels.c_str()); + + m_expectedLogFormat = defaultExpectedLogFormat; + if (logAccessPluginConfig.hasProp("logFormat/@type")) + { + m_expectedLogFormat.set(logAccessPluginConfig.queryProp("logFormat/@type")); + } + + Owned<IPropertyTreeIterator> logMapIter = m_pluginCfg->getElements("logMaps"); + ForEach(*logMapIter) + { + IPropertyTree & logMap = logMapIter->query(); + const char * logMapType = logMap.queryProp("@type"); + if (streq(logMapType, "global")) + { + if (logMap.hasProp(logMapIndexPatternAtt)) + if (strcmp(logMap.queryProp(logMapIndexPatternAtt), "stream")==0) + m_globalSearchCol.isStream = true; + + if (logMap.hasProp(logMapSearchColAtt)) + m_globalSearchCol.name = logMap.queryProp(logMapSearchColAtt); + } + else if (streq(logMapType, "workunits")) + { + if (logMap.hasProp(logMapSearchColAtt)) + m_workunitsColumn = logMap.queryProp(logMapSearchColAtt); + } + else if (streq(logMapType, "components")) + { + if (logMap.hasProp(logMapIndexPatternAtt)) + if (strcmp(logMap.queryProp(logMapIndexPatternAtt), "stream")==0) + m_componentsColumn.isStream = true; + + if (logMap.hasProp(logMapSearchColAtt)) + m_componentsColumn.name = logMap.queryProp(logMapSearchColAtt); + } + else if (streq(logMapType, "class")) + { + if (logMap.hasProp(logMapSearchColAtt)) + m_classColumn = logMap.queryProp(logMapSearchColAtt); + } + else if (streq(logMapType, "audience")) + { + if (logMap.hasProp(logMapSearchColAtt)) + m_audienceColumn = logMap.queryProp(logMapSearchColAtt); + } + else if (streq(logMapType, "instance")) + { + if (logMap.hasProp(logMapIndexPatternAtt)) + if (strcmp(logMap.queryProp(logMapIndexPatternAtt), "stream")==0) + m_instanceColumn.isStream = true; + + if (logMap.hasProp(logMapSearchColAtt)) + m_instanceColumn.name = logMap.queryProp(logMapSearchColAtt); + } + else if (streq(logMapType, "node")) + { + if (logMap.hasProp(logMapIndexPatternAtt)) + if (strcmp(logMap.queryProp(logMapIndexPatternAtt), "stream")==0) + m_nodeColumn.isStream = true; + + if (logMap.hasProp(logMapSearchColAtt))
+ m_nodeColumn.name = logMap.queryProp(logMapSearchColAtt); + } + else if (streq(logMapType, "host")) + { + OWARNLOG("%s: 'host' LogMap entry is NOT supported!", COMPONENT_NAME); + } + else if (streq(logMapType, "pod")) + { + if (logMap.hasProp(logMapIndexPatternAtt)) + if (strcmp(logMap.queryProp(logMapIndexPatternAtt), "stream")==0) + m_podColumn.isStream = true; + + if (logMap.hasProp(logMapSearchColAtt)) + m_podColumn.name = logMap.queryProp(logMapSearchColAtt); + } + else + { + ERRLOG("Encountered invalid LogAccess field map type: '%s'", logMapType); + } + } + + DBGLOG("%s: targeting: '%s' - datasource: '%s'", COMPONENT_NAME, m_grafanaConnectionStr.str(), m_dataSourcesAPIURI.str()); +} + +class GrafanaLogaccessStream : public CInterfaceOf<IRemoteLogAccessStream> +{ +public: + virtual bool readLogEntries(StringBuffer & record, unsigned & recsRead) override + { + DBGLOG("%s: GrafanaLogaccessStream readLogEntries called", COMPONENT_NAME); + LogQueryResultDetails resultDetails; + m_remoteLogAccessor->fetchLog(resultDetails, m_options, record, m_outputFormat); + recsRead = resultDetails.totalReceived; + DBGLOG("%s: GrafanaLogaccessStream readLogEntries returned %u records", COMPONENT_NAME, recsRead); + + return false; + } + + GrafanaLogaccessStream(IRemoteLogAccess * grafanaQueryClient, const LogAccessConditions & options, LogAccessLogFormat format, unsigned int pageSize) + { + DBGLOG("%s: GrafanaLogaccessStream created", COMPONENT_NAME); + m_remoteLogAccessor.set(grafanaQueryClient); + m_outputFormat = format; + m_pageSize = pageSize; + m_options = options; + } + +private: + unsigned int m_pageSize; + bool m_hasBeenScrolled = false; + LogAccessLogFormat m_outputFormat; + LogAccessConditions m_options; + Owned<IRemoteLogAccess> m_remoteLogAccessor; +}; + +IRemoteLogAccessStream * GrafanaLogAccessCurlClient::getLogReader(const LogAccessConditions & options, LogAccessLogFormat format) +{ + return getLogReader(options, format, defaultMaxRecordsPerFetch); +} + +IRemoteLogAccessStream * GrafanaLogAccessCurlClient::getLogReader(const LogAccessConditions & options, LogAccessLogFormat format, unsigned int pageSize) +{ + return new GrafanaLogaccessStream(this, options, format, pageSize); +} + +extern "C" IRemoteLogAccess * createInstance(IPropertyTree & logAccessPluginConfig) +{ + return new GrafanaLogAccessCurlClient(logAccessPluginConfig); +} \ No newline at end of file diff --git a/system/logaccess/Grafana/CurlClient/GrafanaCurlClient.hpp b/system/logaccess/Grafana/CurlClient/GrafanaCurlClient.hpp new file mode 100644 index 00000000000..fb6f71cff98 --- /dev/null +++ b/system/logaccess/Grafana/CurlClient/GrafanaCurlClient.hpp @@ -0,0 +1,108 @@ +/*############################################################################## + + HPCC SYSTEMS software Copyright (C) 2024 HPCC Systems®. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+############################################################################## */ + +#pragma once + +#include "jlog.hpp" +#include "jlog.ipp" +#include "jptree.hpp" +#include "jstring.hpp" +#include +#include "jsecrets.hpp" + +#define COMPONENT_NAME "GrafanaLogAccessCurlClient" + +static constexpr const char * DEFAULT_DATASOURCE_NAME = "Loki"; +static constexpr const char * DEFAULT_DATASOURCE_TYPE = "loki"; +static constexpr const char * DEFAULT_DATASOURCE_INDEX = "1"; + +struct GrafanaDataSource +{ + StringAttr type = DEFAULT_DATASOURCE_TYPE; + StringAttr name = DEFAULT_DATASOURCE_NAME; + StringAttr id = DEFAULT_DATASOURCE_INDEX; + StringAttr uid; + //Other Grafana datasource attributes: + //basicAuthPassword, version, basicAuthUser, access = proxy, isDefault + //withCredentials, url http://myloki4hpcclogs:3100, secureJsonFields + //user, password, basicAuth, jsonData, typeLogoUrl, readOnly, database +}; + +struct LogField +{ + StringAttr name; + bool isStream; + LogField(const char * name, bool isStream = false) : name(name), isStream(isStream) {} +}; + +static constexpr int defaultEntryLimit = 100; +static constexpr int defaultEntryStart = 0; + +class GrafanaLogAccessCurlClient : public CInterfaceOf<IRemoteLogAccess> +{ +private: + static constexpr const char * type = "grafanaloganalyticscurl"; + Owned<IPropertyTree> m_pluginCfg; + StringBuffer m_grafanaConnectionStr; + GrafanaDataSource m_targetDataSource; + + StringBuffer m_grafanaUserName; + StringBuffer m_grafanaPassword; + StringBuffer m_dataSourcesAPIURI; + StringAttr m_targetNamespace; + + LogField m_globalSearchCol = LogField("log"); + LogField m_workunitsColumn = LogField("JOBID"); + LogField m_componentsColumn = LogField("component", true); + LogField m_audienceColumn = LogField("AUD"); + LogField m_classColumn = LogField("CLS"); + LogField m_instanceColumn = LogField("instance", true); + LogField m_podColumn = LogField("pod", true); + LogField m_containerColumn = LogField("container", true); + LogField m_messageColumn = LogField("MSG"); + LogField m_nodeColumn = LogField("node_name", true); + LogField m_logTimestampColumn = LogField("TIME"); + LogField m_logDatestampColumn = LogField("DATE"); + LogField m_logSequenceColumn = LogField("MID"); + LogField m_logProcIDColumn = LogField("PID"); + LogField m_logThreadIDColumn = LogField("TID"); + //LogField m_logTraceIDColumn = LogField("TRC"); + //LogField m_logSpanIDColumn = LogField("SPN"); + + StringAttr m_expectedLogFormat; //json|table|xml + +public: + GrafanaLogAccessCurlClient(IPropertyTree & logAccessPluginConfig); + void processQueryJsonResp(LogQueryResultDetails & resultDetails, const std::string & retrievedDocument, StringBuffer & returnbuf, LogAccessLogFormat format, bool reportHeader); + void processDatasourceJsonResp(const std::string & retrievedDocument); + void fetchDatasourceByName(const char * targetDataSourceName); + void fetchDatasources(std::string & readBuffer); + void fetchLabels(std::string & readBuffer); + void submitQuery(std::string & readBuffer, const char * targetURI); + + void populateQueryFilterAndStreamSelector(StringBuffer & queryString, StringBuffer & streamSelector, const ILogAccessFilter * filter); + static void timestampQueryRangeString(StringBuffer & range, std::time_t from, std::time_t to); + + // IRemoteLogAccess methods + virtual bool fetchLog(LogQueryResultDetails & resultDetails, const LogAccessConditions & options, StringBuffer & returnbuf, LogAccessLogFormat format) override; + virtual const char * getRemoteLogAccessType() const override { return type; } + virtual
IPropertyTree * queryLogMap() const override { return m_pluginCfg->queryPropTree(""); } + virtual const char * fetchConnectionStr() const override { return m_grafanaConnectionStr.str(); } + virtual IRemoteLogAccessStream * getLogReader(const LogAccessConditions & options, LogAccessLogFormat format) override; + virtual IRemoteLogAccessStream * getLogReader(const LogAccessConditions & options, LogAccessLogFormat format, unsigned int pageSize) override; + virtual bool supportsResultPaging() const override { return false;} +}; \ No newline at end of file diff --git a/testing/unittests/jlibtests.cpp b/testing/unittests/jlibtests.cpp index d5cca3b2864..cc72e5adbd6 100644 --- a/testing/unittests/jlibtests.cpp +++ b/testing/unittests/jlibtests.cpp @@ -68,7 +68,6 @@ class JlibTraceTest : public CppUnit::TestFixture CPPUNIT_TEST(manualTestScopeEnd); CPPUNIT_TEST(testActiveSpans); CPPUNIT_TEST(testSpanFetchMethods); - //CPPUNIT_TEST(testJTraceJLOGExporterprintResources); //CPPUNIT_TEST(testJTraceJLOGExporterprintAttributes); CPPUNIT_TEST(manualTestsDeclaredSpanStartTime); @@ -826,6 +825,30 @@ class JlibTraceTest : public CppUnit::TestFixture CPPUNIT_TEST_SUITE_REGISTRATION( JlibTraceTest ); CPPUNIT_TEST_SUITE_NAMED_REGISTRATION( JlibTraceTest, "JlibTraceTest" ); +class JlibStringTest : public CppUnit::TestFixture +{ +public: + CPPUNIT_TEST_SUITE(JlibStringTest); + CPPUNIT_TEST(testEncodeCSVColumn); + CPPUNIT_TEST_SUITE_END(); + +protected: + void testEncodeCSVColumn() + { + const char * csvCol1 = "hello,world"; + StringBuffer encodedCSV; + encodeCSVColumn(encodedCSV, csvCol1); + CPPUNIT_ASSERT_EQUAL_STR(encodedCSV.str(), "\"hello,world\""); + + const char * csvCol2 = "hello world, \"how are you?\""; + encodedCSV.clear(); + encodeCSVColumn(encodedCSV, csvCol2); + CPPUNIT_ASSERT_EQUAL_STR(encodedCSV.str(), "\"hello world, \"\"how are you?\"\"\""); + } +}; + +CPPUNIT_TEST_SUITE_REGISTRATION( JlibStringTest ); +CPPUNIT_TEST_SUITE_NAMED_REGISTRATION( JlibStringTest, "JlibStringTest" ); class JlibSemTest : public CppUnit::TestFixture { From 9c1debad5b1c32d1ba08ababddbc90e215e68cc1 Mon Sep 17 00:00:00 2001 From: Jake Smith Date: Thu, 27 Jun 2024 20:25:21 +0100 Subject: [PATCH 124/151] HPCC-32174 Incorrect epoll event added for non-ssl rowservice Signed-off-by: Jake Smith --- fs/dafsserver/dafsserver.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/dafsserver/dafsserver.cpp b/fs/dafsserver/dafsserver.cpp index 44ee5a96cf0..282559612fe 100644 --- a/fs/dafsserver/dafsserver.cpp +++ b/fs/dafsserver/dafsserver.cpp @@ -5627,7 +5627,7 @@ class CRemoteFileServer : implements IRemoteFileServer, public CInterface eps.getEndpointHostText(peerURL.clear()); PROGLOG("Server accepting row service socket from %s", peerURL.str()); #endif - addClient(acceptedRSSock.getClear(), true, true); + addClient(acceptedRSSock.getClear(), rowServiceSSL, true); } } else From 1434740768259ab0df4ad473cf5f0b30f9c97f57 Mon Sep 17 00:00:00 2001 From: Kunal Aswani Date: Thu, 27 Jun 2024 16:22:00 -0400 Subject: [PATCH 125/151] HPCC-32155 PT-BR Translations 9.8.x Translations added for Brazilian Portuguese (PT-BR) Signed-off-by: Kunal Aswani --- esp/src/src/nls/pt-br/hpcc.ts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/esp/src/src/nls/pt-br/hpcc.ts b/esp/src/src/nls/pt-br/hpcc.ts index a6c24fdd548..a60183f5a5c 100644 --- a/esp/src/src/nls/pt-br/hpcc.ts +++ b/esp/src/src/nls/pt-br/hpcc.ts @@ -412,6 +412,8 @@ IgnoreGlobalStoreOutEdges: "Ignorar Global Store Out Edges", Import: "Importar", Inactive: "Inativo",
IncludePerComponentLogs: "Incluir logs por componente", + IncludeRelatedLogs: "Incluir logs relacionados", IncludeSlaveLogs: "Incluie logs escravos", IncludeSubFileInfo: "Incluir informações de subarquivo?", Index: "Índice", @@ -583,6 +585,7 @@ Newest: "Mais recente", NewPassword: "Senha Nova", NextSelection: "Próxima Seleção", + NextWorkunit: "Próxima WU", NoCommon: "Incomum", NoContent: "(sem conteúdo)", NoContentPleaseSelectItem: "Sem conteúdo - selecione um item", @@ -715,6 +718,7 @@ PressCtrlCToCopy: "Pressione ctrl + c para copiar.", Preview: "Prévia", PreviousSelection: "Seleção Anterior", + PreviousWorkunit: "WU anterior", PrimaryLost: "Principal Perdido", PrimaryMonitoring: "Monitorador Principal", Priority: "Prioridade", @@ -836,6 +840,7 @@ Save: "Salvar", Scope: "Escopo", SearchResults: "Resultado da Busca", + Seconds: "Segundos", SecondsRemaining: "Segundos Restantes", Security: "Segurança", SecurityMessageHTML: "Visualizar apenas HTML de usuários confiáveis. Este workunit foi criada por ‘{__placeholder__}’. Renderizar HTML?", @@ -935,6 +940,7 @@ SVGSource: "Fonte de SVG", Sync: "Sync", SyncSelection: "Sincronizar seleção", + Syntax: "Sintaxe", SystemServers: "Servidores do Sistema", Table: "Tabela", tag: "tag", @@ -962,6 +968,7 @@ TimeMaxTotalExecuteMinutes: "Tempo Máximo Total de Minutos de Execução", TimeMeanTotalExecuteMinutes: "Tempo Médio Total de Minutos de Execução", TimeMinTotalExecuteMinutes: "Tempo Min, Total de Minutos de Execução", + TimePenalty: "Penalidade de tempo", Timers: "Controladores de tempo", TimeSeconds: "Tempo (Segundos)", TimeStamp: "Timestamp", @@ -1130,6 +1137,7 @@ WildcardFilter: "Filtro Curinga", Workflows: "Fluxo de Trabalho", Workunit: "Tarefa", + WorkunitNotFound: "WU não encontrada", Workunits: "Tarefas", WorkUnitScopeDefaultPermissions: "Permisos por defect de alcaces de Workunit", Wrap: "Embrulho", From b570bd80ea3c0b66fe6c9b7d6a12054e849eb881 Mon Sep 17 00:00:00 2001 From: Kunal Aswani Date: Thu, 27 Jun 2024 16:35:50 -0400 Subject: [PATCH 126/151] HPCC-32157 ZH Translations 9.8.x Translations added for Chinese (ZH). Signed-off-by: Kunal Aswani --- esp/src/src/nls/zh/hpcc.ts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/esp/src/src/nls/zh/hpcc.ts b/esp/src/src/nls/zh/hpcc.ts index 65cd9273116..eb8e7e677aa 100644 --- a/esp/src/src/nls/zh/hpcc.ts +++ b/esp/src/src/nls/zh/hpcc.ts @@ -412,6 +412,8 @@ IgnoreGlobalStoreOutEdges: "省略全局存储活动的边线", Import: "输入", Inactive: "未激活", + IncludePerComponentLogs: "包括每个组件的日志", + IncludeRelatedLogs: "包括相关日志", IncludeSlaveLogs: "包括从属服务器日志", IncludeSubFileInfo: "包括子文件信息", Index: "索引", @@ -583,6 +585,7 @@ Newest: "最新", NewPassword: "新密码", NextSelection: "下一个选择", + NextWorkunit: "下一个Workunit", NoCommon: "非常用", NoContent: "(无内容)", NoContentPleaseSelectItem: "无内容,请选择一个项目", @@ -715,6 +718,7 @@ PressCtrlCToCopy: "用ctrl+c键复制", Preview: "预览", PreviousSelection: "上一个选择", + PreviousWorkunit: "前一个Workunit", PrimaryLost: "主要丢失", PrimaryMonitoring: "主监控", Priority: "优先级", @@ -836,6 +840,7 @@ Save: "保存", Scope: "范围", SearchResults: "查询结果", + Seconds: "秒", SecondsRemaining: "剩余时间(秒)", Security: "安全", SecurityMessageHTML: "仅查看来自受信任用户的 HTML。该工作单元由 {__placeholder__}.
生成HTML?", @@ -934,6 +939,7 @@ SuspendedReason: "暂停使用的原因", SVGSource: "SVG原始数据", SyncSelection: "选择同步", + Syntax: "语法", SystemServers: "系统服务器", Table: "表", tag: "标记", @@ -961,6 +967,7 @@ TimeMaxTotalExecuteMinutes: "总运行时间最大值(分钟)", TimeMeanTotalExecuteMinutes: "总运行时间均值(分钟)", TimeMinTotalExecuteMinutes: "总运行时间最小值(分钟)", + TimePenalty: "时间惩罚", Timers: "定时器", TimeSeconds: "时间(秒)", TimeStamp: "时间戳", @@ -1128,6 +1135,7 @@ WildcardFilter: "筛选关键字", Workflows: "工作流程", Workunit: "工作单元", + WorkunitNotFound: "无法找到Workunit", Workunits: "工作单元", WorkUnitScopeDefaultPermissions: "工作单元默认权限", Wrap: "自动换行", From ce48f75f6ce298bd86161fcfa39fba88c8b8cf58 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Fri, 28 Jun 2024 08:35:48 +0100 Subject: [PATCH 127/151] HPCC-32172 ECL Playground results flickering Flipping between scroll and no scroll Signed-off-by: Gordon Smith --- esp/src/src-react/components/ECLPlayground.tsx | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/esp/src/src-react/components/ECLPlayground.tsx b/esp/src/src-react/components/ECLPlayground.tsx index 665e0c699ad..9fe22b20ec7 100644 --- a/esp/src/src-react/components/ECLPlayground.tsx +++ b/esp/src/src-react/components/ECLPlayground.tsx @@ -512,18 +512,18 @@ export const ECLPlayground: React.FunctionComponent = (props
- - {outputMode === OutputMode.ERRORS ? ( - + +
+ {outputMode === OutputMode.ERRORS ? ( + - ) : outputMode === OutputMode.RESULTS ? ( - + ) : outputMode === OutputMode.RESULTS ? ( + - ) : outputMode === OutputMode.VIS ? ( -
+ ) : outputMode === OutputMode.VIS ? ( -
- ) : null} + ) : null} +
; From 16ab6898a919cf9774809407a2ad08573c5857d9 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Fri, 28 Jun 2024 12:34:54 +0100 Subject: [PATCH 128/151] Split off 9.6.26 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 68c7fe4b3da..ea41709c68c 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.6.25-closedown0 +version: 9.6.27-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. -appVersion: 9.6.25-closedown0 +appVersion: 9.6.27-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index f964c2fe1ca..4f650bbbd0e 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1523,7 +1523,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index c17ffe5e3c3..2f761c8f4ac 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index ef506e3dd4a..fdbbe268fc0 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index f4d23e11edd..998b361af9d 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index afc9f83e710..6f875530323 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -139,7 +139,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 70b356ab907..da2ba3beae5 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -147,7 +147,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index be9d9f553bf..8dc0b418482 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index fe9260babc3..a1570fe8acc 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -125,7 +125,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index bc262ad6ac0..5e9cb065aac 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 82d5c1f2e86..867edf1e9f0 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 8e0d6a593be..08641b21ea6 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 25b3c5fea31..3c3505a8c98 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -88,7 +88,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -153,7 +153,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -220,7 +220,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-353,7 +353,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -418,7 +418,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.6.25-closedown0 + helmVersion: 9.6.27-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index ff08503f2e4..3ffe34264d4 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 6 ) -set ( HPCC_POINT 25 ) +set ( HPCC_POINT 27 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-20T15:58:07Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-28T11:34:54Z" ) ### From f196e5553a0dde44117f817cf8f207b8f2579cc1 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Fri, 28 Jun 2024 12:37:40 +0100 Subject: [PATCH 129/151] Split off 9.4.74 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index c194d77e8d6..63368e54cce 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.4.73-closedown0 +version: 9.4.75-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.4.73-closedown0 +appVersion: 9.4.75-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index ab3e33f4178..225a27e6a17 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1519,7 +1519,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index a50440cf06b..71fbbe75ba9 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index 21ade987987..2b61cc50e8f 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 36fdadbb6cf..e6b3ca0c48f 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 7b86a7449d1..33e874fa680 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -60,7 +60,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -137,7 +137,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 882b2b7f63c..952d68140c3 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -143,7 +143,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index c69b739f350..12f07079b7d 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index c28acbaaa8f..11064a7e82f 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -122,7 +122,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index e5621197105..eccf6fb914c 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 1c740401a65..02f2ef84769 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 01d5361a5b5..2eeccbb7348 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 8656e97f866..e8f015076d1 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -86,7 +86,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -151,7 +151,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -218,7 +218,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-351,7 +351,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -416,7 +416,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.4.73-closedown0 + helmVersion: 9.4.75-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 22b14233235..39620d6c68a 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 4 ) -set ( HPCC_POINT 73 ) +set ( HPCC_POINT 75 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-20T15:59:29Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-28T11:37:40Z" ) ### From ad19dc98582b730613b1922074ea44b7249c9d0b Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Fri, 28 Jun 2024 12:39:35 +0100 Subject: [PATCH 130/151] Split off 9.2.100 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 6fb3b6188af..dc7185a7fec 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.2.99-closedown0 +version: 9.2.101-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.2.99-closedown0 +appVersion: 9.2.101-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 4658ca74316..ed555ffd9a7 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1405,7 +1405,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index d9ab3304dae..95f4a3b6cd8 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -50,7 +50,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index 2c88ddbe102..6f60764deb1 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -82,7 +82,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index faa5493a225..f2eaaa5c17d 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -56,7 +56,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 1f5a8da4b07..5a398cf4cc8 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -133,7 +133,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 991b88ec767..7c19cc105fd 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -57,7 +57,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -140,7 +140,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 713eec2275a..f5eb9ef1a03 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -64,7 +64,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index c11fc1ac990..7652a41ae91 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -120,7 +120,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 5d75783d0e0..e822313c7cb 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -70,7 +70,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 735c7d17117..f115994c70a 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -120,7 +120,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -178,7 +178,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -240,7 +240,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -346,7 +346,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index a660b392ae3..4bcbcc6b41b 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index a99ee0d0356..e50507bff55 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -82,7 +82,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -145,7 +145,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -210,7 +210,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 
}} @@ -341,7 +341,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -404,7 +404,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.2.99-closedown0 + helmVersion: 9.2.101-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 9f37c6d68fd..94b859bb5c4 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 2 ) -set ( HPCC_POINT 99 ) +set ( HPCC_POINT 101 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-20T16:00:57Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-28T11:39:35Z" ) ### From de3722b5bb4ed202ea31d69e9391c7727e52d9c4 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Fri, 28 Jun 2024 12:40:56 +0100 Subject: [PATCH 131/151] Split off 9.0.122 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index c03ac127f3d..f9d3358da5a 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.0.121-closedown0 +version: 9.0.123-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.0.121-closedown0 +appVersion: 9.0.123-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 3ee14df6e41..3fdd3c487dd 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1314,7 +1314,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index c05a073f715..b59f13fec1e 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -50,7 +50,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 annotations: checksum/config: {{ $configSHA }} spec: diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index 32ae6256bb3..e7589f792fa 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -82,7 +82,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index ed71697b16c..bae7d7e23ae 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -56,7 +56,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index a73c6190982..966a904a848 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -135,7 +135,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 744cc72f048..218819150d2 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -57,7 +57,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -142,7 +142,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 5bef21a5f76..0c23e48df74 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -64,7 +64,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 58de354af8e..4ad9c05b577 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -117,7 +117,7 @@ spec: server: {{ .name | quote }} accessDali: "yes" app: {{ $application }} - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 37109c9eedf..c2eed816fc7 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -70,7 +70,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 9087d9a6b7a..9d3f9c2eb6c 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -120,7 +120,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -180,7 +180,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -242,7 +242,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -347,7 +347,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 9d18fb5a35e..9262cfb938a 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -52,7 +52,7 @@ spec: run: {{ $serviceName | quote }} server: {{ $serviceName | quote }} accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index be3d07f7bda..9d716ffb94b 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -82,7 +82,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -147,7 +147,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -214,7 +214,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf 
"%s-thorworker-job" .me.name)) | indent 12 }} @@ -347,7 +347,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -412,7 +412,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.0.121-closedown0 + helmVersion: 9.0.123-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 7040e49d251..a9ed4df74c0 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 0 ) -set ( HPCC_POINT 121 ) +set ( HPCC_POINT 123 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-20T16:03:34Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-06-28T11:40:55Z" ) ### From e1a583a692a4afe60297919ab86733f79874b4b9 Mon Sep 17 00:00:00 2001 From: Jim DeFabia Date: Fri, 28 Jun 2024 11:07:03 -0400 Subject: [PATCH 132/151] HPCC-32178 Fix Typo in Type Casting Example code Signed-off-by: Jim DeFabia --- docs/EN_US/ECLLanguageReference/ECLR_mods/Value-TypeCasting.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/EN_US/ECLLanguageReference/ECLR_mods/Value-TypeCasting.xml b/docs/EN_US/ECLLanguageReference/ECLR_mods/Value-TypeCasting.xml index 337b77dd7f0..65dfc5827ec 100644 --- a/docs/EN_US/ECLLanguageReference/ECLR_mods/Value-TypeCasting.xml +++ b/docs/EN_US/ECLLanguageReference/ECLR_mods/Value-TypeCasting.xml @@ -34,7 +34,7 @@ MyValue := (INTEGER) MAP(MyString = '1' => MyString, '0'); MySet := (SET OF INTEGER1) [1,2,3,4,5,6,7,8,9,10]; //casts from a SET OF INTEGER8 (the default) to SET OF INTEGER1 -UTPUT(MyBoolean); +OUTPUT(MyBoolean); OUTPUT(MyString); OUTPUT(MyValue); OUTPUT(MySet); From a08f59252af3bc10690bdcc32259437f91572b1d Mon Sep 17 00:00:00 2001 From: Kunal Aswani Date: Fri, 28 Jun 2024 13:26:53 -0400 Subject: [PATCH 133/151] HPCC-32152 FR Translations 9.8.x Translations added for French (FR). 
Signed-off-by: Kunal Aswani --- esp/src/src/nls/fr/hpcc.ts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/esp/src/src/nls/fr/hpcc.ts b/esp/src/src/nls/fr/hpcc.ts index d4f65ed9ca6..d2385e93462 100644 --- a/esp/src/src/nls/fr/hpcc.ts +++ b/esp/src/src/nls/fr/hpcc.ts @@ -412,6 +412,8 @@ export = { IgnoreGlobalStoreOutEdges: "Ignorer les bords de sortie du dépot global", Import: "Importer", Inactive: "Inactive", + IncludePerComponentLogs: "Inclure les journaux par composant", + IncludeRelatedLogs: "Inclure les journaux associés", IncludeSlaveLogs: "Inclure les journaux esclaves", IncludeSubFileInfo: "Inclure les informations du sous-fichier ?", Index: "Indice", @@ -583,6 +585,7 @@ export = { Newest: "Le plus récent", NewPassword: "Nouveau mot de passe", NextSelection: "Séléction suivante", + NextWorkunit: "Workunit suivante", NoCommon: "Pas commun", NoContent: "(Pas de contenu)", NoContentPleaseSelectItem: "Aucun contenu - veuillez sélectionner un élément", @@ -713,6 +716,7 @@ export = { PressCtrlCToCopy: "Appuyez sur ctrl + c pour copier.", Preview: "Aperçu", PreviousSelection: "Séléction précédante", + PreviousWorkunit: "Workunit précédente", PrimaryLost: "Primaire perdu", PrimaryMonitoring: "Surveillance primaire", Priority: "Priorité", @@ -832,6 +836,7 @@ export = { Save: "Sauver", Scope: "Portée", SearchResults: "Résultats de recherche", + Seconds: "Secondes", SecondsRemaining: "Secondes restantes", Security: "Sécurité", SecurityMessageHTML: "Afficher uniquement le code HTML des utilisateurs confiables. Cette unité de travail a été créée par '{__placeholder__}'. Afficher le HTML ?", @@ -930,6 +935,7 @@ export = { SuspendedReason: "Raison d'être suspendu", SVGSource: "Source SVG", SyncSelection: "Synchroniser avec la sélection", + Syntax: "Syntaxe", SystemServers: "Système de serveurs", Table: "Table", tag: "étiquette", @@ -957,6 +963,7 @@ export = { TimeMaxTotalExecuteMinutes: "Temps Maximum Total Exécuter Minutes", TimeMeanTotalExecuteMinutes: "Temps Moyen Total Exécuter Minutes", TimeMinTotalExecuteMinutes: "Temps Minimum Total Exécuter Minutes", + TimePenalty: "Pénalité de temps", Timers: "Minuteurs", TimeSeconds: "Temps (secondes)", TimeStamp: "Horodatage", @@ -1123,6 +1130,7 @@ export = { WildcardFilter: "Filtre générique", Workflows: "Flux de travail", Workunit: "Workunit", + WorkunitNotFound: "Workunit introuvable", Workunits: "Workunits", WorkUnitScopeDefaultPermissions: "Autorisations par défaut de l'étendue du workunit", Wrap: "Emballage", From ff5fe665ffcede96b8297eb9f65a8467f8cfeeab Mon Sep 17 00:00:00 2001 From: Kunal Aswani Date: Fri, 28 Jun 2024 13:31:53 -0400 Subject: [PATCH 134/151] HPCC-32153 HU Translations 9.8.x Translations added for Hungarian (HU).
Signed-off-by: Kunal Aswani --- esp/src/src/nls/hu/hpcc.ts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/esp/src/src/nls/hu/hpcc.ts b/esp/src/src/nls/hu/hpcc.ts index 0bed99cb1c3..ef57638c03d 100644 --- a/esp/src/src/nls/hu/hpcc.ts +++ b/esp/src/src/nls/hu/hpcc.ts @@ -412,6 +412,8 @@ export = { IgnoreGlobalStoreOutEdges: "A 'Global Storage Out Edges' metrika figyelmen kívül hagyása", Import: "Import", Inactive: "Inaktív", + IncludePerComponentLogs: "Komponensenkénti naplók felvétele", + IncludeRelatedLogs: "Tartalmazza a kapcsolódó naplókat", IncludeSlaveLogs: "Slave logok hozzáadása", IncludeSubFileInfo: "Hozzáadjuk az al-fájl információkat is?", Index: "Index", @@ -583,6 +585,7 @@ export = { Newest: "Legújabb", NewPassword: "Új jelszó", NextSelection: "Következő kiválasztott", + NextWorkunit: "Következő munkaegység", NoCommon: "Nem közös", NoContent: "(Nincs tartalom)", NoContentPleaseSelectItem: "Nincs tartalom - válaszon ki egy elemet", @@ -715,6 +718,7 @@ export = { PressCtrlCToCopy: "A másoláshoz nyomja meg a - gombokat.", Preview: "Előnézet", PreviousSelection: "Előző kiválasztott", + PreviousWorkunit: "Előző munkaegység", PrimaryLost: "Elveszett az elsődleges változat", PrimaryMonitoring: "Elsődleges felügyelet", Priority: "Prioritás", @@ -836,6 +840,7 @@ export = { Save: "Mentés", Scope: "Hatókör", SearchResults: "Keresések", + Seconds: "Másodpercek", SecondsRemaining: "másodperc maradt", Security: "Biztonság", SecurityMessageHTML: "Csak megbízható felhasználóktól származó HTML tekinthető meg. Ezt a munkaegységet '{__placeholder__}' hozta létre. HTML megjelenítése?", @@ -935,6 +940,7 @@ export = { SVGSource: "SVG forrás", Sync: "Szinkronizálás", SyncSelection: "Szinkronizálás a kiválasztottakhoz", + Syntax: "Szintaxis", SystemServers: "Rendszer kiszolgálók", Table: "Táblázat", tag: "jel", @@ -962,6 +968,7 @@ export = { TimeMaxTotalExecuteMinutes: "Maximális futási idők összege (perc)", TimeMeanTotalExecuteMinutes: "Átlagos futási idők összege (perc)", TimeMinTotalExecuteMinutes: "Minimális futási idők összege (perc)", + TimePenalty: "Időbüntetés", Timers: "Időzítések", TimeSeconds: "Idő (másodperc)", TimeStamp: "Időbélyeg", @@ -1129,6 +1136,7 @@ export = { WildcardFilter: "Wildcard szűrő", Workflows: "Munkafolyamat", Workunit: "Feladat", + WorkunitNotFound: "Munkaegység nem található", Workunits: "Feladatok", WorkUnitScopeDefaultPermissions: "Alapértelmezett feldolgozás-hatókör hozzáférési jogok", Wrap: "Csomagol (Wrap)", From 8e98659212521841cc54db981450e0fb39428280 Mon Sep 17 00:00:00 2001 From: Kunal Aswani Date: Fri, 28 Jun 2024 13:35:49 -0400 Subject: [PATCH 135/151] HPCC-32151 BS Translations 9.8.x Translations added for Bosnian (BS). 
Signed-off-by: Kunal Aswani --- esp/src/src/nls/bs/hpcc.ts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/esp/src/src/nls/bs/hpcc.ts b/esp/src/src/nls/bs/hpcc.ts index 7a4097a5a93..cfe65e83908 100644 --- a/esp/src/src/nls/bs/hpcc.ts +++ b/esp/src/src/nls/bs/hpcc.ts @@ -412,6 +412,8 @@ IgnoreGlobalStoreOutEdges: "Zanemarite globalne ivice", Import: "Uvoz", Inactive: "Neaktivan", + IncludePerComponentLogs: "Uključite zapise po komponenti", + IncludeRelatedLogs: "Uključite relevantne zapise", IncludeSlaveLogs: "Uključite Izvještaje Sa Nodova Izvršilaca", IncludeSubFileInfo: "Uključiti informacije o poddatoteci?", Index: "Indeks", @@ -583,6 +585,7 @@ Newest: "Najnoviji", NewPassword: "Nova Lozinka", NextSelection: "Slijedeća Selekcija", + NextWorkunit: "Sljedeća radna jedinica", NoCommon: "Nema uobičajenog", NoContent: "(Bez sadržaja)", NoContentPleaseSelectItem: "Nema sadržaja - odaberite stavku", @@ -715,6 +718,7 @@ PressCtrlCToCopy: "Pritisnite ctrl + c za kopiranje.", Preview: "Pregled", PreviousSelection: "Prethodna Selekcija", + PreviousWorkunit: "Prethodna radna jedinica", PrimaryLost: "Primarni Je Izgubljen", PrimaryMonitoring: "Primarni Nadzor", Priority: "Prioritet", @@ -836,6 +840,7 @@ Save: "Sačuvajte", Scope: "Područje", SearchResults: "Rezultati Pretraživanja", + Seconds: "Sekunde", SecondsRemaining: "Preostalo Sekundi", Security: "Sigurnost", SecurityMessageHTML: "Pogledajte HTML samo od pouzdanih korisnika. Ovu radnu jedinicu kreirao je '{__placeholder__}'. Prikaži HTML?", @@ -934,6 +939,7 @@ SuspendedReason: "Razlog za Suspendovanje", SVGSource: "SVG Izvor", SyncSelection: "Sinhronizujte Sa Odabranim", + Syntax: "Sintaksa", SystemServers: "Sistem Servera", Table: "Tabela", tag: "tag", @@ -961,6 +967,7 @@ TimeMaxTotalExecuteMinutes: "Maksimalno ukupno vrijeme izvršavanja u minutama", TimeMeanTotalExecuteMinutes: "Prosječno ukupno vrijeme izvršavanja u minutama", TimeMinTotalExecuteMinutes: "Minimalno ukupno vrijeme izvršavanja u minutama", + TimePenalty: "Vremenska kazna", Timers: "Mjerači Vremena", TimeSeconds: "Vrijeme (Sekunde)", TimeStamp: "Vremenska Oznaka", @@ -1128,6 +1135,7 @@ WildcardFilter: "Višeznačni filter", Workflows: "Tokovi poslova", Workunit: "Radna Jedinica", + WorkunitNotFound: "Radna jedinica nije pronađena", Workunits: "Radne Jedinice", WorkUnitScopeDefaultPermissions: "Unaprijed Definisane Dozvole za Prostor za Radne Jedinice", Wrap: "Zamotajte", From 93aee6e0ba02e11444a13212400ae8c61f72b948 Mon Sep 17 00:00:00 2001 From: Kunal Aswani Date: Fri, 28 Jun 2024 13:41:24 -0400 Subject: [PATCH 136/151] HPCC-32154 HR Translations 9.8.x Translations added for Croatian (HR). 
Signed-off-by: Kunal Aswani --- esp/src/src/nls/hr/hpcc.ts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/esp/src/src/nls/hr/hpcc.ts b/esp/src/src/nls/hr/hpcc.ts index 72c1c4b2558..741b9486323 100644 --- a/esp/src/src/nls/hr/hpcc.ts +++ b/esp/src/src/nls/hr/hpcc.ts @@ -412,6 +412,8 @@ IgnoreGlobalStoreOutEdges: "Zanemarite globalne rubove", Import: "Uvoz", Inactive: "Neaktivan", + IncludePerComponentLogs: "Uključite zapise po komponenti", + IncludeRelatedLogs: "Uključite relevantne zapise", IncludeSlaveLogs: "Uključite Izvještaje Sa Nodova Izvršilaca", IncludeSubFileInfo: "Uključiti informacije o poddatoteci?", Index: "Indeks", @@ -583,6 +585,7 @@ Newest: "Najnoviji", NewPassword: "Nova Lozinka", NextSelection: "Sljedeći Odabir", + NextWorkunit: "Sljedeća radna jedinica", NoCommon: "Nema uobičajenog", NoContent: "(Bez sadržaja)", NoContentPleaseSelectItem: "Nema sadržaja - odaberite stavku", @@ -715,6 +718,7 @@ PressCtrlCToCopy: "Pritisnite ctrl + c za kopiranje.", Preview: "Pregled", PreviousSelection: "Prethodni Odabir", + PreviousWorkunit: "Prethodna radna jedinica", PrimaryLost: "Primarni Je Izgubljen", PrimaryMonitoring: "Primarni Nadzor", Priority: "Prioritet", @@ -836,6 +840,7 @@ Save: "Sačuvajte", Scope: "Područje", SearchResults: "Rezultati Pretraživanja", + Seconds: "Sekunde", SecondsRemaining: "Preostalo Sekundi", Security: "Sigurnost", SecurityMessageHTML: "Gledajte HTML samo od pouzdanih korisnika. Ovu radnu jedinicu izradio je '{__placeholder__}'. Prikaži HTML?", @@ -934,6 +939,7 @@ SuspendedReason: "Razlog za Suspendovanje", SVGSource: "SVG Izvor", SyncSelection: "Sinhronizujte Sa Odabranim", + Syntax: "Sintaksa", SystemServers: "Sistem Servera", Table: "Tabela", tag: "tag", @@ -961,6 +967,7 @@ TimeMaxTotalExecuteMinutes: "Maksimalno ukupno vrijeme izvršavanja u minutama", TimeMeanTotalExecuteMinutes: "Prosječno ukupno vrijeme izvršavanja u minutama", TimeMinTotalExecuteMinutes: "Minimalno ukupno vrijeme izvršavanja u minutama", + TimePenalty: "Vremenska kazna", Timers: "Mjerači Vremena", TimeSeconds: "Vrijeme (Sekunde)", TimeStamp: "Vremenska Oznaka", @@ -1128,6 +1135,7 @@ WildcardFilter: "Filter zamjenskih znakova", Workflows: "Tokovi poslova", Workunit: "Radna Jedinica", + WorkunitNotFound: "Radna jedinica nije pronađena", Workunits: "Radne Jedinice", WorkUnitScopeDefaultPermissions: "Unaprijed Definisane Dozvole za Prostor za Radne Jedinice", Wrap: "Zamotajte", From 71d2c4e00439f294d74c77050641a8fe083c6b07 Mon Sep 17 00:00:00 2001 From: Kunal Aswani Date: Fri, 28 Jun 2024 13:46:58 -0400 Subject: [PATCH 137/151] HPCC-32156 SR Translations 9.8.x Translations added for Serbian (SR). 
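These locale patches (HU, BS, HR, SR) all extend the same key/value bundle that ECL Watch components read through the shared nlsHPCC module. A minimal sketch of how one of the new keys is consumed — assuming the repo's usual src/nlsHPCC locale resolution, which is an assumption here, not shown in these patches:

    import nlsHPCC from "src/nlsHPCC";

    // "NextWorkunit" resolves against the active locale's bundle, e.g.
    // "Következő munkaegység" (hu) or "Sljedeća radna jedinica" (bs/hr).
    const tooltip: string = nlsHPCC.NextWorkunit;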
Signed-off-by: Kunal Aswani --- esp/src/src/nls/sr/hpcc.ts | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/esp/src/src/nls/sr/hpcc.ts b/esp/src/src/nls/sr/hpcc.ts index a1924b198c1..a43a4e52a55 100644 --- a/esp/src/src/nls/sr/hpcc.ts +++ b/esp/src/src/nls/sr/hpcc.ts @@ -413,6 +413,8 @@ IgnoreGlobalStoreOutEdges: "Занемари глобалне ивице", Import: "Увоз", Inactive: "Неактиван", + IncludePerComponentLogs: "Укључите записе по компоненти", + IncludeRelatedLogs: "Укључите релевантне записе", IncludeSlaveLogs: "Укључите Извештаје Са Нодова Извршилаца", IncludeSubFileInfo: "Укључити информације о поддатотеци?", Index: "Индекс", @@ -584,6 +586,7 @@ Newest: "Најнивији", NewPassword: "Нова Лозинка", NextSelection: "Следећи Избор", + NextWorkunit: "Следећа радна јединица", NoCommon: "Није уобичајено", NoContent: "(Без садржаја)", NoContentPleaseSelectItem: "Нема садржаја - изаберите ставку", @@ -716,6 +719,7 @@ PressCtrlCToCopy: "Притисните цтрл + ц да бисте копирали.", Preview: "Преглед", PreviousSelection: "Претходни Избор", + PreviousWorkunit: "Претходна радна јединица", PrimaryLost: "Примарни Је Изгубљен", PrimaryMonitoring: "Основно Надгледање", Priority: "Приоритет", @@ -837,6 +841,7 @@ Save: "Сачувајте", Scope: "Подручје", SearchResults: "Резултати Претраживања", + Seconds: "Cекунде", SecondsRemaining: "Преостало Секунди", Security: "Сигурност", SecurityMessageHTML: "Погледајте ХТМЛ само од поузданих корисника. Ову радну јединицу је направио '{__плацехолдер__}'. Прикажи ХТМЛ?", @@ -935,6 +940,7 @@ SuspendedReason: "Разлог за Суспендовање", SVGSource: "СВГ Извор", SyncSelection: "Синхронизујте Са Одабраним", + Syntax: "Синтакса", SystemServers: "Систем Сервера", Table: "табела", tag: "таг", @@ -962,6 +968,7 @@ TimeMaxTotalExecuteMinutes: "Максимално укупно време извршавања у минутима", TimeMeanTotalExecuteMinutes: "Просечно укупно време извршавања у минутима", TimeMinTotalExecuteMinutes: "Минимално укупно време извршавања у минутима", + TimePenalty: "Bременска Kазна", Timers: "Мерачи Времена", TimeSeconds: "Време (Секунде)", TimeStamp: "Временска Ознака", @@ -1129,6 +1136,7 @@ WildcardFilter: "Вилдцард филтер", Workflows: "Токови послова", Workunit: "Радна Јединица", + WorkunitNotFound: "Радна јединица није пронађена", Workunits: "Радне Јединице", WorkUnitScopeDefaultPermissions: "Унапред Дефинисане Дозвиле за Простор за Радне Јединице", Wrap: "Замотајте", From f742683bdc8e24dc62218ac788680237a7c22044 Mon Sep 17 00:00:00 2001 From: "Dan S. 
Camper" Date: Fri, 28 Jun 2024 13:52:08 -0500 Subject: [PATCH 138/151] HPCC-32181 eclcc crashes if REGEXREPLACE subject is an empty string --- rtl/eclrtl/eclregex.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rtl/eclrtl/eclregex.cpp b/rtl/eclrtl/eclregex.cpp index 497b645078d..9aeed058250 100644 --- a/rtl/eclrtl/eclregex.cpp +++ b/rtl/eclrtl/eclregex.cpp @@ -439,7 +439,7 @@ class CCompiledStrRegExpr : implements ICompiledStrRegExpr { // No match found; return the original string out = (char *)rtlMalloc(sourceSize); - memcpy(out, str, sourceSize); + memcpy_iflen(out, str, sourceSize); outlen = slen; pcre2_match_data_free_8(matchData); } @@ -838,7 +838,7 @@ class CCompiledUStrRegExpr : implements ICompiledUStrRegExpr { // No match found; return the original string out = (UChar *)rtlMalloc(slen * sizeof(UChar)); - memcpy(out, str, slen * sizeof(UChar)); + memcpy_iflen(out, str, slen * sizeof(UChar)); outlen = slen; pcre2_match_data_free_16(matchData); } From 7cc3f958a1e1ff0c077bd823647c16c31310c3c8 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Mon, 1 Jul 2024 08:10:43 +0100 Subject: [PATCH 139/151] HPCC-31408 Enable sorting for metric properties Signed-off-by: Gordon Smith --- esp/src/src-react/components/Metrics.tsx | 1 + esp/src/src-react/components/MetricsPropertiesTables.tsx | 1 + 2 files changed, 2 insertions(+) diff --git a/esp/src/src-react/components/Metrics.tsx b/esp/src/src-react/components/Metrics.tsx index ee05e3055e3..03f1e4d3a72 100644 --- a/esp/src/src-react/components/Metrics.tsx +++ b/esp/src/src-react/components/Metrics.tsx @@ -450,6 +450,7 @@ export const Metrics: React.FunctionComponent = ({ const propsTable2 = useConst(() => new Table() .columns([nlsHPCC.Property, nlsHPCC.Value]) .columnWidth("auto") + .sortable(true) ); const updatePropsTable2 = React.useCallback((selection: IScope[]) => { diff --git a/esp/src/src-react/components/MetricsPropertiesTables.tsx b/esp/src/src-react/components/MetricsPropertiesTables.tsx index 470516b55f7..83d8a894cff 100644 --- a/esp/src/src-react/components/MetricsPropertiesTables.tsx +++ b/esp/src/src-react/components/MetricsPropertiesTables.tsx @@ -23,6 +23,7 @@ export const MetricsPropertiesTables: React.FunctionComponent new Table() .columns([nlsHPCC.Property, nlsHPCC.Value, "Avg", "Min", "Max", "Delta", "StdDev", "SkewMin", "SkewMax", "NodeMin", "NodeMax"]) .columnWidth("auto") + .sortable(true) ); React.useEffect(() => { From f7452d3ddd85a42c39144c320f0dc06ec1346128 Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Fri, 28 Jun 2024 11:27:47 +0100 Subject: [PATCH 140/151] HPCC-32176: Update default dependencies in smoketest pkg-config is breaking GH Actions Signed-off-by: Gordon Smith --- .github/workflows/build-gh_runner.yml | 1 - .github/workflows/test-smoke-gh_runner.yml | 2 +- .github/workflows/test-ui-gh_runner.yml | 2 +- .github/workflows/test-unit-gh_runner.yml | 2 +- 4 files changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-gh_runner.yml b/.github/workflows/build-gh_runner.yml index fbd791f9115..3a83c51db3d 100644 --- a/.github/workflows/build-gh_runner.yml +++ b/.github/workflows/build-gh_runner.yml @@ -93,7 +93,6 @@ jobs: gnupg \ groff-base \ libtool \ - pkg-config \ software-properties-common \ tar \ unzip \ diff --git a/.github/workflows/test-smoke-gh_runner.yml b/.github/workflows/test-smoke-gh_runner.yml index f0fdfbaeab5..cc37e0b32e2 100644 --- a/.github/workflows/test-smoke-gh_runner.yml +++ b/.github/workflows/test-smoke-gh_runner.yml @@ -17,7 +17,7 @@ on: 
type: string description: 'Dependencies' required: false - default: 'bison flex build-essential binutils-dev curl lsb-release libcppunit-dev python3-dev default-jdk r-base-dev r-cran-rcpp r-cran-rinside r-cran-inline pkg-config libtool autotools-dev automake git cmake xmlstarlet' + default: 'bison flex build-essential binutils-dev curl lsb-release libcppunit-dev python3-dev default-jdk r-base-dev r-cran-rcpp r-cran-rinside r-cran-inline libtool autotools-dev automake git cmake xmlstarlet' jobs: diff --git a/.github/workflows/test-ui-gh_runner.yml b/.github/workflows/test-ui-gh_runner.yml index 902b02f678b..ed1fad9bc21 100644 --- a/.github/workflows/test-ui-gh_runner.yml +++ b/.github/workflows/test-ui-gh_runner.yml @@ -17,7 +17,7 @@ on: type: string description: 'Dependencies' required: false - default: 'bison flex build-essential binutils-dev curl lsb-release libcppunit-dev python3-dev default-jdk r-base-dev r-cran-rcpp r-cran-rinside r-cran-inline pkg-config libtool autotools-dev automake git cmake xmlstarlet' + default: 'bison flex build-essential binutils-dev curl lsb-release libcppunit-dev python3-dev default-jdk r-base-dev r-cran-rcpp r-cran-rinside r-cran-inline libtool autotools-dev automake git cmake xmlstarlet' jobs: diff --git a/.github/workflows/test-unit-gh_runner.yml b/.github/workflows/test-unit-gh_runner.yml index 69928aa0811..eb8daf6a069 100644 --- a/.github/workflows/test-unit-gh_runner.yml +++ b/.github/workflows/test-unit-gh_runner.yml @@ -17,7 +17,7 @@ on: type: string description: 'Dependencies' required: false - default: 'bison flex build-essential binutils-dev curl lsb-release libcppunit-dev python3-dev default-jdk r-base-dev r-cran-rcpp r-cran-rinside r-cran-inline pkg-config libtool autotools-dev automake git cmake xmlstarlet' + default: 'bison flex build-essential binutils-dev curl lsb-release libcppunit-dev python3-dev default-jdk r-base-dev r-cran-rcpp r-cran-rinside r-cran-inline libtool autotools-dev automake git cmake xmlstarlet' jobs: From 573f392921e9ca3495914e4353f1c7cebc80a427 Mon Sep 17 00:00:00 2001 From: Charan-Sharan Date: Wed, 19 Jun 2024 18:18:04 +0530 Subject: [PATCH 141/151] HPCC-32003 Develop an automated testing of hyperlinks in HPCC Systems user documents and GitHub README files using GitHub Actions Signed-off-by: Charan-Sharan --- .github/workflows/test-hyperlinks.yml | 241 ++++++++++++++++++++++++++ 1 file changed, 241 insertions(+) create mode 100644 .github/workflows/test-hyperlinks.yml diff --git a/.github/workflows/test-hyperlinks.yml b/.github/workflows/test-hyperlinks.yml new file mode 100644 index 00000000000..1bb302e6e5f --- /dev/null +++ b/.github/workflows/test-hyperlinks.yml @@ -0,0 +1,241 @@ +name: Test Hyperlinks + +on: + pull_request: + branches: + - "master" + - "candidate-*" + - "!candidate-9.4.*" + - "!candidate-9.2.*" + - "!candidate-9.0.*" + - "!candidate-8.*" + - "!candidate-7.*" + - "!candidate-6.*" + workflow_dispatch: + inputs: + Debug-Mode: + type: boolean + description: Run in Debug mode to upload all created files + default: false + required: false + +jobs: + main: + runs-on: ubuntu-22.04 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 2 + + - name: List Documentation files + run: | + if [[ ${{ github.event_name }} == "workflow_dispatch" ]]; then + find $PWD -name '*.xml' -type f > xmlFilesList.txt + find $PWD -name '*.md' -type f > mdFilesList.txt + find $PWD -name '*.rst' -type f > rstFilesList.txt + else + git diff --name-only HEAD^1 HEAD > updatedFiles.txt + cat 
updatedFiles.txt | grep -E "*.xml" | tee xmlFilesList.txt 1>&/dev/null + cat updatedFiles.txt | grep -E "*.md" | tee mdFilesList.txt 1>&/dev/null + cat updatedFiles.txt | grep -E "*.rst" | tee rstFilesList.txt 1>&/dev/null + fi + continue-on-error: true + + - name: List links from Documentation files + run: | + IFS=$'\n' + for FILE in $( cat xmlFilesList.txt ) + do + #check if the file is missing + if [[ ! -f $FILE ]]; then + echo -e "$FILE -\e[31m file missing\e[0m" + echo $FILE >> missingFiles.txt + continue + fi + grep -onHE -e "" ${FILE} | sed 's/url="//' > links.tmp + FLAG=0 + for LINE in $( cat links.tmp ) + do + LINK=$( echo $LINE | cut -d ':' -f3- ) + if [[ ${LINK:0:6} == '' ]]; then + FLAG=0 + continue + fi + if [[ $FLAG -eq 1 ]]; then + echo $LINE >> linksList.txt + fi + done + done + for FILE in $( cat mdFilesList.txt ) + do + #check if the file is missing + if [[ ! -f $FILE ]]; then + echo -e "$FILE -\e[31m file missing\e[0m" + echo $FILE >> missingFiles.txt + continue + fi + grep -onHE -e "\]\([^\)]+" -e "\`\`\`[^\`]*" -e "http://[^\ \;\"\'\<\>\]\[\,\`\)]+" -e "https://[^\ \;\"\'\<\>\]\[\,\`\)]+" ${FILE} | sed 's/](//' > links.tmp + FLAG=0 + for LINE in $( cat links.tmp ) + do + LINK=$( echo $LINE | cut -d ':' -f3- ) + if [[ ${LINK:0:3} == '```' ]]; then + FLAG=$(( 1 - FLAG )) + continue + fi + if [[ $FLAG -eq 0 ]]; then + echo $LINE >> linksList.txt + fi + done + done + + for FILE in $( cat rstFilesList.txt ) + do + #check if the file is missing + if [[ ! -f $FILE ]]; then + echo -e "$FILE -\e[31m file missing\e[0m" + echo $FILE >> missingFiles.txt + continue + fi + grep -onHE -e ".. _[^\]+" -e "http://[^\ \;\"\'\<\>\,\`\)]+" -e "https://[^\ \;\"\'\<\>\,\`\)]+" ${FILE} | sed 's/.. _[^\:]*: //' >> linksList.txt + done + + if [[ -f linksList.txt ]]; then + cat linksList.txt | grep -vE '127.0.0.1|localhost|\$|\[' | grep -E 'https://|http://' | tee externalLinks.txt 1>&/dev/null + cat linksList.txt | grep -vE '127.0.0.1|localhost|\$|\[' | grep -vE 'https://|http://' | tee internalLinks.txt 1>&/dev/null + fi + + - name: Test External links + run: | + touch checkedLinksCache.txt + IFS=$'\n' + if [[ -f externalLinks.txt ]]; then + for LINE in $(cat externalLinks.txt ) + do + LINK=$( echo $LINE | cut -d ':' -f3- ) + LINK=${LINK%.} #remove trailing dot(.) + LINK=${LINK% } #remove trailing space + CHECK_CACHE=$( cat checkedLinksCache.txt | grep "$LINK~" | wc -w ) + TRY=3 #Max attempts to check status code of hyperlinks + if [[ $CHECK_CACHE -eq 0 ]]; then + while [[ $TRY -ne 0 ]] + do + STATUS_CODE=$(curl -LI -m 60 -s $LINK | grep "HTTP/" | tail -1 | cut -d' ' -f2 ) + if [[ -n $STATUS_CODE ]]; then + echo "$LINK~$STATUS_CODE" >> checkedLinksCache.txt + break + else + echo $LINE + echo "retrying..." + TRY=$(( TRY - 1)) + fi + done + else + STATUS_CODE=$( cat checkedLinksCache.txt | grep "$LINK~" | cut -d '~' -f2 ) + fi + if [[ $STATUS_CODE -eq 404 ]]; then + echo -e "${LINK} - \e[31m404 Error\e[0m" + echo "${LINE}" >> error-report.log + elif [[ ! 
-n $STATUS_CODE ]]; then + echo -e "${LINK} - \e[31mNo Response\e[0m" + echo "${LINE}(No-Response)" >> error-report.log + else + echo "${LINK} - ${STATUS_CODE}" + fi + done + fi + + - name: Test Internal Links + run: | + if [[ -f internalLinks.txt ]]; then + for LINE in $( cat internalLinks.txt ) + do + REFERENCE=$( echo $LINE | cut -d ':' -f3- ) + FILE=$( echo $LINE | cut -d ':' -f1 ) + if [[ ${REFERENCE:0:1} == '#' ]]; then + LINK_TEXT=$( cat $FILE | grep -oE "\[.*\]\(${REFERENCE}\)" | sed 's/\[//' | cut -d ']' -f1 ) + IS_PRESENT=$(cat $FILE | grep -oE "# ${LINK_TEXT}" | wc -w) + if [[ $IS_PRESENT -eq 0 ]]; then + echo -e "${LINE} -\e[31m invalid reference\e[0m" + echo "${LINE}" >> error-report.log + fi + else + if [[ ${REFERENCE:0:1} == '/' ]]; then + BASE_DIR=$PWD + else + BASE_DIR=${FILE/$( basename $FILE )} + fi + SEARCH_FILE="$BASE_DIR/${REFERENCE}" + SEARCH_FILE=$( realpath $SEARCH_FILE ) + if [[ ! -f $SEARCH_FILE ]]; then + echo -e "${LINE} -\e[31m invalid reference\e[0m" + echo ${LINE/$REFERENCE/$SEARCH_FILE} >> error-report.log + fi + fi + done + fi + + - name: Report Error links + run: | + if [[ -f error-report.log ]]; then + NUMBER_OF_404_LINKS=$( cat error-report.log | wc -l ) + fi + echo -e "\e[32mNo. of files scanned : $( cat *FilesList.txt | wc -l )\e[0m" + if [[ $NUMBER_OF_404_LINKS -ne 0 ]]; then + echo -e "\e[31mNo. of unique broken links : $( cat error-report.log | cut -d: -f3- | sort | uniq | wc -l )\e[0m" + echo -e "\e[31mTotal No. of reference to broken links : $( cat error-report.log | cut -d: -f3- | sort | wc -l )\e[0m" + exit -1 + else + echo -e "\e[32mNo Broken-links found\e[0m" + fi + + - name: Modify log file + if: ${{ failure() || cancelled() }} + run: | + BASE_DIR=${PWD%$(basename $PWD)} + BASE_DIR=$(echo $BASE_DIR | sed 's/\//\\\//g') + sed -i "s/${BASE_DIR}//g" error-report.log + FILE_NAMES_LIST=$(cat error-report.log | cut -d ':' -f1 | sort | uniq ) + FILE_COUNT=1 + for LINE in $FILE_NAMES_LIST + do + LINKS_LIST=$( cat error-report.log | grep $LINE | cut -d ':' -f2- ) + echo "$FILE_COUNT. 
$LINE" >> error-reportTmp.log + FILE_COUNT=$(( FILE_COUNT + 1)) + for LINK in $LINKS_LIST + do + echo -e "\t Line $LINK" | sed 's/:/ : /' >> error-reportTmp.log + done + done + if [[ $(cat missingFiles.txt | wc -w ) -eq 0 ]]; then + echo -e "Broken links: \n" > error-report.log + cat error-reportTmp.log >> error-report.log + else + echo -e "Missing Files: \n" > error-report.log + cat missingFiles.txt >> error-report.log + echo -e "Broken links: \n" >> error-report.log + cat error-reportTmp.log >> error-report.log + fi + if [[ ${{ github.event_name }} == "pull_request" || ${{ inputs.Debug-Mode }} == false ]]; then + rm -rf *FilesList.txt \ + checkedLinksCache.txt \ + *Links.txt \ + linksList.txt \ + fi + + - name: Upload logs + uses: actions/upload-artifact@v4 + if: ${{ failure() || cancelled() }} + with: + name: Hyperlinks-testing-log + path: | + /home/runner/work/HPCC-Platform/HPCC-Platform/error-report.log + /home/runner/work/HPCC-Platform/HPCC-Platform/*FilesList.txt + /home/runner/work/HPCC-Platform/HPCC-Platform/checkedLinksCache.txt + /home/runner/work/HPCC-Platform/HPCC-Platform/*Links.txt + /home/runner/work/HPCC-Platform/HPCC-Platform/linksList.txt + if-no-files-found: ignore \ No newline at end of file From adc18e098ec365acd09b89ca72125f2dde6ce535 Mon Sep 17 00:00:00 2001 From: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> Date: Thu, 20 Jun 2024 16:56:47 -0400 Subject: [PATCH 142/151] HPCC-31952 ECL Watch v9 react component for permissions Created a react component for the dojo ShowIndividualPermissionsWidget. Also, as a workaround for the bug Jake reported in the dojo widget, added a grid refresh on a 100ms timeout after changing any permissions. Signed-off-by: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> --- .../ShowIndividualPermissionsWidget.js | 1 + esp/src/package-lock.json | 31 ++- esp/src/package.json | 2 +- .../components/PermissionsEditor.tsx | 205 ++++++++++++++++++ esp/src/src-react/components/Security.tsx | 4 +- .../components/forms/AddGroupResource.tsx | 129 +++++++++++ .../components/forms/AddPermission.tsx | 2 +- esp/src/src/nls/hpcc.ts | 1 + 8 files changed, 362 insertions(+), 13 deletions(-) create mode 100644 esp/src/src-react/components/PermissionsEditor.tsx create mode 100644 esp/src/src-react/components/forms/AddGroupResource.tsx diff --git a/esp/src/eclwatch/ShowIndividualPermissionsWidget.js b/esp/src/eclwatch/ShowIndividualPermissionsWidget.js index fef10399d8b..ae3692d7ce6 100644 --- a/esp/src/eclwatch/ShowIndividualPermissionsWidget.js +++ b/esp/src/eclwatch/ShowIndividualPermissionsWidget.js @@ -181,6 +181,7 @@ define([ evt.preventDefault(); context.calcPermissionState(evt.cell.column.field, evt.value, evt.cell.row.data); evt.grid.store.put(evt.cell.row.data); + const t = window.setTimeout(() => { context.grid.refresh(); window.clearTimeout(t); }, 100); }); return retVal; }, diff --git a/esp/src/package-lock.json b/esp/src/package-lock.json index 06de90bbe29..363b1424c2d 100644 --- a/esp/src/package-lock.json +++ b/esp/src/package-lock.json @@ -18,7 +18,7 @@ "@hpcc-js/chart": "2.83.3", "@hpcc-js/codemirror": "2.62.0", "@hpcc-js/common": "2.71.17", - "@hpcc-js/comms": "2.92.2", + "@hpcc-js/comms": "2.92.3", "@hpcc-js/dataflow": "8.1.6", "@hpcc-js/eclwatch": "2.74.5", "@hpcc-js/graph": "2.85.15", @@ -1859,12 +1859,12 @@ } }, "node_modules/@hpcc-js/comms": { - "version": "2.92.2", - "resolved": "https://registry.npmjs.org/@hpcc-js/comms/-/comms-2.92.2.tgz", - "integrity": 
"sha512-9AbPnCYuTF6OhbSiG5QMDA2vuF457YL88h2ltuxPOjsOxp9Dp5VFlTkh88vW1W3Yph/+faGhiqUSvLMgFIwXEA==", + "version": "2.92.3", + "resolved": "https://registry.npmjs.org/@hpcc-js/comms/-/comms-2.92.3.tgz", + "integrity": "sha512-0mrIb4kXGTVnvHBpRZk+yas108sSXgIKq6HAGSv/ZJFCXEoKCynoaSH+DFaD9jcfZrVezn4fbjJtqYxXln86tA==", "dependencies": { - "@hpcc-js/ddl-shim": "^2.20.6", - "@hpcc-js/util": "^2.51.0", + "@hpcc-js/ddl-shim": "^2.20.7", + "@hpcc-js/util": "^2.51.1", "@xmldom/xmldom": "0.8.10", "abort-controller": "3.0.0", "node-fetch": "2.7.0", @@ -1873,6 +1873,14 @@ "undici": "5.28.4" } }, + "node_modules/@hpcc-js/comms/node_modules/@hpcc-js/util": { + "version": "2.51.1", + "resolved": "https://registry.npmjs.org/@hpcc-js/util/-/util-2.51.1.tgz", + "integrity": "sha512-BJuqg6FGqcV4RR8/BU5e7fASDtkl0Na7dWY+Th7r5ciWKI5AXsO0GtlgwDBt2uLUOlcGOMpYozmdbGSCoSHAvQ==", + "dependencies": { + "tslib": "2.6.3" + } + }, "node_modules/@hpcc-js/comms/node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", @@ -1892,15 +1900,20 @@ } ] }, + "node_modules/@hpcc-js/comms/node_modules/tslib": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz", + "integrity": "sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==" + }, "node_modules/@hpcc-js/dataflow": { "version": "8.1.6", "resolved": "https://registry.npmjs.org/@hpcc-js/dataflow/-/dataflow-8.1.6.tgz", "integrity": "sha512-BMmpA6CP00nRDdeq7MS/LRN+e08dyBnnLWEITK5zuEn8D9sFo4ZJlmrKNp+Lcox/m+CuNqWMTG9Z5c+hK2jUyw==" }, "node_modules/@hpcc-js/ddl-shim": { - "version": "2.20.6", - "resolved": "https://registry.npmjs.org/@hpcc-js/ddl-shim/-/ddl-shim-2.20.6.tgz", - "integrity": "sha512-0N9KwRr2rzSJEhfXjEDmT2IBf2Bm6QJ4ReOzhu1wTN3O3gKkUXxNxJMDePFyCNaeHaNWGEQOxewf1q9NtrSkbA==", + "version": "2.20.7", + "resolved": "https://registry.npmjs.org/@hpcc-js/ddl-shim/-/ddl-shim-2.20.7.tgz", + "integrity": "sha512-n+MQBW9zgfhN6zCTaZSiZfMAJfhR6bw4Fuo4fMhQdF2x17Yu/DbN8MReNvyq2OOBmxkwcp28/VxYnsJeppWMQw==", "hasInstallScript": true, "dependencies": { "ajv": "6.12.6" diff --git a/esp/src/package.json b/esp/src/package.json index 9f825f692bf..6030c085361 100644 --- a/esp/src/package.json +++ b/esp/src/package.json @@ -44,7 +44,7 @@ "@hpcc-js/chart": "2.83.3", "@hpcc-js/codemirror": "2.62.0", "@hpcc-js/common": "2.71.17", - "@hpcc-js/comms": "2.92.2", + "@hpcc-js/comms": "2.92.3", "@hpcc-js/dataflow": "8.1.6", "@hpcc-js/eclwatch": "2.74.5", "@hpcc-js/graph": "2.85.15", diff --git a/esp/src/src-react/components/PermissionsEditor.tsx b/esp/src/src-react/components/PermissionsEditor.tsx new file mode 100644 index 00000000000..b1d453f2354 --- /dev/null +++ b/esp/src/src-react/components/PermissionsEditor.tsx @@ -0,0 +1,205 @@ +import * as React from "react"; +import { Checkbox, CommandBar, ContextualMenuItemType, ICommandBarItemProps, Label, Stack } from "@fluentui/react"; +import { WsAccess, AccessService } from "@hpcc-js/comms"; +import { scopedLogger } from "@hpcc-js/util"; +import nlsHPCC from "src/nlsHPCC"; +import { useConfirm } from "../hooks/confirm"; +import { HolyGrail } from "../layouts/HolyGrail"; +import { ShortVerticalDivider } from "./Common"; +import { AddGroupResourceForm } from "./forms/AddGroupResource"; + +const logger = scopedLogger("src-react/components/PermissionsEditor.tsx"); + +const service = new AccessService({ baseUrl: "" }); + +// from ShowIndividualPermissionsWidget.js +const calcPermissionState = (field, value, row) => { 
+ switch (field) { + case "allow_access": + row.allow_full = value && row.allow_read && row.allow_write; + if (value) + calcPermissionState("deny_access", false, row); + break; + case "allow_read": + row.allow_full = row.allow_access && value && row.allow_write; + if (value) + calcPermissionState("deny_read", false, row); + break; + case "allow_write": + row.allow_full = row.allow_access && row.allow_read && value; + if (value) + calcPermissionState("deny_write", false, row); + break; + case "allow_full": + row.allow_access = value; + row.allow_read = value; + row.allow_write = value; + if (value) + calcPermissionState("deny_full", false, row); + break; + case "deny_access": + row.deny_full = value && row.deny_read && row.deny_write; + if (value) + calcPermissionState("allow_access", false, row); + break; + case "deny_read": + row.deny_full = row.deny_access && value && row.deny_write; + if (value) + calcPermissionState("allow_read", false, row); + break; + case "deny_write": + row.deny_full = row.deny_access && row.deny_read && value; + if (value) + calcPermissionState("allow_write", false, row); + break; + case "deny_full": + row.deny_access = value; + row.deny_read = value; + row.deny_write = value; + if (value) + calcPermissionState("allow_full", false, row); + break; + } + row[field] = value; +}; + +interface PermissionsEditorProps { + BaseDn?: string; + Name?: string; +} + +export const PermissionsEditor: React.FunctionComponent = ({ + BaseDn, + Name +}) => { + + const [data, setData] = React.useState([]); + const [selectedIndex, setSelectedIndex] = React.useState(-1); + const [showAddGroup, setShowAddGroup] = React.useState(false); + + const refreshData = React.useCallback(() => { + service.ResourcePermissions({ BasednName: BaseDn, name: Name }) + .then(({ Permissions }: WsAccess.ResourcePermissionsResponse) => { + setData(Permissions?.Permission.map(Permission => { + return { + account_name: Permission.account_name, + allow_access: Permission.allow_access ?? false, + allow_read: Permission.allow_read ?? false, + allow_write: Permission.allow_write ?? false, + allow_full: Permission.allow_full ?? false, + deny_access: Permission.deny_access ?? false, + deny_read: Permission.deny_read ?? false, + deny_write: Permission.deny_write ?? false, + deny_full: Permission.deny_full ?? 
false, + }; + })); + setSelectedIndex(-1); + }) + .catch(err => logger.error(err)) + ; + }, [BaseDn, Name]); + + const [DeleteConfirm, setShowDeleteConfirm] = useConfirm({ + title: nlsHPCC.Delete, + message: nlsHPCC.DeleteSelectedGroups + "\n\n" + data[selectedIndex]?.account_name, + onSubmit: React.useCallback(() => { + service.PermissionAction({ + action: "delete", + BasednName: BaseDn, + rname: Name, + account_name: data[selectedIndex]?.account_name + }) + .then(() => refreshData()) + .catch(err => logger.error(err)) + ; + }, [BaseDn, data, Name, refreshData, selectedIndex]) + }); + + // Command Bar --- + const buttons = React.useMemo((): ICommandBarItemProps[] => [ + { + key: "refresh", text: nlsHPCC.Refresh, iconProps: { iconName: "Refresh" }, + onClick: () => refreshData() + }, + { key: "divider_1", itemType: ContextualMenuItemType.Divider, onRender: () => }, + { + key: "add", text: nlsHPCC.Add, + onClick: () => setShowAddGroup(true) + }, + { key: "divider_2", itemType: ContextualMenuItemType.Divider, onRender: () => }, + { + key: "delete", text: nlsHPCC.Delete, disabled: selectedIndex < 0, + onClick: () => setShowDeleteConfirm(true) + }, + ], [refreshData, selectedIndex, setShowDeleteConfirm]); + + React.useEffect(() => { + refreshData(); + }, [refreshData]); + + const onRowSelect = React.useCallback((evt, index) => { + if (evt.target.checked) { + setSelectedIndex(index); + } else { + setSelectedIndex(-1); + } + }, [setSelectedIndex]); + + const onPermissionCheckboxClick = React.useCallback((evt, permission, prop) => { + calcPermissionState(prop, evt.target.checked, permission); + service.PermissionAction({ + action: "update", + BasednName: BaseDn, + rname: Name, + account_type: 1, + ...permission + }).then(({ retcode, retmsg }) => { + if (retcode === 0) { + setData(prevState => { + const newState = Array.from(prevState); + return newState; + }); + } else if (retcode === -1) { + logger.error(retmsg); + refreshData(); + } + }).catch(err => logger.error(err)); + }, [BaseDn, Name, refreshData]); + + return <> + } + main={ +
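A quick usage sketch of calcPermissionState from the top of this file (assuming it is in scope), showing the paired deny bit being cleared when its allow counterpart is checked:

    const row = {
        allow_access: false, allow_read: false, allow_write: false, allow_full: false,
        deny_access: true, deny_read: false, deny_write: false, deny_full: false
    };

    calcPermissionState("allow_access", true, row);
    // row.allow_access === true   -- the requested bit is set
    // row.deny_access  === false  -- the conflicting deny bit is cleared
    // row.allow_full   === false  -- still derived: needs read and write as well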
+ [grid header markup lost in extraction: an account-name column followed by allow/deny access, read, write and full checkbox columns] + {data?.map((permission, index) => ( +
+ } + /> + + + ; +}; \ No newline at end of file diff --git a/esp/src/src-react/components/Security.tsx b/esp/src/src-react/components/Security.tsx index fac4c8ed7a6..260f7b6239a 100644 --- a/esp/src/src-react/components/Security.tsx +++ b/esp/src/src-react/components/Security.tsx @@ -4,10 +4,10 @@ import { SizeMe } from "react-sizeme"; import { pushUrl } from "../util/history"; import { Groups } from "./Groups"; import { Permissions } from "./Permissions"; +import { PermissionsEditor } from "./PermissionsEditor"; import { Users } from "./Users"; import { useBuildInfo } from "../hooks/platform"; import { pivotItemStyle } from "../layouts/pivot"; -import { DojoAdapter } from "../layouts/DojoAdapter"; import nlsHPCC from "src/nlsHPCC"; interface SecurityProps { @@ -59,7 +59,7 @@ export const Security: React.FunctionComponent = ({ } {name && baseDn && - + } diff --git a/esp/src/src-react/components/forms/AddGroupResource.tsx b/esp/src/src-react/components/forms/AddGroupResource.tsx new file mode 100644 index 00000000000..0df103bf4a7 --- /dev/null +++ b/esp/src/src-react/components/forms/AddGroupResource.tsx @@ -0,0 +1,129 @@ +import * as React from "react"; +import { Checkbox, DefaultButton, PrimaryButton, TextField, } from "@fluentui/react"; +import { AccessService } from "@hpcc-js/comms"; +import { scopedLogger } from "@hpcc-js/util"; +import { useForm, Controller } from "react-hook-form"; +import nlsHPCC from "src/nlsHPCC"; +import { MessageBox } from "../../layouts/MessageBox"; + +const logger = scopedLogger("src-react/components/forms/AddGroupResource.tsx"); + +const service = new AccessService({ baseUrl: "" }); + +interface AddGroupResourceFormValues { + account_name: string; + allow_access: boolean; + allow_read: boolean; + allow_write: boolean; + allow_full: boolean; +} + +const defaultValues: AddGroupResourceFormValues = { + account_name: "", + allow_access: false, + allow_read: false, + allow_write: false, + allow_full: false +}; + +interface AddGroupResourceFormProps { + rname: string; + BasednName: string; + refreshGrid?: () => void; + showForm: boolean; + setShowForm: (_: boolean) => void; +} + +export const AddGroupResourceForm: React.FunctionComponent = ({ + rname, + BasednName, + refreshGrid, + showForm, + setShowForm +}) => { + + const { handleSubmit, control, reset } = useForm({ defaultValues }); + + const closeForm = React.useCallback(() => { + setShowForm(false); + }, [setShowForm]); + + const onSubmit = React.useCallback(() => { + handleSubmit( + (data, evt) => { + const request: any = data; + + request["action"] = "update"; + request["account_type"] = "1"; + request["rname"] = rname; + request["BasednName"] = BasednName; + + service.PermissionAction(request) + .then(() => { + closeForm(); + reset(defaultValues); + if (refreshGrid) refreshGrid(); + }) + .catch(err => logger.error(err)) + ; + }, + logger.info + )(); + }, [BasednName, closeForm, handleSubmit, refreshGrid, reset, rname]); + + return + + { reset(defaultValues); closeForm(); }} /> + }> + } + rules={{ + required: nlsHPCC.ValidationErrorRequired + }} + /> +
+ [four <Controller>/<Checkbox> blocks lost in extraction — allow_access, allow_read, allow_write and allow_full, mirroring the account_name field above; only their closing "} />" fragments survived]
; +}; diff --git a/esp/src/src-react/components/forms/AddPermission.tsx b/esp/src/src-react/components/forms/AddPermission.tsx index 39f7df0f866..5b44b6eb7e2 100644 --- a/esp/src/src-react/components/forms/AddPermission.tsx +++ b/esp/src/src-react/components/forms/AddPermission.tsx @@ -67,7 +67,7 @@ export const AddPermissionForm: React.FunctionComponent )(); }, [closeForm, handleSubmit, refreshGrid, reset]); - return { reset(defaultValues); closeForm(); }} /> diff --git a/esp/src/src/nls/hpcc.ts b/esp/src/src/nls/hpcc.ts index 5b5cfb4d897..73435fe047d 100644 --- a/esp/src/src/nls/hpcc.ts +++ b/esp/src/src/nls/hpcc.ts @@ -31,6 +31,7 @@ export = { AddBinding: "Add Binding", AddFile: "Add File", AddGroup: "Add Group", + AddResource: "Add Resource", AddtionalProcessesToFilter: "Addtional Processes To Filter", AdditionalResources: "Additional Resources", AddPart: "Add Part", From 801f268fefba4dd3fcd4741466fc322928b5a676 Mon Sep 17 00:00:00 2001 From: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> Date: Wed, 3 Jul 2024 11:36:03 -0400 Subject: [PATCH 143/151] HPCC-32128 ECL Watch v9 add ZAP creation indicator disables the Submit button and displaying a "Loading..." status on form submission Signed-off-by: Jeremy Clements <79224539+jeclrsg@users.noreply.github.com> --- esp/src/src-react/components/forms/ZAPDialog.tsx | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/esp/src/src-react/components/forms/ZAPDialog.tsx b/esp/src/src-react/components/forms/ZAPDialog.tsx index d43147c8f3c..3d4e3e3c5c7 100644 --- a/esp/src/src-react/components/forms/ZAPDialog.tsx +++ b/esp/src/src-react/components/forms/ZAPDialog.tsx @@ -1,5 +1,5 @@ import * as React from "react"; -import { Checkbox, DefaultButton, Dropdown, Icon, IDropdownProps, IOnRenderComboBoxLabelProps, IStackTokens, ITextFieldProps, mergeStyleSets, PrimaryButton, Stack, TextField, TooltipHost } from "@fluentui/react"; +import { Checkbox, DefaultButton, Dropdown, Icon, IDropdownProps, IOnRenderComboBoxLabelProps, IStackTokens, ITextFieldProps, mergeStyleSets, PrimaryButton, Spinner, Stack, TextField, TooltipHost } from "@fluentui/react"; import { useForm, Controller } from "react-hook-form"; import { LogType } from "@hpcc-js/comms"; import { scopedLogger } from "@hpcc-js/util"; @@ -164,6 +164,8 @@ export const ZAPDialog: React.FunctionComponent = ({ }), [theme]); const [emailDisabled, setEmailDisabled] = React.useState(true); + const [submitDisabled, setSubmitDisabled] = React.useState(false); + const [spinnerHidden, setSpinnerHidden] = React.useState(true); const [columnMode, setColumnMode] = React.useState(ColumnMode.DEFAULT); const [logFormat, setLogFormat] = React.useState(LogFormat.CSV); const [showCustomColumns, setShowCustomColumns] = React.useState(false); @@ -184,6 +186,8 @@ export const ZAPDialog: React.FunctionComponent = ({ const logFilter = data.LogFilter; delete data.LogFilter; + setSubmitDisabled(true); + setSpinnerHidden(false); for (const key in data) { formData.append(key, data[key]); @@ -223,6 +227,8 @@ export const ZAPDialog: React.FunctionComponent = ({ link.click(); link.remove(); + setSubmitDisabled(false); + setSpinnerHidden(true); closeForm(); if (logAccessorMessage !== "") { @@ -253,7 +259,8 @@ export const ZAPDialog: React.FunctionComponent = ({ return - + + closeForm()} /> }> Date: Tue, 25 Jun 2024 17:19:21 -0400 Subject: [PATCH 144/151] HPCC-32143 Remove centos 7, 8, amazonlinux from build targets Signed-off-by: Michael Gardner --- .github/workflows/build-assets.yml | 17 
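The HPCC-32128 change above follows a common guard for long-running form submissions: flip the disabled/spinner state before the async work starts and restore it when it finishes. A minimal standalone sketch of that pattern — the component name, doWork stand-in, and the try/finally restore are illustrative assumptions, not code from the patch:

    import * as React from "react";

    // Hypothetical stand-in for the ZAP report request.
    const doWork = () => new Promise<void>(resolve => setTimeout(resolve, 1000));

    export const SubmitWithSpinner: React.FunctionComponent = () => {
        const [submitDisabled, setSubmitDisabled] = React.useState(false);
        const [spinnerHidden, setSpinnerHidden] = React.useState(true);

        const onSubmit = React.useCallback(async () => {
            setSubmitDisabled(true);   // block double submission
            setSpinnerHidden(false);   // show progress feedback
            try {
                await doWork();
            } finally {
                setSubmitDisabled(false);
                setSpinnerHidden(true);
            }
        }, []);

        return <>
            {!spinnerHidden && <span>Loading...</span>}
            <button disabled={submitDisabled} onClick={onSubmit}>Submit</button>
        </>;
    };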
----------------- .github/workflows/build-vcpkg.yml | 26 -------------------------- 2 files changed, 43 deletions(-) diff --git a/.github/workflows/build-assets.yml b/.github/workflows/build-assets.yml index ca1ac3e0f88..a298a843a5e 100644 --- a/.github/workflows/build-assets.yml +++ b/.github/workflows/build-assets.yml @@ -102,11 +102,6 @@ jobs: name: docs documentation: true - os: ubuntu-20.04 - - os: centos-8 - - os: centos-7 - cmake_options_extra: "-DVCPKG_TARGET_TRIPLET=x64-centos-7-dynamic" - - os: amazonlinux - cmake_options_extra: "-DVCPKG_TARGET_TRIPLET=x64-amazonlinux-dynamic" - os: rockylinux-8 - os: ubuntu-22.04 name: LN k8s @@ -121,22 +116,10 @@ jobs: - os: ubuntu-20.04 name: LN ln: true - - os: centos-8 - name: LN - cmake_options_extra: "" - ln: true - - os: centos-7 - name: LN - cmake_options_extra: "-DVCPKG_TARGET_TRIPLET=x64-centos-7-dynamic" - ln: true - os: rockylinux-8 name: LN cmake_options_extra: "" ln: true - - os: centos-7-rh-python38 - name: LN Python 3.8 - cmake_options_extra: "-DVCPKG_TARGET_TRIPLET=x64-centos-7-dynamic -DCUSTOM_LABEL=_rh_python38" - ln: true fail-fast: false steps: diff --git a/.github/workflows/build-vcpkg.yml b/.github/workflows/build-vcpkg.yml index 0cf26085f6a..a29f49f26e1 100644 --- a/.github/workflows/build-vcpkg.yml +++ b/.github/workflows/build-vcpkg.yml @@ -10,9 +10,6 @@ on: - 'ubuntu-22.04' - 'ubuntu-20.04' - 'rockylinux-8' - - 'centos-8' - - 'centos-7' - - 'amazonlinux' description: 'Operating System' required: false default: 'ubuntu-22.04' @@ -141,29 +138,6 @@ jobs: os: rockylinux-8 secrets: inherit - build-docker-centos-8: - if: ${{ contains('schedule,push', github.event_name) }} - uses: ./.github/workflows/build-docker.yml - with: - os: centos-8 - secrets: inherit - - build-docker-centos-7: - if: ${{ contains('pull_request,push', github.event_name) }} - uses: ./.github/workflows/build-docker.yml - with: - os: centos-7 - cmake-configuration-ex: "-DVCPKG_TARGET_TRIPLET=x64-centos-7-dynamic" - secrets: inherit - - build-docker-amazonlinux: - if: ${{ contains('schedule,push', github.event_name) }} - uses: ./.github/workflows/build-docker.yml - with: - os: amazonlinux - cmake-configuration-ex: "-DVCPKG_TARGET_TRIPLET=x64-amazonlinux-dynamic" - secrets: inherit - build-gh_runner-ubuntu-22_04: if: ${{ contains('schedule,push', github.event_name) }} uses: ./.github/workflows/build-gh_runner.yml From 1fc52a6ddbdae4388cac83801de185accd86960a Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Fri, 5 Jul 2024 09:36:15 +0100 Subject: [PATCH 145/151] Split off 9.0.124 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index f9d3358da5a..893f6ceac83 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
-version: 9.0.123-closedown0 +version: 9.0.125-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. -appVersion: 9.0.123-closedown0 +appVersion: 9.0.125-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 3fdd3c487dd..e84db607579 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1314,7 +1314,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index b59f13fec1e..53612176336 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -50,7 +50,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 annotations: checksum/config: {{ $configSHA }} spec: diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index e7589f792fa..afe4ade316d 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -82,7 +82,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index bae7d7e23ae..ef0b29ffadd 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -56,7 +56,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 966a904a848..9760005fd64 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -135,7 +135,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 218819150d2..b1c114e29ad 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -57,7 +57,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -142,7 +142,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 0c23e48df74..13924c80df9 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -64,7 +64,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 4ad9c05b577..82c7668cb98 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -117,7 +117,7 @@ spec: server: {{ .name | quote }} accessDali: "yes" app: {{ $application }} - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index c2eed816fc7..fd81e430f18 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -70,7 +70,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 9d3f9c2eb6c..804a935baec 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -120,7 +120,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -180,7 +180,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -242,7 +242,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -347,7 +347,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 9262cfb938a..c47cb0710f6 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -52,7 +52,7 @@ spec: run: {{ $serviceName | quote }} server: {{ $serviceName | quote }} accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 9d716ffb94b..5bcc1c19c7e 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -82,7 +82,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -147,7 +147,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -214,7 +214,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf 
"%s-thorworker-job" .me.name)) | indent 12 }} @@ -347,7 +347,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -412,7 +412,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.0.123-closedown0 + helmVersion: 9.0.125-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index a9ed4df74c0..3a5dddf00f5 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 0 ) -set ( HPCC_POINT 123 ) +set ( HPCC_POINT 125 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-28T11:40:55Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-07-05T08:36:15Z" ) ### From e3c9fe0f4c31b038a3a7f15ffe294bd8e39c9d8b Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Fri, 5 Jul 2024 09:37:33 +0100 Subject: [PATCH 146/151] Split off 9.8.2 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index de086da65e6..2ea44e4ddff 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.8.1-closedown0 +version: 9.8.3-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.8.1-closedown0 +appVersion: 9.8.3-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index e571448a037..9dfe15ff52c 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1523,7 +1523,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 3b4046deafa..e107953ff67 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index c92f9d4bb17..4d6c0d58a2f 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 3c39acd576c..3641037b562 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 430ec35143d..a8bebc0ec99 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -139,7 +139,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 1b6c580be70..c1d8221f96f 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -147,7 +147,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 11f3edb0628..0d5b83fc66b 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 14e9ae0d5cb..476d7c4917b 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -125,7 +125,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index fd3ad929130..989712bf0e7 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 03cd7d3effc..aac49e8b770 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index d7558cdcf30..bd9dae275b8 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 706dfacaf2f..e7432127b82 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -88,7 +88,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -153,7 +153,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -220,7 +220,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ -353,7 +353,7 @@ 
spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -418,7 +418,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.8.1-closedown0 + helmVersion: 9.8.3-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 92e98514d49..b831e0d1bb4 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 8 ) -set ( HPCC_POINT 1 ) +set ( HPCC_POINT 3 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-24T14:15:43Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-07-05T08:37:32Z" ) ### From c4ec2e7845ea7565757432472ea5498011d7532d Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Fri, 5 Jul 2024 09:39:00 +0100 Subject: [PATCH 147/151] Split off 9.6.28 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index ea41709c68c..e7a985dca35 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.6.27-closedown0 +version: 9.6.29-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.6.27-closedown0 +appVersion: 9.6.29-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 4f650bbbd0e..e6e11796b7b 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1523,7 +1523,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 2f761c8f4ac..3ed084449ad 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index fdbbe268fc0..3fbc0a46c9d 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index 998b361af9d..3630e102027 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 6f875530323..1ecf5855156 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -139,7 +139,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index da2ba3beae5..ae5f590aca7 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -62,7 +62,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -147,7 +147,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 8dc0b418482..bb5fb36b087 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index a1570fe8acc..4d9711a69c8 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -125,7 +125,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index 5e9cb065aac..d8b351c43ef 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 867edf1e9f0..07d177220e3 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 08641b21ea6..aa022562c67 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index 3c3505a8c98..09b19c12ed8 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -88,7 +88,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -153,7 +153,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -220,7 +220,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-353,7 +353,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -418,7 +418,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.6.27-closedown0 + helmVersion: 9.6.29-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 3ffe34264d4..ce1a22a1538 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 6 ) -set ( HPCC_POINT 27 ) +set ( HPCC_POINT 29 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-28T11:34:54Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-07-05T08:39:00Z" ) ### From 487ee29f0b128a532cec71e6b9330091e7ca133a Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Fri, 5 Jul 2024 09:40:25 +0100 Subject: [PATCH 148/151] Split off 9.4.76 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index 63368e54cce..bd512664537 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.4.75-closedown0 +version: 9.4.77-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.4.75-closedown0 +appVersion: 9.4.77-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index 225a27e6a17..44b5450b6d3 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1519,7 +1519,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 71fbbe75ba9..4c100577984 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -51,7 +51,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index 2b61cc50e8f..b10898b7fe9 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -88,7 +88,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index e6b3ca0c48f..acb1b44ab0b 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -57,7 +57,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 33e874fa680..56ca2e1f78e 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -60,7 +60,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -137,7 +137,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 952d68140c3..7aaf05e1d7e 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -143,7 +143,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index 12f07079b7d..439327aab07 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -65,7 +65,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 11064a7e82f..3622e7f306c 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -122,7 +122,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index eccf6fb914c..87e6681f876 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -73,7 +73,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index 02f2ef84769..43e3c3429c6 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -125,7 +125,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -182,7 +182,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -244,7 +244,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -352,7 +352,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 2eeccbb7348..a8808e4bf46 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index e8f015076d1..ee145b5d751 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -86,7 +86,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -151,7 +151,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -218,7 +218,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | indent 12 }} @@ 
-351,7 +351,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -416,7 +416,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.4.75-closedown0 + helmVersion: 9.4.77-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 39620d6c68a..9fbe6320864 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 4 ) -set ( HPCC_POINT 75 ) +set ( HPCC_POINT 77 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-28T11:37:40Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-07-05T08:40:25Z" ) ### From 3d0a201aa740ec3fd0be5f1dc1414afdc1e148bb Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Fri, 5 Jul 2024 09:41:44 +0100 Subject: [PATCH 149/151] Split off 9.2.102 Signed-off-by: Gordon Smith --- helm/hpcc/Chart.yaml | 4 ++-- helm/hpcc/templates/_helpers.tpl | 2 +- helm/hpcc/templates/dafilesrv.yaml | 2 +- helm/hpcc/templates/dali.yaml | 2 +- helm/hpcc/templates/dfuserver.yaml | 2 +- helm/hpcc/templates/eclagent.yaml | 4 ++-- helm/hpcc/templates/eclccserver.yaml | 4 ++-- helm/hpcc/templates/eclscheduler.yaml | 2 +- helm/hpcc/templates/esp.yaml | 2 +- helm/hpcc/templates/localroxie.yaml | 2 +- helm/hpcc/templates/roxie.yaml | 8 ++++---- helm/hpcc/templates/sasha.yaml | 2 +- helm/hpcc/templates/thor.yaml | 10 +++++----- version.cmake | 4 ++-- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/helm/hpcc/Chart.yaml b/helm/hpcc/Chart.yaml index dc7185a7fec..37ad52a2e92 100644 --- a/helm/hpcc/Chart.yaml +++ b/helm/hpcc/Chart.yaml @@ -6,9 +6,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 9.2.101-closedown0 +version: 9.2.103-closedown0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 9.2.101-closedown0 +appVersion: 9.2.103-closedown0 diff --git a/helm/hpcc/templates/_helpers.tpl b/helm/hpcc/templates/_helpers.tpl index ed555ffd9a7..cac26347e1e 100644 --- a/helm/hpcc/templates/_helpers.tpl +++ b/helm/hpcc/templates/_helpers.tpl @@ -1405,7 +1405,7 @@ kind: Service metadata: name: {{ $lvars.serviceName | quote }} labels: - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $.root "instance" $lvars.serviceName ) | indent 4 }} {{- if $lvars.labels }} {{ toYaml $lvars.labels | indent 4 }} diff --git a/helm/hpcc/templates/dafilesrv.yaml b/helm/hpcc/templates/dafilesrv.yaml index 95f4a3b6cd8..030b400c674 100644 --- a/helm/hpcc/templates/dafilesrv.yaml +++ b/helm/hpcc/templates/dafilesrv.yaml @@ -50,7 +50,7 @@ spec: labels: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dafilesrv" "name" "dafilesrv" "instance" .name) | indent 8 }} server: {{ .name | quote }} - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 annotations: checksum/config: {{ $configSHA }} {{- include "hpcc.generateAnnotations" $commonCtx | indent 8 }} diff --git a/helm/hpcc/templates/dali.yaml b/helm/hpcc/templates/dali.yaml index 6f60764deb1..25babfb26eb 100644 --- a/helm/hpcc/templates/dali.yaml +++ b/helm/hpcc/templates/dali.yaml @@ -82,7 +82,7 @@ spec: run: {{ $dali.name | quote }} server: {{ $dali.name | quote }} app: dali - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} {{- end }} diff --git a/helm/hpcc/templates/dfuserver.yaml b/helm/hpcc/templates/dfuserver.yaml index f2eaaa5c17d..98861431c67 100644 --- a/helm/hpcc/templates/dfuserver.yaml +++ b/helm/hpcc/templates/dfuserver.yaml @@ -56,7 +56,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "dfuserver" "name" "dfuserver" "instance" .name) | indent 8 }} run: {{ .name | quote }} accessDali: "yes" - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclagent.yaml b/helm/hpcc/templates/eclagent.yaml index 5a398cf4cc8..8f14a31df62 100644 --- a/helm/hpcc/templates/eclagent.yaml +++ b/helm/hpcc/templates/eclagent.yaml @@ -58,7 +58,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" $apptype "name" "eclagent" "instance" $appJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -133,7 +133,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclccserver.yaml b/helm/hpcc/templates/eclccserver.yaml index 7c19cc105fd..ea659f47a04 100644 --- a/helm/hpcc/templates/eclccserver.yaml +++ b/helm/hpcc/templates/eclccserver.yaml @@ -57,7 +57,7 @@ data: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclccserver" "name" "eclccserver" "instance" $compileJobName "instanceOf" (printf "%s-job" .me.name)) | indent 12 }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} {{- end }} @@ -140,7 +140,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: {{ .useChildProcesses | default false | ternary "yes" "no" | quote }} - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/eclscheduler.yaml b/helm/hpcc/templates/eclscheduler.yaml index f5eb9ef1a03..59e9b95d443 100644 --- a/helm/hpcc/templates/eclscheduler.yaml +++ b/helm/hpcc/templates/eclscheduler.yaml @@ -64,7 +64,7 @@ spec: run: {{ .name | quote }} accessDali: "yes" accessEsp: "no" - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- if hasKey . "labels" }} {{ toYaml .labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/esp.yaml b/helm/hpcc/templates/esp.yaml index 7652a41ae91..c8bdfdf744e 100644 --- a/helm/hpcc/templates/esp.yaml +++ b/helm/hpcc/templates/esp.yaml @@ -120,7 +120,7 @@ spec: accessSasha: "yes" {{- end }} app: {{ $application }} - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "name" $application "component" "esp" "instance" .name) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8 }} diff --git a/helm/hpcc/templates/localroxie.yaml b/helm/hpcc/templates/localroxie.yaml index e822313c7cb..7a115c72bd8 100644 --- a/helm/hpcc/templates/localroxie.yaml +++ b/helm/hpcc/templates/localroxie.yaml @@ -70,7 +70,7 @@ spec: server: {{ $servername | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $roxie.name) | indent 8 }} {{- if hasKey . 
"labels" }} {{ toYaml .labels | indent 8 }} diff --git a/helm/hpcc/templates/roxie.yaml b/helm/hpcc/templates/roxie.yaml index f115994c70a..ac234501338 100644 --- a/helm/hpcc/templates/roxie.yaml +++ b/helm/hpcc/templates/roxie.yaml @@ -120,7 +120,7 @@ spec: {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 8 }} run: {{ $commonCtx.toponame | quote }} roxie-cluster: {{ $roxie.name | quote }} - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} @@ -178,7 +178,7 @@ kind: Service metadata: name: {{ $commonCtx.toponame | quote }} labels: - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "topology-server" "name" "roxie" "instance" $commonCtx.toponame) | indent 4 }} spec: ports: @@ -240,7 +240,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "roxie-server" "name" "roxie" "instance" $servername) | indent 8 }} {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} @@ -346,7 +346,7 @@ spec: roxie-cluster: {{ $roxie.name | quote }} accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- if hasKey $.Values.global "metrics" }} {{- include "hpcc.generateMetricsReporterLabel" $.Values.global.metrics | nindent 8}} {{- end }} diff --git a/helm/hpcc/templates/sasha.yaml b/helm/hpcc/templates/sasha.yaml index 4bcbcc6b41b..0f2d89d163e 100644 --- a/helm/hpcc/templates/sasha.yaml +++ b/helm/hpcc/templates/sasha.yaml @@ -53,7 +53,7 @@ spec: server: {{ $serviceName | quote }} app: sasha accessDali: {{ (has "dali" $sasha.access) | ternary "yes" "no" | quote }} - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- if hasKey $sasha "labels" }} {{ toYaml $sasha.labels | indent 8 }} {{- end }} diff --git a/helm/hpcc/templates/thor.yaml b/helm/hpcc/templates/thor.yaml index e50507bff55..5efebe03a8b 100644 --- a/helm/hpcc/templates/thor.yaml +++ b/helm/hpcc/templates/thor.yaml @@ -82,7 +82,7 @@ data: labels: accessDali: "yes" accessEsp: "yes" - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $eclAgentJobName "instanceOf" (printf "%s-job" .eclAgentName)) | indent 8 }} {{- if hasKey .me "labels" }} {{ toYaml .me.labels | indent 12 }} @@ -145,7 +145,7 @@ data: accessEsp: "yes" app: "thor" component: "thormanager" - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thormanager" "name" "thor" "instance" $thorManagerJobName "instanceOf" (printf "%s-thormanager-job" .me.name)) | indent 12 }} @@ -210,7 +210,7 @@ data: accessEsp: "yes" app: "thor" component: "thorworker" - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 instance: "_HPCC_JOBNAME_" job: "_HPCC_JOBNAME_" {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "thorworker" "name" "thor" "instance" $thorWorkerJobName "instanceOf" (printf "%s-thorworker-job" .me.name)) | 
indent 12 }} @@ -341,7 +341,7 @@ spec: accessEsp: {{ $commonCtx.eclAgentUseChildProcesses | ternary "yes" "no" | quote }} app: "thor" component: "thor-eclagent" - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 instance: {{ $commonCtx.eclAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.eclAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} @@ -404,7 +404,7 @@ spec: accessEsp: "no" app: "thor" component: "thor-thoragent" - helmVersion: 9.2.101-closedown0 + helmVersion: 9.2.103-closedown0 instance: {{ $commonCtx.thorAgentName | quote }} {{- include "hpcc.addStandardLabels" (dict "root" $ "component" "eclagent" "name" "thor" "instance" $commonCtx.thorAgentName ) | indent 8 }} {{- if hasKey $commonCtx.me "labels" }} diff --git a/version.cmake b/version.cmake index 94b859bb5c4..316946fa4d1 100644 --- a/version.cmake +++ b/version.cmake @@ -5,8 +5,8 @@ set ( HPCC_NAME "Community Edition" ) set ( HPCC_PROJECT "community" ) set ( HPCC_MAJOR 9 ) set ( HPCC_MINOR 2 ) -set ( HPCC_POINT 101 ) +set ( HPCC_POINT 103 ) set ( HPCC_MATURITY "closedown" ) set ( HPCC_SEQUENCE 0 ) -set ( HPCC_TAG_TIMESTAMP "2024-06-28T11:39:35Z" ) +set ( HPCC_TAG_TIMESTAMP "2024-07-05T08:41:44Z" ) ### From 10326708b6e07a5fb4e3dfd5241e29c8d286630d Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Fri, 5 Jul 2024 13:20:37 +0100 Subject: [PATCH 150/151] HPCC-32176 Remove pkg-config from Ubuntu dependencies Signed-off-by: Gordon Smith --- .github/workflows/build-vcpkg.yml | 1 - .github/workflows/smoketest.yml | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/build-vcpkg.yml b/.github/workflows/build-vcpkg.yml index cb9b7500eeb..2898c3fb9ae 100644 --- a/.github/workflows/build-vcpkg.yml +++ b/.github/workflows/build-vcpkg.yml @@ -278,7 +278,6 @@ jobs: gnupg \ groff-base \ libtool \ - pkg-config \ software-properties-common \ tar \ unzip \ diff --git a/.github/workflows/smoketest.yml b/.github/workflows/smoketest.yml index 0428925cbaf..219e864e536 100644 --- a/.github/workflows/smoketest.yml +++ b/.github/workflows/smoketest.yml @@ -32,7 +32,7 @@ env: cacheversion: 3 VCPKG_BINARY_SOURCES: "clear;nuget,GitHub,read" OS_DEPENDENCIES: "bison flex build-essential binutils-dev curl lsb-release libcppunit-dev python3-dev default-jdk - r-base-dev r-cran-rcpp r-cran-rinside r-cran-inline pkg-config libtool autotools-dev automake git cmake" + r-base-dev r-cran-rcpp r-cran-rinside r-cran-inline libtool autotools-dev automake git cmake" on: pull_request: From 4376ee6f1c1585193268c9054507c92b429af57b Mon Sep 17 00:00:00 2001 From: Gordon Smith Date: Tue, 9 Jul 2024 16:59:34 +0100 Subject: [PATCH 151/151] HPCC-32220 Remove unused ARGS Note: ARGS can be leaked to DockerHub by default Signed-off-by: Gordon Smith --- dockerfiles/platform-build/Dockerfile | 3 --- 1 file changed, 3 deletions(-) diff --git a/dockerfiles/platform-build/Dockerfile b/dockerfiles/platform-build/Dockerfile index e5cdbd6b948..4db003f0879 100644 --- a/dockerfiles/platform-build/Dockerfile +++ b/dockerfiles/platform-build/Dockerfile @@ -68,8 +68,6 @@ RUN git clone --no-checkout https://github.com/${BUILD_USER}/HPCC-Platform.git & WORKDIR /hpcc-dev/HPCC-Platform/vcpkg RUN ./bootstrap-vcpkg.sh -ARG GITHUB_ACTOR=hpcc-systems -ARG GITHUB_TOKEN=none WORKDIR /hpcc-dev/build @@ -84,7 +82,6 @@ RUN --mount=type=secret,id=key,uid=10000 if [[ "${SIGN_MODULES}" != "OFF" ]] ; \ ARG BUILD_TYPE=RelWithDebInfo ARG USE_CPPUNIT=1 ARG 
SIGNING_KEYID=HPCCSystems -ARG SIGNING_PASSPHRASE=none RUN --mount=type=secret,id=passphrase,uid=10000 cmake /hpcc-dev/HPCC-Platform \ -Wno-dev -DCONTAINERIZED=1 \ -DINCLUDE_PLUGINS=1 -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DUSE_PYTHON2=0 \
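
Background for HPCC-32220 above: values supplied to Dockerfile `ARG`s at build time are recorded in the image's layer history, so once an image is pushed (e.g. to DockerHub) anyone who can pull it can read them back. The sketch below reproduces the leak with a throwaway image; the file name, tag, and secret value are hypothetical demo names, not taken from the patch, and the exact `docker history` output varies by builder version.

    # Dockerfile.demo -- a minimal sketch that bakes a build ARG into a RUN step
    FROM alpine:3.19
    ARG SIGNING_PASSPHRASE=none
    # Any RUN step that references the ARG records its value in the image history
    RUN test -n "$SIGNING_PASSPHRASE"

    $ docker build -f Dockerfile.demo --build-arg SIGNING_PASSPHRASE=s3cret -t arg-leak-demo .
    $ docker history --no-trunc arg-leak-demo | grep SIGNING_PASSPHRASE
    # ... RUN |1 SIGNING_PASSPHRASE=s3cret /bin/sh -c test -n "$SIGNING_PASSPHRASE" ...

This is why the patch removes the unused `ARG GITHUB_TOKEN`/`ARG SIGNING_PASSPHRASE` declarations and relies on the `RUN --mount=type=secret` pattern already present in the Dockerfile: a secret mount is exposed to the build only for the duration of that single RUN step and is not persisted in any layer or in the image history.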