Skip to content

Commit

Permalink
Merge pull request #157 from lomik/rework-config
Browse files Browse the repository at this point in the history
Rework configuration package
  • Loading branch information
Felixoid authored May 22, 2021
2 parents fa36c3f + da75e2c commit bbb3a62
Show file tree
Hide file tree
Showing 58 changed files with 6,547 additions and 429 deletions.
12 changes: 9 additions & 3 deletions .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -38,10 +38,16 @@ jobs:
git fetch --shallow-since=$(git log $VERS -1 --format=%at)
if: ${{ github.event_name == 'push' }}

- name: Test
- name: Build project
run: make

- name: Check documentation consistency
run: |
make
make test
make config
git diff --exit-code
- name: Test
run: make test

- name: Install packaging dependencies
run: |
Expand Down
18 changes: 16 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -19,14 +19,28 @@ all: $(NAME)

# Remove the built binary and all packaging artifacts.
# Uses `rm -f` throughout so `make clean` succeeds even when nothing
# was built yet (the original `rm $(NAME)` failed on a missing binary).
.PHONY: clean
clean:
	rm -f $(NAME)
	rm -rf out
	rm -f *deb *rpm
	rm -f sha256sum md5sum

# Build the main binary via `go build`.
# NOTE(review): this diff hunk shows both the old rule header `$(NAME):` and
# the new `$(NAME): $(wildcard **/*.go)` — only one exists in the real file;
# kept byte-identical here.
.PHONY: $(NAME)
$(NAME):
$(NAME): $(wildcard **/*.go)
# NOTE(review): GNU make's $(wildcard) does not treat `**` as recursive (it
# matches like `*`), so this catches .go files only one directory deep; and a
# .PHONY target rebuilds regardless of prerequisites — confirm intended
# staleness behaviour.
$(GO) build $(MODULE)

# Regenerate the reference config by asking the freshly built daemon to dump
# its built-in defaults (depends on the binary so it stays in sync with code).
deploy/doc/graphite-clickhouse.conf: $(NAME)
./$(NAME) -config-print-default > $@

# Assemble doc/config.md: a do-not-edit banner, the hand-written prose from
# deploy/doc/config.md, then the generated default config wrapped in a
# ```toml fence. CI runs `make config` and `git diff --exit-code` to catch a
# stale committed copy.
doc/config.md: deploy/doc/graphite-clickhouse.conf deploy/doc/config.md
@echo 'Generating $@...'
@printf '[//]: # (This file is built out of deploy/doc/config.md, please do not edit it manually) \n' > $@
@printf '[//]: # (To rebuild it run `make config`)\n\n' >> $@
@cat deploy/doc/config.md >> $@
@printf '\n```toml\n' >> $@
@cat deploy/doc/graphite-clickhouse.conf >> $@
@printf '```\n' >> $@

config: doc/config.md

# Run the unit tests with the Go race detector enabled.
# Declared .PHONY so a file named `test` cannot shadow this target.
.PHONY: test
test:
	$(GO) test -race ./...

Expand Down
180 changes: 1 addition & 179 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,185 +34,7 @@ make
3. Add graphite-clickhouse `host:port` to graphite-web [CLUSTER_SERVERS](http://graphite.readthedocs.io/en/latest/config-local-settings.html#cluster-configuration)

## Configuration
Create `/etc/graphite-clickhouse/rollup.xml` with same content as for ClickHouse. Short sample:
```xml
<graphite_rollup>
<default>
<function>avg</function>
<retention>
<age>0</age>
<precision>60</precision>
</retention>
<retention>
<age>2592000</age>
<precision>3600</precision>
</retention>
</default>
</graphite_rollup>
```

For complex ClickHouse queries you might need to increase the default max_query_size. To do that, add the following line to `/etc/clickhouse-server/users.xml` for the user you are using:
```xml
<!-- Default is 262144 -->
<max_query_size>10485760</max_query_size>
```

Create `/etc/graphite-clickhouse/graphite-clickhouse.conf`
```toml
[common]
listen = ":9090"
# Listener to serve /debug/pprof requests. `-pprof` argument would override it
pprof-listen = ""
max-cpu = 1
# How frequently to call debug.FreeOSMemory() to return memory back to OS
# Setting it to zero disables this functionality
memory-return-interval = "0s"
# Limit number of results from find query. Zero = unlimited
max-metrics-in-find-answer = 0
# Limit numbers of queried metrics per target in /render requests. Zero or negative are unlimited
max-metrics-per-target = 15000
# Daemon returns empty response if query matches any of regular expressions
# target-blacklist = ["^not_found.*"]
# NOTE(review): duplicate of the memory-return-interval key above — TOML
# forbids repeating a key in the same table; kept commented out for reference.
# memory-return-interval = "0s"

[clickhouse]
# You can add user/password (http://user:password@localhost:8123) and any clickhouse options (GET-parameters) to url
# It is recommended to create a read-only user
url = "http://localhost:8123"
# Add extra prefix (directory in graphite) for all metrics
extra-prefix = ""

# Default table with points
data-table = "graphite"
data-timeout = "1m0s"
# Rollup rules xml filename. Use `auto` magic word for select rollup rules from ClickHouse
rollup-conf = "/etc/graphite-clickhouse/rollup.xml"

# Table with series list (daily and full)
# https://github.com/lomik/graphite-clickhouse/wiki/IndexTable
index-table = "graphite_index"
# Use daily data from the index table. This is useful for installations with a large number of short-lived series, but can be slower in other cases
index-use-daily = true
# Allow the use of reverse queries with a minimal depth
# -1 - disable
# 0 - disable if no wildcard at first level
# 1 - allow for example a.b.c*.d and a.b*.c.d queries (default)
# 2 - also allow a.b*.c.d queries to the index table.
# This is useful when reverse queries have bad performance
index-reverse-depth = 1
# overwrite the default index-reverse-depth for metrics with prefix/suffix/regular_expression checks against the target (not used when index-reverse-depth = -1)
#index-reverses = [
# { suffix = ".p99", reverse = 2 },
# { suffix = ".avg", reverse = 2 },
# { suffix = ".gc.gen1", reverse = 0 },
# { prefix = "Test.", suffix = ".cpu", reverse = 2 },
# { regex = "\\.gc\\.(heap|gen)[0-9]+$", reverse = 0 }
#]
index-reverses = []
index-timeout = "1m0s"

# `tagged` table from carbon-clickhouse. Required for seriesByTag
tagged-table = ""
# For how long the daemon will query tags during autocomplete
tagged-autocomplete-days = 7

# Old index tables. DEPRECATED
tree-table = "graphite_tree"
# Optional table with daily series list.
# Useful for installations with a large number of short-lived series
date-tree-table = ""
# Supported several schemas of date-tree-table:
# 1 (default): table only with Path, Date, Level fields. Described here: https://habrahabr.ru/company/avito/blog/343928/
# 2: table with Path, Date, Level, Deleted, Version fields. Table type "series" in the carbon-clickhouse
# 3: same as #2 but with reversed Path. Table type "series-reverse" in the carbon-clickhouse
date-tree-table-version = 0
tree-timeout = "1m0s"

connect-timeout = "1s"

# Sets the maximum for maxDataPoints parameter.
# If you use CH w/o https://github.com/ClickHouse/ClickHouse/pull/13947, you have to set it to 4096
max-data-points = 4096
# Use metrics aggregation on ClickHouse site.
# This feature is very useful, read https://github.com/lomik/graphite-clickhouse/wiki/ClickHouse-aggregation-VS-graphite%E2%80%94clickhouse-aggregation
internal-aggregation = false

[prometheus]
# The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for
# generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all
# HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically.
external-url = ""
page-title = "Prometheus Time Series Collection and Processing Server"

[carbonlink]
server = ""
threads-per-request = 10
connect-timeout = "50ms"
query-timeout = "50ms"
total-timeout = "500ms"

# You can define multiple data tables (with points).
# The first table that matches is used.
#
# # Sample, archived table with points older 30d
# [[data-table]]
# table = "graphite_archive"
# min-age = "720h"
#
# # All available options
# [[data-table]]
# # clickhouse table name
# table = "table_name"
# # points in table are stored with reverse path
# reverse = false
# # Custom rollup.xml for table.
# # Magic word `auto` can be used for load rules from ClickHouse
# # With value `none` only rollup-default-precision and rollup-default-function will be used for rollup
# rollup-conf = ""
# # Which table to discover rollup-rules from. If not specified - will use what specified in "table" parameter.
# # Useful when reading from distributed table, but the rollup parameters are on the shard tables.
# # Can be in "database.table" form.
# rollup-auto-table = ""
# # Use unreversed metric name to discover rollup-rules. If not specified - use as is (reversed metric name).
# # Use if unreversed rules is used.
# # Ignored for non-reverse tables (unreversed metric names used always)
# rollup-use-reverted = false
# # Sets the default precision and function for rollup patterns which don't have age=0 retention defined.
# # If age=0 retention is defined in the rollup config then it takes precedence.
# # If left at the default value of 0 then no rollup is performed when the requested interval
# # is not covered by any rollup rule. In this case the points will be served with 60 second precision.
# rollup-default-precision = 60
# rollup-default-function = "avg"
# # from >= now - {max-age}
# max-age = "240h"
# # until <= now - {min-age}
# min-age = "240h"
# # until - from <= {max-interval}
# max-interval = "24h"
# # until - from >= {min-interval}
# min-interval = "24h"
# # regexp.Match({target-match-any}, target[0]) || regexp.Match({target-match-any}, target[1]) || ...
# target-match-any = "regexp"
# # regexp.Match({target-match-all}, target[0]) && regexp.Match({target-match-all}, target[1]) && ...
# target-match-all = "regexp"

[debug]
# The directory for debug info. If set, additional info may be saved there
directory = "/var/log/graphite-clickhouse/debug"
directory-perm = "0755"
# File permissions for external data dumps. Enabled only if !=0, see X-Gch-Debug-External-Data header
# Format is octal, e.g. 0640
external-data-perm = "0644"

[[logging]]
logger = ""
file = "/var/log/graphite-clickhouse/graphite-clickhouse.log"
level = "info"
encoding = "mixed"
encoding-time = "iso8601"
encoding-duration = "seconds"
```
See [configuration documentation](./doc/config.md).

### Special headers processing

Expand Down
12 changes: 6 additions & 6 deletions autocomplete/autocomplete.go
Original file line number Diff line number Diff line change
Expand Up @@ -142,11 +142,11 @@ func (h *Handler) ServeTags(w http.ResponseWriter, r *http.Request) {

body, err := clickhouse.Query(
scope.WithTable(r.Context(), h.config.ClickHouse.TaggedTable),
h.config.ClickHouse.Url,
h.config.ClickHouse.URL,
sql,
clickhouse.Options{
Timeout: h.config.ClickHouse.TreeTimeout.Value(),
ConnectTimeout: h.config.ClickHouse.ConnectTimeout.Value(),
Timeout: h.config.ClickHouse.IndexTimeout,
ConnectTimeout: h.config.ClickHouse.ConnectTimeout,
},
nil,
)
Expand Down Expand Up @@ -248,11 +248,11 @@ func (h *Handler) ServeValues(w http.ResponseWriter, r *http.Request) {

body, err := clickhouse.Query(
scope.WithTable(r.Context(), h.config.ClickHouse.TaggedTable),
h.config.ClickHouse.Url,
h.config.ClickHouse.URL,
sql,
clickhouse.Options{
Timeout: h.config.ClickHouse.IndexTimeout.Value(),
ConnectTimeout: h.config.ClickHouse.ConnectTimeout.Value(),
Timeout: h.config.ClickHouse.IndexTimeout,
ConnectTimeout: h.config.ClickHouse.ConnectTimeout,
},
nil,
)
Expand Down
1 change: 1 addition & 0 deletions config/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
tests_tmp/
Loading

0 comments on commit bbb3a62

Please sign in to comment.