Merge branch 'main' into add-focus-flag-to-promtool-test-rules

Signed-off-by: Julien Pivotto <roidelapluie@o11y.eu>

This commit is contained in commit 74cd1b6a09.
.github/CODEOWNERS (vendored, 5 changes)

@@ -1,7 +1,10 @@
 /web/ui @juliusv
 /web/ui/module @juliusv @nexucis
 /storage/remote @csmarchbanks @cstyan @bwplotka @tomwilkie
+/storage/remote/otlptranslator @gouthamve @jesusvazquez
 /discovery/kubernetes @brancz
-/tsdb @codesome
+/tsdb @jesusvazquez
 /promql @roidelapluie
 /cmd/promtool @dgl
+/documentation/prometheus-mixin @metalmatze
+
.github/PULL_REQUEST_TEMPLATE.md (vendored, 8 changes)

@@ -1,14 +1,16 @@
 <!--
+Please give your PR a title in the form "area: short description". For example "tsdb: reduce disk usage by 95%"
+
+If your PR is to fix an issue, put "Fixes #issue-number" in the description.
+
 Don't forget!

-- Please sign CNCF's Developer Certificate of Origin and sign-off your commits by adding the -s / --sign-off flag to `git commit`. See https://github.com/apps/dco for more information.
+- Please sign CNCF's Developer Certificate of Origin and sign-off your commits by adding the -s / --signoff flag to `git commit`. See https://github.com/apps/dco for more information.

 - If the PR adds or changes a behaviour or fixes a bug of an exported API it would need a unit/e2e test.

 - Where possible use only exported APIs for tests to simplify the review and make it as close as possible to an actual library usage.

-- No tests are needed for internal implementation changes.
-
 - Performance improvements would need a benchmark test to prove it.

 - All exposed objects should have a comment.
.github/dependabot.yml (vendored, 11 changes)

@@ -4,6 +4,13 @@ updates:
     directory: "/"
     schedule:
       interval: "monthly"
+    groups:
+      k8s.io:
+        patterns:
+          - "k8s.io/*"
+      go.opentelemetry.io:
+        patterns:
+          - "go.opentelemetry.io/*"
   - package-ecosystem: "gomod"
     directory: "/documentation/examples/remote_storage"
     schedule:
@@ -16,6 +23,10 @@ updates:
     directory: "/"
     schedule:
       interval: "monthly"
+  - package-ecosystem: "github-actions"
+    directory: "/scripts"
+    schedule:
+      interval: "monthly"
   - package-ecosystem: "docker"
     directory: "/"
     schedule:
.github/workflows/buf-lint.yml (vendored, 11 changes)

@@ -4,19 +4,22 @@ on:
   paths:
     - ".github/workflows/buf-lint.yml"
    - "**.proto"
+permissions:
+  contents: read
+
 jobs:
   buf:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: bufbuild/buf-setup-action@v1.13.1
+      - uses: bufbuild/buf-setup-action@eb60cd0de4f14f1f57cf346916b8cd69a9e7ed0b # v1.26.1
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: bufbuild/buf-lint-action@v1
+      - uses: bufbuild/buf-lint-action@bd48f53224baaaf0fc55de9a913e7680ca6dbea4 # v1.0.3
        with:
          input: 'prompb'
-      - uses: bufbuild/buf-breaking-action@v1
+      - uses: bufbuild/buf-breaking-action@f47418c81c00bfd65394628385593542f64db477 # v1.1.2
        with:
          input: 'prompb'
          against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD,subdir=prompb'
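An editorial note, not part of the diff: every third-party action in these workflow updates is pinned to a full commit SHA, with the human-readable release kept as a trailing comment. A minimal sketch of the idiom, using the checkout pin that appears throughout this commit:

```yaml
steps:
  # Pinning to a commit SHA prevents a moved or re-tagged release from
  # silently changing the code this workflow runs; the comment records
  # which release the SHA corresponds to.
  - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
```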
.github/workflows/buf.yml (vendored, 14 changes)

@@ -3,23 +3,27 @@ on:
   push:
     branches:
      - main
+permissions:
+  contents: read
+
 jobs:
   buf:
     name: lint and publish
     runs-on: ubuntu-latest
+    if: github.repository_owner == 'prometheus'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: bufbuild/buf-setup-action@v1.13.1
+      - uses: bufbuild/buf-setup-action@eb60cd0de4f14f1f57cf346916b8cd69a9e7ed0b # v1.26.1
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: bufbuild/buf-lint-action@v1
+      - uses: bufbuild/buf-lint-action@bd48f53224baaaf0fc55de9a913e7680ca6dbea4 # v1.0.3
        with:
          input: 'prompb'
-      - uses: bufbuild/buf-breaking-action@v1
+      - uses: bufbuild/buf-breaking-action@f47418c81c00bfd65394628385593542f64db477 # v1.1.2
        with:
          input: 'prompb'
          against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD~1,subdir=prompb'
-      - uses: bufbuild/buf-push-action@v1
+      - uses: bufbuild/buf-push-action@342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1 # v1.1.1
        with:
          input: 'prompb'
          buf_token: ${{ secrets.BUF_TOKEN }}
.github/workflows/ci.yml (vendored, 65 changes)

@@ -3,6 +3,7 @@ name: CI
 on:
   pull_request:
   push:
+
 jobs:
   test_go:
     name: Go tests
@@ -10,10 +11,10 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.20-base
+      image: quay.io/prometheus/golang-builder:1.21-base
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: prometheus/promci@v0.0.2
+      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/setup_environment
       - run: make GO_ONLY=1 SKIP_GOLANGCI_LINT=1
       - run: go test ./tsdb/ -test.tsdb-isolation=false
@@ -31,11 +32,11 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.20-base
+      image: quay.io/prometheus/golang-builder:1.21-base

     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: prometheus/promci@v0.0.2
+      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/setup_environment
         with:
           enable_go: false
@@ -51,10 +52,10 @@ jobs:
     name: Go tests on Windows
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
         with:
-          go-version: '>=1.20 <1.21'
+          go-version: 1.21.x
       - run: |
          $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
          go test $TestTargets -vet=off -v
@@ -65,9 +66,9 @@ jobs:
     runs-on: ubuntu-latest
     # The go verson in this image should be N-1 wrt test_go.
     container:
-      image: quay.io/prometheus/golang-builder:1.18-base
+      image: quay.io/prometheus/golang-builder:1.20-base
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       - run: make build
       - run: go test ./tsdb/...
       - run: go test ./tsdb/ -test.tsdb-isolation=false
@@ -78,9 +79,9 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.19-base
+      image: quay.io/prometheus/golang-builder:1.20-base
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       - run: go install ./cmd/promtool/.
       - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
       - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
@@ -103,8 +104,8 @@ jobs:
     matrix:
       thread: [ 0, 1, 2 ]
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: prometheus/promci@v0.0.2
+      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/build
         with:
           promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
@@ -126,8 +127,8 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: prometheus/promci@v0.0.2
+      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/build
         with:
           parallelism: 12
@@ -137,18 +138,20 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       - name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
         with:
-          go-version: 1.20.x
+          cache: false
+          go-version: 1.21.x
       - name: Install snmp_exporter/generator dependencies
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
       - name: Lint
-        uses: golangci/golangci-lint-action@v3.4.0
+        uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0
         with:
-          version: v1.51.2
+          args: --verbose
+          version: v1.54.2
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'
@@ -161,8 +164,8 @@ jobs:
     needs: [test_ui, test_go, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: prometheus/promci@v0.0.2
+      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/publish_main
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -170,13 +173,13 @@ jobs:
       quay_io_login: ${{ secrets.quay_io_login }}
       quay_io_password: ${{ secrets.quay_io_password }}
   publish_release:
-    name: Publish release arfefacts
+    name: Publish release artefacts
     runs-on: ubuntu-latest
     needs: [test_ui, test_go, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: prometheus/promci@v0.0.2
+      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/publish_release
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}
@@ -190,14 +193,14 @@ jobs:
     needs: [test_ui, codeql]
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: prometheus/promci@v0.0.2
+      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - name: Install nodejs
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d # v3.8.1
         with:
           node-version-file: "web/ui/.nvmrc"
           registry-url: "https://registry.npmjs.org"
-      - uses: actions/cache@v3.2.4
+      - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
         with:
           path: ~/.npm
           key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
.github/workflows/codeql-analysis.yml (vendored, 16 changes)

@@ -6,6 +6,10 @@ on:
   schedule:
     - cron: "26 14 * * 1"

+permissions:
+  contents: read
+  security-events: write
+
 jobs:
   analyze:
     name: Analyze
@@ -20,18 +24,18 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: actions/setup-go@v3
+      - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
         with:
-          go-version: '>=1.20 <1.21'
+          go-version: 1.21.x

       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v2
+        uses: github/codeql-action/init@74483a38d39275f33fcff5f35b679b5ca4a26a99 # v2.22.5
         with:
           languages: ${{ matrix.language }}

       - name: Autobuild
-        uses: github/codeql-action/autobuild@v2
+        uses: github/codeql-action/autobuild@74483a38d39275f33fcff5f35b679b5ca4a26a99 # v2.22.5

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v2
+        uses: github/codeql-action/analyze@74483a38d39275f33fcff5f35b679b5ca4a26a99 # v2.22.5
.github/workflows/funcbench.yml (vendored, 3 changes)

@@ -2,6 +2,9 @@ on:
   repository_dispatch:
     types: [funcbench_start]
 name: Funcbench Workflow
+permissions:
+  contents: read
+
 jobs:
   run_funcbench:
     name: Running funcbench
.github/workflows/fuzzing.yml (vendored, 5 changes)

@@ -1,6 +1,9 @@
 name: CIFuzz
 on:
   workflow_call:
+permissions:
+  contents: read
+
 jobs:
   Fuzzing:
     runs-on: ubuntu-latest
@@ -18,7 +21,7 @@ jobs:
           fuzz-seconds: 600
           dry-run: false
       - name: Upload Crash
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
         if: failure() && steps.build.outcome == 'success'
         with:
           name: artifacts
.github/workflows/lock.yml (vendored, 3 changes)

@@ -14,8 +14,9 @@ concurrency:
 jobs:
   action:
     runs-on: ubuntu-latest
+    if: github.repository_owner == 'prometheus'
     steps:
-      - uses: dessant/lock-threads@v4
+      - uses: dessant/lock-threads@be8aa5be94131386884a6da4189effda9b14aa21 # v4.0.1
        with:
          process-only: 'issues'
          issue-inactive-days: '180'
.github/workflows/repo_sync.yml (vendored, 6 changes)

@@ -3,13 +3,17 @@ name: Sync repo files
 on:
   schedule:
     - cron: '44 17 * * *'
+permissions:
+  contents: read
+
 jobs:
   repo_sync:
     runs-on: ubuntu-latest
+    if: github.repository_owner == 'prometheus'
     container:
       image: quay.io/prometheus/golang-builder
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       - run: ./scripts/sync_repo_files.sh
        env:
          GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}
.github/workflows/scorecards.yml (vendored, new file, 50 additions)

@@ -0,0 +1,50 @@
+# Copyright 2022 Google LLC
+
+name: Scorecards supply-chain security
+on:
+  pull_request:
+  push:
+    branches: [ "main" ]
+
+# Declare default permissions as read only.
+permissions: read-all
+
+jobs:
+  analysis:
+    name: Scorecards analysis
+    runs-on: ubuntu-latest
+    permissions:
+      # Needed to upload the results to code-scanning dashboard.
+      security-events: write
+      # Used to receive a badge.
+      id-token: write
+
+    steps:
+      - name: "Checkout code"
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # tag=v4.1.1
+        with:
+          persist-credentials: false
+
+      - name: "Run analysis"
+        uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # tag=v2.3.1
+        with:
+          results_file: results.sarif
+          results_format: sarif
+          # Publish the results for public repositories to enable scorecard badges. For more details, see
+          # https://github.com/ossf/scorecard-action#publishing-results.
+          publish_results: ${{ github.event_name != 'pull_request' }}
+
+      # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
+      # format to the repository Actions tab.
+      - name: "Upload artifact"
+        uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # tag=v3.1.3
+        with:
+          name: SARIF file
+          path: results.sarif
+          retention-days: 5
+
+      # Upload the results to GitHub's code scanning dashboard.
+      - name: "Upload to code-scanning"
+        uses: github/codeql-action/upload-sarif@74483a38d39275f33fcff5f35b679b5ca4a26a99 # tag=v2.22.5
+        with:
+          sarif_file: results.sarif
.golangci.yml (101 changes)

@@ -1,8 +1,12 @@
 run:
-  deadline: 5m
+  timeout: 15m
   skip-files:
     # Skip autogenerated files.
     - ^.*\.(pb|y)\.go$
+  skip-dirs:
+    # Copied it from a different source
+    - storage/remote/otlptranslator/prometheusremotewrite
+    - storage/remote/otlptranslator/prometheus

 output:
   sort-results: true
@@ -10,31 +14,106 @@ output:
 linters:
   enable:
     - depguard
+    - errorlint
+    - gocritic
+    - godot
     - gofumpt
     - goimports
-    - revive
     - misspell
+    - nolintlint
+    - predeclared
+    - revive
+    - unconvert
+    - unused

 issues:
   max-same-issues: 0
   exclude-rules:
+    - linters:
+        - gocritic
+      text: "appendAssign"
     - path: _test.go
       linters:
         - errcheck
+    - path: tsdb/
+      linters:
+        - errorlint
+    - path: tsdb/
+      text: "import 'github.com/pkg/errors' is not allowed"
+      linters:
+        - depguard
+    - linters:
+        - godot
+      source: "^// ==="

 linters-settings:
   depguard:
-    list-type: blacklist
-    include-go-root: true
-    packages-with-error-message:
-      - sync/atomic: "Use go.uber.org/atomic instead of sync/atomic"
-      - github.com/stretchr/testify/assert: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
-      - github.com/go-kit/kit/log: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
-      - io/ioutil: "Use corresponding 'os' or 'io' functions instead."
-      - regexp: "Use github.com/grafana/regexp instead of regexp"
+    rules:
+      main:
+        deny:
+          - pkg: "sync/atomic"
+            desc: "Use go.uber.org/atomic instead of sync/atomic"
+          - pkg: "github.com/stretchr/testify/assert"
+            desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
+          - pkg: "github.com/go-kit/kit/log"
+            desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
+          - pkg: "io/ioutil"
+            desc: "Use corresponding 'os' or 'io' functions instead."
+          - pkg: "regexp"
+            desc: "Use github.com/grafana/regexp instead of regexp"
+          - pkg: "github.com/pkg/errors"
+            desc: "Use 'errors' or 'fmt' instead of github.com/pkg/errors"
   errcheck:
-    exclude: scripts/errcheck_excludes.txt
+    exclude-functions:
+      # Don't flag lines such as "io.Copy(io.Discard, resp.Body)".
+      - io.Copy
+      # The next two are used in HTTP handlers, any error is handled by the server itself.
+      - io.WriteString
+      - (net/http.ResponseWriter).Write
+      # No need to check for errors on server's shutdown.
+      - (*net/http.Server).Shutdown
+      # Never check for logger errors.
+      - (github.com/go-kit/log.Logger).Log
+      # Never check for rollback errors as Rollback() is called when a previous error was detected.
+      - (github.com/prometheus/prometheus/storage.Appender).Rollback
   goimports:
     local-prefixes: github.com/prometheus/prometheus
   gofumpt:
     extra-rules: true
+  revive:
+    # By default, revive will enable only the linting rules that are named in the configuration file.
+    # So, it's needed to explicitly set in configuration all required rules.
+    # The following configuration enables all the rules from the defaults.toml
+    # https://github.com/mgechev/revive/blob/master/defaults.toml
+    rules:
+      # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
+      - name: blank-imports
+      - name: context-as-argument
+        arguments:
+          # allow functions with test or bench signatures
+          - allowTypesBefore: "*testing.T,testing.TB"
+      - name: context-keys-type
+      - name: dot-imports
+      # A lot of false positives: incorrectly identifies channel draining as "empty code block".
+      # See https://github.com/mgechev/revive/issues/386
+      - name: empty-block
+        disabled: true
+      - name: error-naming
+      - name: error-return
+      - name: error-strings
+      - name: errorf
+      - name: exported
+      - name: increment-decrement
+      - name: indent-error-flow
+      - name: package-comments
+      - name: range
+      - name: receiver-naming
+      - name: redefines-builtin-id
+      - name: superfluous-else
+      - name: time-naming
+      - name: unexported-return
+      - name: unreachable-code
+      - name: unused-parameter
+        disabled: true
+      - name: var-declaration
+      - name: var-naming
.promu.yml (filename inferred from hunk content)

@@ -1,7 +1,7 @@
 go:
   # Whenever the Go version is updated here,
   # .circle/config.yml should also be updated.
-  version: 1.20
+  version: 1.21
 repository:
   path: github.com/prometheus/prometheus
 build:
@@ -14,9 +14,10 @@ build:
     all:
       - netgo
       - builtinassets
+      - stringlabels
     windows:
       - builtinassets
-  flags: -a
+      - stringlabels
   ldflags: |
     -X github.com/prometheus/common/version.Version={{.Version}}
     -X github.com/prometheus/common/version.Revision={{.Revision}}
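The `stringlabels` tag added above selects the smaller Labels representation described in the 2.44.0 notes further down in CHANGELOG.md (#10991). As a rough illustration, and assuming a checkout of this repository, building with the tag by hand would look like `go build -tags stringlabels ./cmd/prometheus`; in release builds promu injects it via the `tags` lists shown in this hunk.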
.yamllint (filename inferred from hunk content)

@@ -20,5 +20,4 @@ rules:
       config/testdata/section_key_dup.bad.yml
   line-length: disable
   truthy:
-    ignore: |
-      .github/workflows/*.yml
+    check-keys: false
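For reference (an editorial note, not part of the diff): yamllint's truthy rule flags bare keys like `on:` in GitHub workflow files, which is why the old config ignored `.github/workflows/*.yml` wholesale; setting `check-keys: false` disables key checking in that rule, making the per-path ignore list unnecessary.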
CHANGELOG.md (138 changes)

@@ -1,5 +1,139 @@
 # Changelog

+## unreleased
+
+* [ENHANCEMENT] TSDB: Make the wlog watcher read segments synchronously when not tailing. #13224
+* [BUGFIX] Agent: Participate in notify calls. #13223
+
+## 2.48.0 / 2023-11-16
+
+* [CHANGE] Remote-write: respect Retry-After header on 5xx errors. #12677
+* [FEATURE] Alerting: Add AWS SigV4 authentication support for Alertmanager endpoints. #12774
+* [FEATURE] Promtool: Add support for histograms in the TSDB dump command. #12775
+* [FEATURE] PromQL: Add warnings (and annotations) to PromQL query results. #12152 #12982 #12988 #13012
+* [FEATURE] Remote-write: Add Azure AD OAuth authentication support for remote write requests. #12572
+* [ENHANCEMENT] Remote-write: Add a header to count retried remote write requests. #12729
+* [ENHANCEMENT] TSDB: Improve query performance by re-using iterator when moving between series. #12757
+* [ENHANCEMENT] UI: Move /targets page discovered labels to expandable section #12824
+* [ENHANCEMENT] TSDB: Optimize WBL loading by not sending empty buffers over channel. #12808
+* [ENHANCEMENT] TSDB: Reply WBL mmap markers concurrently. #12801
+* [ENHANCEMENT] Promtool: Add support for specifying series matchers in the TSDB analyze command. #12842
+* [ENHANCEMENT] PromQL: Prevent Prometheus from overallocating memory on subquery with large amount of steps. #12734
+* [ENHANCEMENT] PromQL: Add warning when monotonicity is forced in the input to histogram_quantile. #12931
+* [ENHANCEMENT] Scraping: Optimize sample appending by reducing garbage. #12939
+* [ENHANCEMENT] Storage: Reduce memory allocations in queries that merge series sets. #12938
+* [ENHANCEMENT] UI: Show group interval in rules display. #12943
+* [ENHANCEMENT] Scraping: Save memory when scraping by delaying creation of buffer. #12953
+* [ENHANCEMENT] Agent: Allow ingestion of out-of-order samples. #12897
+* [ENHANCEMENT] Promtool: Improve support for native histograms in TSDB analyze command. #12869
+* [ENHANCEMENT] Scraping: Add configuration option for tracking staleness of scraped timestamps. #13060
+* [BUGFIX] SD: Ensure that discovery managers are properly canceled. #10569
+* [BUGFIX] TSDB: Fix PostingsForMatchers race with creating new series. #12558
+* [BUGFIX] TSDB: Fix handling of explicit counter reset header in histograms. #12772
+* [BUGFIX] SD: Validate HTTP client configuration in HTTP, EC2, Azure, Uyuni, PuppetDB, and Lightsail SDs. #12762 #12811 #12812 #12815 #12814 #12816
+* [BUGFIX] TSDB: Fix counter reset edgecases causing native histogram panics. #12838
+* [BUGFIX] TSDB: Fix duplicate sample detection at chunk size limit. #12874
+* [BUGFIX] Promtool: Fix errors not being reported in check rules command. #12715
+* [BUGFIX] TSDB: Avoid panics reported in logs when head initialization takes a long time. #12876
+* [BUGFIX] TSDB: Ensure that WBL is repaired when possible. #12406
+* [BUGFIX] Storage: Fix crash caused by incorrect mixed samples handling. #13055
+* [BUGFIX] TSDB: Fix compactor failures by adding min time to histogram chunks. #13062
+
+## 2.47.1 / 2023-10-04
+
+* [BUGFIX] Fix duplicate sample detection at chunk size limit #12874
+
+## 2.47.0 / 2023-09-06
+
+This release adds an experimental OpenTelemetry (OTLP) Ingestion feature,
+and also new setting `keep_dropped_targets` to limit the amount of dropped
+targets held in memory. This defaults to 0 meaning 'no limit', so we encourage
+users with large Prometheus to try setting a limit such as 100.
+
+* [FEATURE] Web: Add OpenTelemetry (OTLP) Ingestion endpoint. #12571 #12643
+* [FEATURE] Scraping: Optionally limit detail on dropped targets, to save memory. #12647
+* [ENHANCEMENT] TSDB: Write head chunks to disk in the background to reduce blocking. #11818
+* [ENHANCEMENT] PromQL: Speed up aggregate and function queries. #12682
+* [ENHANCEMENT] PromQL: More efficient evaluation of query with `timestamp()`. #12579
+* [ENHANCEMENT] API: Faster streaming of Labels to JSON. #12598
+* [ENHANCEMENT] Agent: Memory pooling optimisation. #12651
+* [ENHANCEMENT] TSDB: Prevent storage space leaks due to terminated snapshots on shutdown. #12664
+* [ENHANCEMENT] Histograms: Refactoring and optimisations. #12352 #12584 #12596 #12711 #12054
+* [ENHANCEMENT] Histograms: Add `histogram_stdvar` and `histogram_stddev` functions. #12614
+* [ENHANCEMENT] Remote-write: add http.resend_count tracing attribute. #12676
+* [ENHANCEMENT] TSDB: Support native histograms in snapshot on shutdown. #12722
+* [BUGFIX] TSDB/Agent: ensure that new series get written to WAL on rollback. #12592
+* [BUGFIX] Scraping: fix infinite loop on exemplar in protobuf format. #12737
+
+## 2.46.0 / 2023-07-25
+
+* [FEATURE] Promtool: Add PromQL format and label matcher set/delete commands to promtool. #11411
+* [FEATURE] Promtool: Add push metrics command. #12299
+* [ENHANCEMENT] Promtool: Read from stdin if no filenames are provided in check rules. #12225
+* [ENHANCEMENT] Hetzner SD: Support larger ID's that will be used by Hetzner in September. #12569
+* [ENHANCEMENT] Kubernetes SD: Add more labels for endpointslice and endpoints role. #10914
+* [ENHANCEMENT] Kubernetes SD: Do not add pods to target group if the PodIP status is not set. #11642
+* [ENHANCEMENT] OpenStack SD: Include instance image ID in labels. #12502
+* [ENHANCEMENT] Remote Write receiver: Validate the metric names and labels. #11688
+* [ENHANCEMENT] Web: Initialize `prometheus_http_requests_total` metrics with `code` label set to `200`. #12472
+* [ENHANCEMENT] TSDB: Add Zstandard compression option for wlog. #11666
+* [ENHANCEMENT] TSDB: Support native histograms in snapshot on shutdown. #12258
+* [ENHANCEMENT] Labels: Avoid compiling regexes that are literal. #12434
+* [BUGFIX] Histograms: Fix parsing of float histograms without zero bucket. #12577
+* [BUGFIX] Histograms: Fix scraping native and classic histograms missing some histograms. #12554
+* [BUGFIX] Histograms: Enable ingestion of multiple exemplars per sample. 12557
+* [BUGFIX] File SD: Fix path handling in File-SD watcher to allow directory monitoring on Windows. #12488
+* [BUGFIX] Linode SD: Cast `InstanceSpec` values to `int64` to avoid overflows on 386 architecture. #12568
+* [BUGFIX] PromQL Engine: Include query parsing in active-query tracking. #12418
+* [BUGFIX] TSDB: Handle TOC parsing failures. #10623
+
+## 2.45.0 / 2023-06-23
+
+This release is a LTS (Long-Term Support) release of Prometheus and will
+receive security, documentation and bugfix patches for at least 12 months.
+Please read more about our LTS release cycle at
+<https://prometheus.io/docs/introduction/release-cycle/>.
+
+* [FEATURE] API: New limit parameter to limit the number of items returned by `/api/v1/status/tsdb` endpoint. #12336
+* [FEATURE] Config: Add limits to global config. #12126
+* [FEATURE] Consul SD: Added support for `path_prefix`. #12372
+* [FEATURE] Native histograms: Add option to scrape both classic and native histograms. #12350
+* [FEATURE] Native histograms: Added support for two more arithmetic operators `avg_over_time` and `sum_over_time`. #12262
+* [FEATURE] Promtool: When providing the block id, only one block will be loaded and analyzed. #12031
+* [FEATURE] Remote-write: New Azure ad configuration to support remote writing directly to Azure Monitor workspace. #11944
+* [FEATURE] TSDB: Samples per chunk are now configurable with flag `storage.tsdb.samples-per-chunk`. By default set to its former value 120. #12055
+* [ENHANCEMENT] Native histograms: bucket size can now be limited to avoid scrape fails. #12254
+* [ENHANCEMENT] TSDB: Dropped series are now deleted from the WAL sooner. #12297
+* [BUGFIX] Native histograms: ChunkSeries iterator now checks if a new sample can be appended to the open chunk. #12185
+* [BUGFIX] Native histograms: Fix Histogram Appender `Appendable()` segfault. #12357
+* [BUGFIX] Native histograms: Fix setting reset header to gauge histograms in seriesToChunkEncoder. #12329
+* [BUGFIX] TSDB: Tombstone intervals are not modified after Get() call. #12245
+* [BUGFIX] TSDB: Use path/filepath to set the WAL directory. #12349
+
+## 2.44.0 / 2023-05-13
+
+This version is built with Go tag `stringlabels`, to use the smaller data
+structure for Labels that was optional in the previous release. For more
+details about this code change see #10991.
+
+* [CHANGE] Remote-write: Raise default samples per send to 2,000. #12203
+* [FEATURE] Remote-read: Handle native histograms. #12085, #12192
+* [FEATURE] Promtool: Health and readiness check of prometheus server in CLI. #12096
+* [FEATURE] PromQL: Add `query_samples_total` metric, the total number of samples loaded by all queries. #12251
+* [ENHANCEMENT] Storage: Optimise buffer used to iterate through samples. #12326
+* [ENHANCEMENT] Scrape: Reduce memory allocations on target labels. #12084
+* [ENHANCEMENT] PromQL: Use faster heap method for `topk()` / `bottomk()`. #12190
+* [ENHANCEMENT] Rules API: Allow filtering by rule name. #12270
+* [ENHANCEMENT] Native Histograms: Various fixes and improvements. #11687, #12264, #12272
+* [ENHANCEMENT] UI: Search of scraping pools is now case-insensitive. #12207
+* [ENHANCEMENT] TSDB: Add an affirmative log message for successful WAL repair. #12135
+* [BUGFIX] TSDB: Block compaction failed when shutting down. #12179
+* [BUGFIX] TSDB: Out-of-order chunks could be ignored if the write-behind log was deleted. #12127
+
+## 2.43.1 / 2023-05-03
+
+* [BUGFIX] Labels: `Set()` after `Del()` would be ignored, which broke some relabeling rules. #12322
+
 ## 2.43.0 / 2023-03-21

 We are working on some performance improvements in Prometheus, which are only
@@ -13,9 +147,9 @@ the gains on their production architecture. We are providing release artefacts
 improvements for testing. #10991

 * [FEATURE] Promtool: Add HTTP client configuration to query commands. #11487
-* [FEATURE] Scrape: Add `include_scrape_configs` to include scrape configs from different files. #12019
+* [FEATURE] Scrape: Add `scrape_config_files` to include scrape configs from different files. #12019
 * [FEATURE] HTTP client: Add `no_proxy` to exclude URLs from proxied requests. #12098
-* [FEATURE] HTTP client: Add `proxy_from_enviroment` to read proxies from env variables. #12098
+* [FEATURE] HTTP client: Add `proxy_from_environment` to read proxies from env variables. #12098
 * [ENHANCEMENT] API: Add support for setting lookback delta per query via the API. #12088
 * [ENHANCEMENT] API: Change HTTP status code from 503/422 to 499 if a request is canceled. #11897
 * [ENHANCEMENT] Scrape: Allow exemplars for all metric types. #11984
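The 2.47.0 notes above encourage setting a dropped-targets limit. A minimal sketch of what that could look like in a scrape configuration (illustrative only, not part of this diff; assumes an otherwise standard prometheus.yml):

```yaml
global:
  # Keep details for at most 100 relabeling-dropped targets per scrape
  # configuration in memory (0, the default, means no limit).
  keep_dropped_targets: 100
```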
CONTRIBUTING.md (filename inferred from hunk content)

@@ -85,7 +85,7 @@ The PromQL parser grammar is located in `promql/parser/generated_parser.y` and i
 The parser is built using [goyacc](https://pkg.go.dev/golang.org/x/tools/cmd/goyacc)

 If doing some sort of debugging, then it is possible to add some verbose output. After generating the parser, then you
-can modify the the `./promql/parser/generated_parser.y.go` manually.
+can modify the `./promql/parser/generated_parser.y.go` manually.

 ```golang
 // As of writing this was somewhere around line 600.
MAINTAINERS.md (filename inferred from hunk content)

@@ -7,10 +7,10 @@ Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison
 * `discovery`
   * `k8s`: Frederic Branczyk (<fbranczyk@gmail.com> / @brancz)
 * `documentation`
-  * `prometheus-mixin`: Björn Rabenstein (<beorn@grafana.com> / @beorn7)
+  * `prometheus-mixin`: Matthias Loibl (<mail@matthiasloibl.com> / @metalmatze)
 * `storage`
   * `remote`: Chris Marchbanks (<csmarchbanks@gmail.com> / @csmarchbanks), Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie)
-  * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka)
+  * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto)
 * `web`
   * `ui`: Julius Volz (<julius.volz@gmail.com> / @juliusv)
Makefile (2 changes)

@@ -82,7 +82,7 @@ assets-tarball: assets
 .PHONY: parser
 parser:
 	@echo ">> running goyacc to generate the .go file."
-ifeq (, $(shell which goyacc))
+ifeq (, $(shell command -v goyacc > /dev/null))
 	@echo "goyacc not installed so skipping"
 	@echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0"
 else
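One caveat worth flagging on these `which` to `command -v` conversions (here and in the Makefile.common hunks below), as an observation about the quoted change rather than part of the commit: `$(shell ...)` expands to the command's standard output, and with `> /dev/null` that output is always empty, so `ifeq (, $(shell command -v goyacc > /dev/null))` holds whether or not goyacc is installed. Redirecting only stderr, e.g. `$(shell command -v goyacc 2> /dev/null)`, would preserve the intended presence check.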
Makefile.common (filename inferred from hunk content)

@@ -49,19 +49,19 @@ endif
 GOTEST := $(GO) test
 GOTEST_DIR :=
 ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell which gotestsum),)
+ifneq ($(shell command -v gotestsum > /dev/null),)
 GOTEST_DIR := test-results
 GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
 endif
 endif

-PROMU_VERSION ?= 0.14.0
+PROMU_VERSION ?= 0.15.0
 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz

 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.51.2
+GOLANGCI_LINT_VERSION ?= v1.54.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -178,7 +178,7 @@ endif
 .PHONY: common-yamllint
 common-yamllint:
 	@echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell which yamllint))
+ifeq (, $(shell command -v yamllint > /dev/null))
 	@echo "yamllint not installed so skipping"
 else
 	yamllint .
README.md (filename inferred from hunk content)

@@ -14,6 +14,7 @@ examples and guides.</p>
 [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/486/badge)](https://bestpractices.coreinfrastructure.org/projects/486)
 [![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/prometheus/prometheus)
 [![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/prometheus.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus)

 </div>

@@ -34,7 +35,7 @@ The features that distinguish Prometheus from other metrics and monitoring syste

 ## Architecture overview

-![Architecture overview](https://cdn.jsdelivr.net/gh/prometheus/prometheus@c34257d069c630685da35bcef084632ffd5d6209/documentation/images/architecture.svg)
+![Architecture overview](documentation/images/architecture.svg)

 ## Install
RELEASE.md (filename inferred from hunk content)

@@ -49,7 +49,12 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.42 | 2023-01-25 | Kemal Akkoyun (GitHub: @kakkoyun) |
 | v2.43 | 2023-03-08 | Julien Pivotto (GitHub: @roidelapluie) |
 | v2.44 | 2023-04-19 | Bryan Boreham (GitHub: @bboreham) |
-| v2.45 | 2023-05-31 | **searching for volunteer** |
+| v2.45 LTS | 2023-05-31 | Jesus Vazquez (Github: @jesusvazquez) |
+| v2.46 | 2023-07-12 | Julien Pivotto (GitHub: @roidelapluie) |
+| v2.47 | 2023-08-23 | Bryan Boreham (GitHub: @bboreham) |
+| v2.48 | 2023-10-04 | Levi Harrison (GitHub: @LeviHarrison) |
+| v2.49 | 2023-12-05 | Bartek Plotka (GitHub: @bwplotka) |
+| v2.50 | 2024-01-16 | **searching for volunteer** |

 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
@ -63,6 +63,7 @@ import (
|
|||||||
"github.com/prometheus/prometheus/notifier"
|
"github.com/prometheus/prometheus/notifier"
|
||||||
_ "github.com/prometheus/prometheus/plugins" // Register plugins.
|
_ "github.com/prometheus/prometheus/plugins" // Register plugins.
|
||||||
"github.com/prometheus/prometheus/promql"
|
"github.com/prometheus/prometheus/promql"
|
||||||
|
"github.com/prometheus/prometheus/promql/parser"
|
||||||
"github.com/prometheus/prometheus/rules"
|
"github.com/prometheus/prometheus/rules"
|
||||||
"github.com/prometheus/prometheus/scrape"
|
"github.com/prometheus/prometheus/scrape"
|
||||||
"github.com/prometheus/prometheus/storage"
|
"github.com/prometheus/prometheus/storage"
|
||||||
@ -70,6 +71,7 @@ import (
|
|||||||
"github.com/prometheus/prometheus/tracing"
|
"github.com/prometheus/prometheus/tracing"
|
||||||
"github.com/prometheus/prometheus/tsdb"
|
"github.com/prometheus/prometheus/tsdb"
|
||||||
"github.com/prometheus/prometheus/tsdb/agent"
|
"github.com/prometheus/prometheus/tsdb/agent"
|
||||||
|
"github.com/prometheus/prometheus/tsdb/wlog"
|
||||||
"github.com/prometheus/prometheus/util/documentcli"
|
"github.com/prometheus/prometheus/util/documentcli"
|
||||||
"github.com/prometheus/prometheus/util/logging"
|
"github.com/prometheus/prometheus/util/logging"
|
||||||
prom_runtime "github.com/prometheus/prometheus/util/runtime"
|
prom_runtime "github.com/prometheus/prometheus/util/runtime"
|
||||||
@ -168,6 +170,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
|
|||||||
case "remote-write-receiver":
|
case "remote-write-receiver":
|
||||||
c.web.EnableRemoteWriteReceiver = true
|
c.web.EnableRemoteWriteReceiver = true
|
||||||
level.Warn(logger).Log("msg", "Remote write receiver enabled via feature flag remote-write-receiver. This is DEPRECATED. Use --web.enable-remote-write-receiver.")
|
level.Warn(logger).Log("msg", "Remote write receiver enabled via feature flag remote-write-receiver. This is DEPRECATED. Use --web.enable-remote-write-receiver.")
|
||||||
|
case "otlp-write-receiver":
|
||||||
|
c.web.EnableOTLPWriteReceiver = true
|
||||||
|
level.Info(logger).Log("msg", "Experimental OTLP write receiver enabled")
|
||||||
case "expand-external-labels":
|
case "expand-external-labels":
|
||||||
c.enableExpandExternalLabels = true
|
c.enableExpandExternalLabels = true
|
||||||
level.Info(logger).Log("msg", "Experimental expand-external-labels enabled")
|
level.Info(logger).Log("msg", "Experimental expand-external-labels enabled")
|
||||||
@ -195,10 +200,15 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
|
|||||||
case "no-default-scrape-port":
|
case "no-default-scrape-port":
|
||||||
c.scrape.NoDefaultPort = true
|
c.scrape.NoDefaultPort = true
|
||||||
level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
|
level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
|
||||||
|
case "promql-experimental-functions":
|
||||||
|
parser.EnableExperimentalFunctions = true
|
||||||
|
level.Info(logger).Log("msg", "Experimental PromQL functions enabled.")
|
||||||
case "native-histograms":
|
case "native-histograms":
|
||||||
c.tsdb.EnableNativeHistograms = true
|
c.tsdb.EnableNativeHistograms = true
|
||||||
c.scrape.EnableProtobufNegotiation = true
|
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
|
||||||
level.Info(logger).Log("msg", "Experimental native histogram support enabled.")
|
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultNativeHistogramScrapeProtocols
|
||||||
|
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultNativeHistogramScrapeProtocols
|
||||||
|
level.Info(logger).Log("msg", "Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
|
||||||
case "":
|
case "":
|
||||||
continue
|
continue
|
||||||
case "promql-at-modifier", "promql-negative-offset":
|
case "promql-at-modifier", "promql-negative-offset":
|
||||||
@ -209,11 +219,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.tsdb.EnableNativeHistograms && c.tsdb.EnableMemorySnapshotOnShutdown {
|
|
||||||
c.tsdb.EnableMemorySnapshotOnShutdown = false
|
|
||||||
level.Warn(logger).Log("msg", "memory-snapshot-on-shutdown has been disabled automatically because memory-snapshot-on-shutdown and native-histograms cannot be enabled at the same time.")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -333,9 +338,15 @@ func main() {
 	serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL.").
 		Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)
 
+	serverOnlyFlag(a, "storage.tsdb.wal-compression-type", "Compression algorithm for the tsdb WAL.").
+		Hidden().Default(string(wlog.CompressionSnappy)).EnumVar(&cfg.tsdb.WALCompressionType, string(wlog.CompressionSnappy), string(wlog.CompressionZstd))
+
 	serverOnlyFlag(a, "storage.tsdb.head-chunks-write-queue-size", "Size of the queue through which head chunks are written to the disk to be m-mapped, 0 disables the queue completely. Experimental.").
 		Default("0").IntVar(&cfg.tsdb.HeadChunksWriteQueueSize)
 
+	serverOnlyFlag(a, "storage.tsdb.samples-per-chunk", "Target number of samples per chunk.").
+		Default("120").Hidden().IntVar(&cfg.tsdb.SamplesPerChunk)
+
 	agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage.").
 		Default("data-agent/").StringVar(&cfg.agentStoragePath)
 
@ -346,6 +357,9 @@ func main() {
 	agentOnlyFlag(a, "storage.agent.wal-compression", "Compress the agent WAL.").
 		Default("true").BoolVar(&cfg.agent.WALCompression)
 
+	agentOnlyFlag(a, "storage.agent.wal-compression-type", "Compression algorithm for the agent WAL.").
+		Hidden().Default(string(wlog.CompressionSnappy)).EnumVar(&cfg.agent.WALCompressionType, string(wlog.CompressionSnappy), string(wlog.CompressionZstd))
+
 	agentOnlyFlag(a, "storage.agent.wal-truncate-frequency",
 		"The frequency at which to truncate the WAL and remove old data.").
 		Hidden().PlaceHolder("<duration>").SetValue(&cfg.agent.TruncateFrequency)
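For reference, the EnumVar calls above are what restrict the new --storage.*.wal-compression-type flags to snappy or zstd at parse time. A small standalone sketch of that pattern, assuming the github.com/alecthomas/kingpin/v2 module path this codebase imports:

package main

import (
	"fmt"
	"os"

	"github.com/alecthomas/kingpin/v2"
)

func main() {
	app := kingpin.New("demo", "Sketch of an Enum-restricted flag.")

	// EnumVar rejects any value outside the listed alternatives during
	// parsing, so no extra validation code is needed after Parse.
	var compression string
	app.Flag("wal-compression-type", "Compression algorithm for the WAL.").
		Default("snappy").EnumVar(&compression, "snappy", "zstd")

	if _, err := app.Parse(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(2)
	}
	fmt.Println("using compression:", compression)
}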
@ -409,7 +423,7 @@ func main() {
 	a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
 		Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
 
-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)
 
 	promlogflag.AddFlags(a, &cfg.promlogConfig)
@ -425,7 +439,7 @@ func main() {
 
 	_, err := a.Parse(os.Args[1:])
 	if err != nil {
-		fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing commandline arguments: %w", err))
+		fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing command line arguments: %w", err))
 		a.Usage(os.Args[1:])
 		os.Exit(2)
 	}
@ -490,7 +504,7 @@ func main() {
 		if cfgFile.StorageConfig.ExemplarsConfig == nil {
 			cfgFile.StorageConfig.ExemplarsConfig = &config.DefaultExemplarsConfig
 		}
-		cfg.tsdb.MaxExemplars = int64(cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars)
+		cfg.tsdb.MaxExemplars = cfgFile.StorageConfig.ExemplarsConfig.MaxExemplars
 	}
 	if cfgFile.StorageConfig.TSDBConfig != nil {
 		cfg.tsdb.OutOfOrderTimeWindow = cfgFile.StorageConfig.TSDBConfig.OutOfOrderTimeWindow
@ -610,8 +624,18 @@ func main() {
 		discoveryManagerNotify = legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), legacymanager.Name("notify"))
 	}
 
+	scrapeManager, err := scrape.NewManager(
+		&cfg.scrape,
+		log.With(logger, "component", "scrape manager"),
+		fanoutStorage,
+		prometheus.DefaultRegisterer,
+	)
+	if err != nil {
+		level.Error(logger).Log("msg", "failed to create a scrape manager", "err", err)
+		os.Exit(1)
+	}
+
 	var (
-		scrapeManager  = scrape.NewManager(&cfg.scrape, log.With(logger, "component", "scrape manager"), fanoutStorage)
 		tracingManager = tracing.NewManager(logger)
 
 		queryEngine *promql.Engine
@ -1049,6 +1073,7 @@ func main() {
 
 				startTimeMargin := int64(2 * time.Duration(cfg.tsdb.MinBlockDuration).Seconds() * 1000)
 				localStorage.Set(db, startTimeMargin)
+				db.SetWriteNotified(remoteStorage)
 				close(dbOpen)
 				<-cancel
 				return nil
@ -1102,6 +1127,7 @@ func main() {
 				)
 
 				localStorage.Set(db, 0)
+				db.SetWriteNotified(remoteStorage)
 				close(dbOpen)
 				<-cancel
 				return nil
@ -1262,7 +1288,7 @@ func startsOrEndsWithQuote(s string) bool {
 		strings.HasSuffix(s, "\"") || strings.HasSuffix(s, "'")
 }
 
-// compileCORSRegexString compiles given string and adds anchors
+// compileCORSRegexString compiles given string and adds anchors.
 func compileCORSRegexString(s string) (*regexp.Regexp, error) {
 	r, err := relabel.NewRegexp(s)
 	if err != nil {
@ -1368,17 +1394,17 @@ func (s *readyStorage) StartTime() (int64, error) {
 }
 
 // Querier implements the Storage interface.
-func (s *readyStorage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
+func (s *readyStorage) Querier(mint, maxt int64) (storage.Querier, error) {
 	if x := s.get(); x != nil {
-		return x.Querier(ctx, mint, maxt)
+		return x.Querier(mint, maxt)
 	}
 	return nil, tsdb.ErrNotReady
 }
 
 // ChunkQuerier implements the Storage interface.
-func (s *readyStorage) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) {
+func (s *readyStorage) ChunkQuerier(mint, maxt int64) (storage.ChunkQuerier, error) {
 	if x := s.get(); x != nil {
-		return x.ChunkQuerier(ctx, mint, maxt)
+		return x.ChunkQuerier(mint, maxt)
 	}
 	return nil, tsdb.ErrNotReady
 }
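The signature change above moves the query context from Querier construction to each Select call, which the later hunks in this commit follow through on. A compilable sketch with local stand-in interfaces (deliberately not the real storage package) to show the call-shape migration:

package storagedemo

import "context"

// Minimal local stand-ins so the shape change is visible in isolation.
type SeriesSet interface{ Next() bool }

type Querier interface {
	// After this commit the per-query context travels with Select...
	Select(ctx context.Context, sortSeries bool) SeriesSet
}

type Queryable interface {
	// ...while Querier itself only scopes the time range.
	Querier(mint, maxt int64) (Querier, error)
}

func readAll(ctx context.Context, db Queryable, mint, maxt int64) error {
	q, err := db.Querier(mint, maxt) // was: db.Querier(ctx, mint, maxt)
	if err != nil {
		return err
	}
	ss := q.Select(ctx, false) // was: q.Select(false)
	for ss.Next() {
		// consume series
	}
	return nil
}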
@ -1451,11 +1477,11 @@ func (s *readyStorage) CleanTombstones() error {
 }
 
 // Delete implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces.
-func (s *readyStorage) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
+func (s *readyStorage) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Matcher) error {
 	if x := s.get(); x != nil {
 		switch db := x.(type) {
 		case *tsdb.DB:
-			return db.Delete(mint, maxt, ms...)
+			return db.Delete(ctx, mint, maxt, ms...)
 		case *agent.DB:
 			return agent.ErrUnsupported
 		default:
@ -1481,11 +1507,11 @@ func (s *readyStorage) Snapshot(dir string, withHead bool) error {
 }
 
 // Stats implements the api_v1.TSDBAdminStats interface.
-func (s *readyStorage) Stats(statsByLabelName string) (*tsdb.Stats, error) {
+func (s *readyStorage) Stats(statsByLabelName string, limit int) (*tsdb.Stats, error) {
 	if x := s.get(); x != nil {
 		switch db := x.(type) {
 		case *tsdb.DB:
-			return db.Head().Stats(statsByLabelName), nil
+			return db.Head().Stats(statsByLabelName, limit), nil
 		case *agent.DB:
 			return nil, agent.ErrUnsupported
 		default:
@ -1541,7 +1567,9 @@ type tsdbOptions struct {
 	MaxBytes                 units.Base2Bytes
 	NoLockfile               bool
 	WALCompression           bool
+	WALCompressionType       string
 	HeadChunksWriteQueueSize int
+	SamplesPerChunk          int
 	StripeSize               int
 	MinBlockDuration         model.Duration
 	MaxBlockDuration         model.Duration
@ -1560,8 +1588,9 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
 		MaxBytes:                   int64(opts.MaxBytes),
 		NoLockfile:                 opts.NoLockfile,
 		AllowOverlappingCompaction: true,
-		WALCompression:             opts.WALCompression,
+		WALCompression:             wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType),
 		HeadChunksWriteQueueSize:   opts.HeadChunksWriteQueueSize,
+		SamplesPerChunk:            opts.SamplesPerChunk,
 		StripeSize:                 opts.StripeSize,
 		MinBlockDuration:           int64(time.Duration(opts.MinBlockDuration) / time.Millisecond),
 		MaxBlockDuration:           int64(time.Duration(opts.MaxBlockDuration) / time.Millisecond),
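wlog.ParseCompressionType itself is not shown in this diff. Below is a plausible shape, inferred only from how it is called above; the real helper lives in tsdb/wlog and may differ in detail. The point is backwards compatibility: compression must still be switched on by the legacy boolean before the new type enum is consulted.

package wlogdemo

// CompressionType and the constants are assumptions mirroring the flag
// values seen in this commit ("snappy", "zstd").
type CompressionType string

const (
	CompressionNone   CompressionType = "none"
	CompressionSnappy CompressionType = "snappy"
	CompressionZstd   CompressionType = "zstd"
)

// ParseCompressionType maps the old boolean flag plus the new enum flag
// onto a single compression type.
func ParseCompressionType(compress bool, compressType string) CompressionType {
	if compress {
		return CompressionType(compressType)
	}
	return CompressionNone
}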
@ -1578,6 +1607,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
 type agentOptions struct {
 	WALSegmentSize         units.Base2Bytes
 	WALCompression         bool
+	WALCompressionType     string
 	StripeSize             int
 	TruncateFrequency      model.Duration
 	MinWALTime, MaxWALTime model.Duration
@ -1587,7 +1617,7 @@ type agentOptions struct {
 func (opts agentOptions) ToAgentOptions() agent.Options {
 	return agent.Options{
 		WALSegmentSize:    int(opts.WALSegmentSize),
-		WALCompression:    opts.WALCompression,
+		WALCompression:    wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType),
 		StripeSize:        opts.StripeSize,
 		TruncateFrequency: time.Duration(opts.TruncateFrequency),
 		MinWALTime:        durationToInt64Millis(time.Duration(opts.MinWALTime)),
@ -121,7 +121,7 @@ func TestFailedStartupExitCode(t *testing.T) {
 	fakeInputFile := "fake-input-file"
 	expectedExitStatus := 2
 
-	prom := exec.Command(promPath, "-test.main", "--config.file="+fakeInputFile)
+	prom := exec.Command(promPath, "-test.main", "--web.listen-address=0.0.0.0:0", "--config.file="+fakeInputFile)
 	err := prom.Run()
 	require.Error(t, err)
 
@ -358,7 +358,7 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
 }
 
 func TestAgentSuccessfulStartup(t *testing.T) {
-	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+agentConfig)
+	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig)
 	require.NoError(t, prom.Start())
 
 	actualExitStatus := 0
@ -376,7 +376,7 @@ func TestAgentSuccessfulStartup(t *testing.T) {
 }
 
 func TestAgentFailedStartupWithServerFlag(t *testing.T) {
-	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--config.file="+promConfig)
+	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
 
 	output := bytes.Buffer{}
 	prom.Stderr = &output
@ -403,7 +403,7 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) {
 }
 
 func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
-	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+promConfig)
+	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
 	require.NoError(t, prom.Start())
 
 	actualExitStatus := 0
@ -438,7 +438,7 @@ func TestModeSpecificFlags(t *testing.T) {
 
 	for _, tc := range testcases {
 		t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) {
-			args := []string{"-test.main", tc.arg, t.TempDir()}
+			args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"}
 
 			if tc.mode == "agent" {
 				args = append(args, "--enable-feature=agent", "--config.file="+agentConfig)
@ -498,10 +498,9 @@ func TestDocumentation(t *testing.T) {
 	cmd.Stdout = &stdout
 
 	if err := cmd.Run(); err != nil {
-		if exitError, ok := err.(*exec.ExitError); ok {
-			if exitError.ExitCode() != 0 {
-				fmt.Println("Command failed with non-zero exit code")
-			}
-		}
+		var exitError *exec.ExitError
+		if errors.As(err, &exitError) && exitError.ExitCode() != 0 {
+			fmt.Println("Command failed with non-zero exit code")
+		}
 	}
 
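The errors.As rewrite above matters beyond style: errors.As walks the wrapped-error chain, so it still matches when the *exec.ExitError has been wrapped upstream with fmt.Errorf("...: %w", err), where the old plain type assertion would not. A runnable sketch (assumes a Unix `false` binary on PATH):

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	err := exec.Command("false").Run()

	// errors.As unwraps; a direct err.(*exec.ExitError) assertion fails as
	// soon as any layer wraps the error.
	var exitError *exec.ExitError
	if errors.As(err, &exitError) {
		fmt.Println("exit code:", exitError.ExitCode())
	}
}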
@ -72,9 +72,11 @@ Loop:
 	if !startedOk {
 		t.Fatal("prometheus didn't start in the specified timeout")
 	}
-	if err := prom.Process.Kill(); err == nil {
+	switch err := prom.Process.Kill(); {
+	case err == nil:
 		t.Errorf("prometheus didn't shutdown gracefully after sending the Interrupt signal")
-	} else if stoppedErr != nil && stoppedErr.Error() != "signal: interrupt" { // TODO - find a better way to detect when the process didn't exit as expected!
+	case stoppedErr != nil && stoppedErr.Error() != "signal: interrupt":
+		// TODO: find a better way to detect when the process didn't exit as expected!
 		t.Errorf("prometheus exited with an unexpected error: %v", stoppedErr)
 	}
 }
@ -193,7 +193,7 @@ func (p *queryLogTest) String() string {
 	}
 	name = name + ", " + p.host + ":" + strconv.Itoa(p.port)
 	if p.enabledAtStart {
-		name = name + ", enabled at start"
+		name += ", enabled at start"
 	}
 	if p.prefix != "" {
 		name = name + ", with prefix " + p.prefix
@ -101,7 +101,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 		nextSampleTs int64 = math.MaxInt64
 	)
 
-	for t := mint; t <= maxt; t = t + blockDuration {
+	for t := mint; t <= maxt; t += blockDuration {
 		tsUpper := t + blockDuration
 		if nextSampleTs != math.MaxInt64 && nextSampleTs >= tsUpper {
 			// The next sample is not in this timerange, we can avoid parsing
@ -45,7 +45,7 @@ func sortSamples(samples []backfillSample) {
 }
 
 func queryAllSeries(t testing.TB, q storage.Querier, expectedMinTime, expectedMaxTime int64) []backfillSample {
-	ss := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
+	ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
 	samples := []backfillSample{}
 	for ss.Next() {
 		series := ss.At()
@ -67,7 +67,7 @@ func testBlocks(t *testing.T, db *tsdb.DB, expectedMinTime, expectedMaxTime, exp
 		require.Equal(t, block.MinTime()/expectedBlockDuration, (block.MaxTime()-1)/expectedBlockDuration, "block %d contains data outside of one aligned block duration", i)
 	}
 
-	q, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64)
+	q, err := db.Querier(math.MinInt64, math.MaxInt64)
 	require.NoError(t, err)
 	defer func() {
 		require.NoError(t, q.Close())
@ -58,6 +58,7 @@ import (
 	"github.com/prometheus/prometheus/notifier"
 	_ "github.com/prometheus/prometheus/plugins" // Register plugins.
 	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/scrape"
 	"github.com/prometheus/prometheus/util/documentcli"
 )
@ -71,6 +72,8 @@ const (
 	lintOptionAll            = "all"
 	lintOptionDuplicateRules = "duplicate-rules"
 	lintOptionNone           = "none"
+	checkHealth              = "/-/healthy"
+	checkReadiness           = "/-/ready"
 )
 
 var lintOptions = []string{lintOptionAll, lintOptionDuplicateRules, lintOptionNone}
@ -79,15 +82,20 @@ func main() {
 	var (
 		httpRoundTripper   = api.DefaultRoundTripper
 		serverURL          *url.URL
+		remoteWriteURL     *url.URL
 		httpConfigFilePath string
 	)
 
+	ctx := context.Background()
+
 	app := kingpin.New(filepath.Base(os.Args[0]), "Tooling for the Prometheus monitoring system.").UsageWriter(os.Stdout)
 	app.Version(version.Print("promtool"))
 	app.HelpFlag.Short('h')
 
 	checkCmd := app.Command("check", "Check the resources for validity.")
 
+	experimental := app.Flag("experimental", "Enable experimental commands.").Bool()
+
 	sdCheckCmd := checkCmd.Command("service-discovery", "Perform service discovery for the given job name and report the results, including relabeling.")
 	sdConfigFile := sdCheckCmd.Arg("config-file", "The prometheus config file.").Required().ExistingFile()
 	sdJobName := sdCheckCmd.Arg("job", "The job to run service discovery for.").Required().String()
@ -113,11 +121,19 @@ func main() {
 		"The config files to check.",
 	).Required().ExistingFiles()
 
+	checkServerHealthCmd := checkCmd.Command("healthy", "Check if the Prometheus server is healthy.")
+	checkServerHealthCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
+	checkServerHealthCmd.Flag("url", "The URL for the Prometheus server.").Default("http://localhost:9090").URLVar(&serverURL)
+
+	checkServerReadyCmd := checkCmd.Command("ready", "Check if the Prometheus server is ready.")
+	checkServerReadyCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
+	checkServerReadyCmd.Flag("url", "The URL for the Prometheus server.").Default("http://localhost:9090").URLVar(&serverURL)
+
 	checkRulesCmd := checkCmd.Command("rules", "Check if the rule files are valid or not.")
 	ruleFiles := checkRulesCmd.Arg(
 		"rule-files",
-		"The rule files to check.",
-	).Required().ExistingFiles()
+		"The rule files to check, default is read from standard input.",
+	).ExistingFiles()
 	checkRulesLint := checkRulesCmd.Flag(
 		"lint",
 		"Linting checks to apply. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting",
@ -168,6 +184,18 @@ func main() {
 	queryLabelsEnd := queryLabelsCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String()
 	queryLabelsMatch := queryLabelsCmd.Flag("match", "Series selector. Can be specified multiple times.").Strings()
 
+	pushCmd := app.Command("push", "Push to a Prometheus server.")
+	pushCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
+	pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write (for testing purpose only).")
+	pushMetricsCmd.Arg("remote-write-url", "Prometheus remote write url to push metrics.").Required().URLVar(&remoteWriteURL)
+	metricFiles := pushMetricsCmd.Arg(
+		"metric-files",
+		"The metric files to push, default is read from standard input.",
+	).ExistingFiles()
+	pushMetricsLabels := pushMetricsCmd.Flag("label", "Label to attach to metrics. Can be specified multiple times.").Default("job=promtool").StringMap()
+	pushMetricsTimeout := pushMetricsCmd.Flag("timeout", "The time to wait for pushing metrics.").Default("30s").Duration()
+	pushMetricsHeaders := pushMetricsCmd.Flag("header", "Prometheus remote write header.").StringMap()
+
 	testCmd := app.Command("test", "Unit testing.")
 	testRulesCmd := testCmd.Command("rules", "Unit tests for rules.")
 	testRulesRun := testRulesCmd.Flag("run", "If set, will only run test groups whose names match the regular expression. Can be specified multiple times.").Strings()
@ -191,6 +219,7 @@ func main() {
 	analyzeBlockID := tsdbAnalyzeCmd.Arg("block id", "Block to analyze (default is the last block).").String()
 	analyzeLimit := tsdbAnalyzeCmd.Flag("limit", "How many items to show in each list.").Default("20").Int()
 	analyzeRunExtended := tsdbAnalyzeCmd.Flag("extended", "Run extended analysis.").Bool()
+	analyzeMatchers := tsdbAnalyzeCmd.Flag("match", "Series selector to analyze. Only 1 set of matchers is supported now.").String()
 
 	tsdbListCmd := tsdbCmd.Command("list", "List tsdb blocks.")
 	listHumanReadable := tsdbListCmd.Flag("human-readable", "Print human readable values.").Short('r').Bool()
@ -223,6 +252,22 @@ func main() {
 		"A list of one or more files containing recording rules to be backfilled. All recording rules listed in the files will be backfilled. Alerting rules are not evaluated.",
 	).Required().ExistingFiles()
 
+	promQLCmd := app.Command("promql", "PromQL formatting and editing. Requires the --experimental flag.")
+
+	promQLFormatCmd := promQLCmd.Command("format", "Format PromQL query to pretty printed form.")
+	promQLFormatQuery := promQLFormatCmd.Arg("query", "PromQL query.").Required().String()
+
+	promQLLabelsCmd := promQLCmd.Command("label-matchers", "Edit label matchers contained within an existing PromQL query.")
+	promQLLabelsSetCmd := promQLLabelsCmd.Command("set", "Set a label matcher in the query.")
+	promQLLabelsSetType := promQLLabelsSetCmd.Flag("type", "Type of the label matcher to set.").Short('t').Default("=").Enum("=", "!=", "=~", "!~")
+	promQLLabelsSetQuery := promQLLabelsSetCmd.Arg("query", "PromQL query.").Required().String()
+	promQLLabelsSetName := promQLLabelsSetCmd.Arg("name", "Name of the label matcher to set.").Required().String()
+	promQLLabelsSetValue := promQLLabelsSetCmd.Arg("value", "Value of the label matcher to set.").Required().String()
+
+	promQLLabelsDeleteCmd := promQLLabelsCmd.Command("delete", "Delete a label from the query.")
+	promQLLabelsDeleteQuery := promQLLabelsDeleteCmd.Arg("query", "PromQL query.").Required().String()
+	promQLLabelsDeleteName := promQLLabelsDeleteCmd.Arg("name", "Name of the label to delete.").Required().String()
+
 	featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings()
 
 	documentationCmd := app.Command("write-documentation", "Generate command line documentation. Internal use.").Hidden()
@ -277,6 +322,12 @@ func main() {
 	case checkConfigCmd.FullCommand():
 		os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...))
 
+	case checkServerHealthCmd.FullCommand():
+		os.Exit(checkErr(CheckServerStatus(serverURL, checkHealth, httpRoundTripper)))
+
+	case checkServerReadyCmd.FullCommand():
+		os.Exit(checkErr(CheckServerStatus(serverURL, checkReadiness, httpRoundTripper)))
+
 	case checkWebConfigCmd.FullCommand():
 		os.Exit(CheckWebConfig(*webConfigFiles...))
 
@ -286,6 +337,9 @@ func main() {
 	case checkMetricsCmd.FullCommand():
 		os.Exit(CheckMetrics(*checkMetricsExtended))
 
+	case pushMetricsCmd.FullCommand():
+		os.Exit(PushMetrics(remoteWriteURL, httpRoundTripper, *pushMetricsHeaders, *pushMetricsTimeout, *pushMetricsLabels, *metricFiles...))
+
 	case queryInstantCmd.FullCommand():
 		os.Exit(QueryInstant(serverURL, httpRoundTripper, *queryInstantExpr, *queryInstantTime, p))
 
@ -321,26 +375,45 @@ func main() {
 		os.Exit(checkErr(benchmarkWrite(*benchWriteOutPath, *benchSamplesFile, *benchWriteNumMetrics, *benchWriteNumScrapes)))
 
 	case tsdbAnalyzeCmd.FullCommand():
-		os.Exit(checkErr(analyzeBlock(*analyzePath, *analyzeBlockID, *analyzeLimit, *analyzeRunExtended)))
+		os.Exit(checkErr(analyzeBlock(ctx, *analyzePath, *analyzeBlockID, *analyzeLimit, *analyzeRunExtended, *analyzeMatchers)))
 
 	case tsdbListCmd.FullCommand():
 		os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))
 
 	case tsdbDumpCmd.FullCommand():
-		os.Exit(checkErr(dumpSamples(*dumpPath, *dumpMinTime, *dumpMaxTime, *dumpMatch)))
+		os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpMinTime, *dumpMaxTime, *dumpMatch)))
 	// TODO(aSquare14): Work on adding support for custom block size.
 	case openMetricsImportCmd.FullCommand():
 		os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))
 
 	case importRulesCmd.FullCommand():
 		os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...)))
 
 	case documentationCmd.FullCommand():
 		os.Exit(checkErr(documentcli.GenerateMarkdown(app.Model(), os.Stdout)))
+
+	case promQLFormatCmd.FullCommand():
+		checkExperimental(*experimental)
+		os.Exit(checkErr(formatPromQL(*promQLFormatQuery)))
+
+	case promQLLabelsSetCmd.FullCommand():
+		checkExperimental(*experimental)
+		os.Exit(checkErr(labelsSetPromQL(*promQLLabelsSetQuery, *promQLLabelsSetType, *promQLLabelsSetName, *promQLLabelsSetValue)))
+
+	case promQLLabelsDeleteCmd.FullCommand():
+		checkExperimental(*experimental)
+		os.Exit(checkErr(labelsDeletePromQL(*promQLLabelsDeleteQuery, *promQLLabelsDeleteName)))
 	}
 }
 
-// nolint:revive
-var lintError = fmt.Errorf("lint error")
+func checkExperimental(f bool) {
+	if !f {
+		fmt.Fprintln(os.Stderr, "This command is experimental and requires the --experimental flag to be set.")
+		os.Exit(1)
+	}
+}
+
+var errLint = fmt.Errorf("lint error")
 
 type lintConfig struct {
 	all bool
@ -371,6 +444,43 @@ func (ls lintConfig) lintDuplicateRules() bool {
 	return ls.all || ls.duplicateRules
 }
 
+// Check server status - healthy & ready.
+func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error {
+	if serverURL.Scheme == "" {
+		serverURL.Scheme = "http"
+	}
+
+	config := api.Config{
+		Address:      serverURL.String() + checkEndpoint,
+		RoundTripper: roundTripper,
+	}
+
+	// Create new client.
+	c, err := api.NewClient(config)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "error creating API client:", err)
+		return err
+	}
+
+	request, err := http.NewRequest("GET", config.Address, nil)
+	if err != nil {
+		return err
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	response, dataBytes, err := c.Do(ctx, request)
+	if err != nil {
+		return err
+	}
+
+	if response.StatusCode != http.StatusOK {
+		return fmt.Errorf("check failed: URL=%s, status=%d", serverURL, response.StatusCode)
+	}
+
+	fmt.Fprintln(os.Stderr, " SUCCESS: ", string(dataBytes))
+	return nil
+}
+
 // CheckConfig validates configuration files.
 func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files ...string) int {
 	failed := false
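Stripped of the api client plumbing, the new `promtool check healthy` / `check ready` commands boil down to a bounded GET against the server's /-/healthy or /-/ready endpoint. A minimal sketch; the localhost URL is an assumption for illustration:

package main

import (
	"fmt"
	"net/http"
	"os"
	"time"
)

func main() {
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Get("http://localhost:9090/-/healthy")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		fmt.Fprintf(os.Stderr, "check failed: status=%d\n", resp.StatusCode)
		os.Exit(1)
	}
	fmt.Println("SUCCESS")
}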
@ -390,20 +500,12 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files
 		}
 		fmt.Println()
 
-		for _, rf := range ruleFiles {
-			if n, errs := checkRules(rf, lintSettings); len(errs) > 0 {
-				fmt.Fprintln(os.Stderr, " FAILED:")
-				for _, err := range errs {
-					fmt.Fprintln(os.Stderr, " ", err)
-				}
-				failed = true
-				for _, err := range errs {
-					hasErrors = hasErrors || !errors.Is(err, lintError)
-				}
-			} else {
-				fmt.Printf(" SUCCESS: %d rules found\n", n)
-			}
-			fmt.Println()
-		}
+		rulesFailed, rulesHasErrors := checkRules(ruleFiles, lintSettings)
+		if rulesFailed {
+			failed = rulesFailed
+		}
+		if rulesHasErrors {
+			hasErrors = rulesHasErrors
+		}
 	}
 	if failed && hasErrors {
@ -631,39 +733,96 @@ func checkSDFile(filename string) ([]*targetgroup.Group, error) {
 func CheckRules(ls lintConfig, files ...string) int {
 	failed := false
 	hasErrors := false
-	for _, f := range files {
-		if n, errs := checkRules(f, ls); errs != nil {
-			fmt.Fprintln(os.Stderr, " FAILED:")
-			for _, e := range errs {
-				fmt.Fprintln(os.Stderr, e.Error())
-			}
-			failed = true
-			for _, err := range errs {
-				hasErrors = hasErrors || !errors.Is(err, lintError)
-			}
-		} else {
-			fmt.Printf(" SUCCESS: %d rules found\n", n)
-		}
-		fmt.Println()
-	}
+	if len(files) == 0 {
+		failed, hasErrors = checkRulesFromStdin(ls)
+	} else {
+		failed, hasErrors = checkRules(files, ls)
+	}
 
 	if failed && hasErrors {
 		return failureExitCode
 	}
 	if failed && ls.fatal {
 		return lintErrExitCode
 	}
 
 	return successExitCode
 }
 
-func checkRules(filename string, lintSettings lintConfig) (int, []error) {
-	fmt.Println("Checking", filename)
-
-	rgs, errs := rulefmt.ParseFile(filename)
-	if errs != nil {
-		return successExitCode, errs
-	}
+// checkRulesFromStdin validates rule from stdin.
+func checkRulesFromStdin(ls lintConfig) (bool, bool) {
+	failed := false
+	hasErrors := false
+	fmt.Println("Checking standard input")
+	data, err := io.ReadAll(os.Stdin)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, " FAILED:", err)
+		return true, true
+	}
+	rgs, errs := rulefmt.Parse(data)
+	if errs != nil {
+		failed = true
+		fmt.Fprintln(os.Stderr, " FAILED:")
+		for _, e := range errs {
+			fmt.Fprintln(os.Stderr, e.Error())
+			hasErrors = hasErrors || !errors.Is(e, errLint)
+		}
+		if hasErrors {
+			return failed, hasErrors
+		}
+	}
+	if n, errs := checkRuleGroups(rgs, ls); errs != nil {
+		fmt.Fprintln(os.Stderr, " FAILED:")
+		for _, e := range errs {
+			fmt.Fprintln(os.Stderr, e.Error())
+		}
+		failed = true
+		for _, err := range errs {
+			hasErrors = hasErrors || !errors.Is(err, errLint)
+		}
+	} else {
+		fmt.Printf(" SUCCESS: %d rules found\n", n)
+	}
+	fmt.Println()
+	return failed, hasErrors
+}
+
+// checkRules validates rule files.
+func checkRules(files []string, ls lintConfig) (bool, bool) {
+	failed := false
+	hasErrors := false
+	for _, f := range files {
+		fmt.Println("Checking", f)
+		rgs, errs := rulefmt.ParseFile(f)
+		if errs != nil {
+			failed = true
+			fmt.Fprintln(os.Stderr, " FAILED:")
+			for _, e := range errs {
+				fmt.Fprintln(os.Stderr, e.Error())
+				hasErrors = hasErrors || !errors.Is(e, errLint)
+			}
+			if hasErrors {
+				continue
+			}
+		}
+		if n, errs := checkRuleGroups(rgs, ls); errs != nil {
+			fmt.Fprintln(os.Stderr, " FAILED:")
+			for _, e := range errs {
+				fmt.Fprintln(os.Stderr, e.Error())
+			}
+			failed = true
+			for _, err := range errs {
+				hasErrors = hasErrors || !errors.Is(err, errLint)
+			}
+		} else {
+			fmt.Printf(" SUCCESS: %d rules found\n", n)
+		}
+		fmt.Println()
+	}
+	return failed, hasErrors
+}
+
+func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []error) {
 	numRules := 0
 	for _, rg := range rgs.Groups {
 		numRules += len(rg.Rules)
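The stdin-or-files dispatch introduced above is a reusable pattern: read standard input when no file arguments are given, otherwise read each file. A generic sketch of just that dispatch (not promtool code):

package main

import (
	"fmt"
	"io"
	"os"
)

func readInputs(files []string) ([][]byte, error) {
	if len(files) == 0 {
		data, err := io.ReadAll(os.Stdin)
		if err != nil {
			return nil, err
		}
		return [][]byte{data}, nil
	}
	var out [][]byte
	for _, f := range files {
		data, err := os.ReadFile(f)
		if err != nil {
			return nil, err
		}
		out = append(out, data)
	}
	return out, nil
}

func main() {
	inputs, err := readInputs(os.Args[1:])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("read", len(inputs), "input(s)")
}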
@ -680,7 +839,7 @@ func checkRules(filename string, lintSettings lintConfig) (int, []error) {
 			})
 		}
 		errMessage += "Might cause inconsistency while recording expressions"
-		return 0, []error{fmt.Errorf("%w %s", lintError, errMessage)}
+		return 0, []error{fmt.Errorf("%w %s", errLint, errMessage)}
 	}
 }
 
@ -1281,3 +1440,79 @@ func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *c
 
 	return nil
 }
+
+func formatPromQL(query string) error {
+	expr, err := parser.ParseExpr(query)
+	if err != nil {
+		return err
+	}
+
+	fmt.Println(expr.Pretty(0))
+	return nil
+}
+
+func labelsSetPromQL(query, labelMatchType, name, value string) error {
+	expr, err := parser.ParseExpr(query)
+	if err != nil {
+		return err
+	}
+
+	var matchType labels.MatchType
+	switch labelMatchType {
+	case parser.ItemType(parser.EQL).String():
+		matchType = labels.MatchEqual
+	case parser.ItemType(parser.NEQ).String():
+		matchType = labels.MatchNotEqual
+	case parser.ItemType(parser.EQL_REGEX).String():
+		matchType = labels.MatchRegexp
+	case parser.ItemType(parser.NEQ_REGEX).String():
+		matchType = labels.MatchNotRegexp
+	default:
+		return fmt.Errorf("invalid label match type: %s", labelMatchType)
+	}
+
+	parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
+		if n, ok := node.(*parser.VectorSelector); ok {
+			var found bool
+			for i, l := range n.LabelMatchers {
+				if l.Name == name {
+					n.LabelMatchers[i].Type = matchType
+					n.LabelMatchers[i].Value = value
+					found = true
+				}
+			}
+			if !found {
+				n.LabelMatchers = append(n.LabelMatchers, &labels.Matcher{
+					Type:  matchType,
+					Name:  name,
+					Value: value,
+				})
+			}
+		}
+		return nil
+	})
+
+	fmt.Println(expr.Pretty(0))
+	return nil
+}
+
+func labelsDeletePromQL(query, name string) error {
+	expr, err := parser.ParseExpr(query)
+	if err != nil {
+		return err
+	}
+
+	parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
+		if n, ok := node.(*parser.VectorSelector); ok {
+			for i, l := range n.LabelMatchers {
+				if l.Name == name {
+					n.LabelMatchers = append(n.LabelMatchers[:i], n.LabelMatchers[i+1:]...)
+				}
+			}
+		}
+		return nil
+	})
+
+	fmt.Println(expr.Pretty(0))
+	return nil
+}
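All three new helpers rest on parser.ParseExpr plus the AST pretty-printer, both visible in the hunk above. A standalone usage sketch with an arbitrary example query:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Parse a query into an AST, then render it with the pretty-printer,
	// which is what `promtool promql format` wraps.
	expr, err := parser.ParseExpr(`sum by (job) (rate(http_requests_total{code="200"}[5m]))`)
	if err != nil {
		panic(err)
	}
	fmt.Println(expr.Pretty(0))
}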
@ -450,10 +450,9 @@ func TestDocumentation(t *testing.T) {
 	cmd.Stdout = &stdout
 
 	if err := cmd.Run(); err != nil {
-		if exitError, ok := err.(*exec.ExitError); ok {
-			if exitError.ExitCode() != 0 {
-				fmt.Println("Command failed with non-zero exit code")
-			}
-		}
+		var exitError *exec.ExitError
+		if errors.As(err, &exitError) && exitError.ExitCode() != 0 {
+			fmt.Println("Command failed with non-zero exit code")
+		}
 	}
 
@ -464,3 +463,88 @@ func TestDocumentation(t *testing.T) {
 
 	require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. Hint: run `make cli-documentation`.")
 }
+
+func TestCheckRules(t *testing.T) {
+	t.Run("rules-good", func(t *testing.T) {
+		data, err := os.ReadFile("./testdata/rules.yml")
+		require.NoError(t, err)
+		r, w, err := os.Pipe()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		_, err = w.Write(data)
+		if err != nil {
+			t.Error(err)
+		}
+		w.Close()
+
+		// Restore stdin right after the test.
+		defer func(v *os.File) { os.Stdin = v }(os.Stdin)
+		os.Stdin = r
+
+		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false))
+		require.Equal(t, successExitCode, exitCode, "")
+	})
+
+	t.Run("rules-bad", func(t *testing.T) {
+		data, err := os.ReadFile("./testdata/rules-bad.yml")
+		require.NoError(t, err)
+		r, w, err := os.Pipe()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		_, err = w.Write(data)
+		if err != nil {
+			t.Error(err)
+		}
+		w.Close()
+
+		// Restore stdin right after the test.
+		defer func(v *os.File) { os.Stdin = v }(os.Stdin)
+		os.Stdin = r
+
+		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false))
+		require.Equal(t, failureExitCode, exitCode, "")
+	})
+
+	t.Run("rules-lint-fatal", func(t *testing.T) {
+		data, err := os.ReadFile("./testdata/prometheus-rules.lint.yml")
+		require.NoError(t, err)
+		r, w, err := os.Pipe()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		_, err = w.Write(data)
+		if err != nil {
+			t.Error(err)
+		}
+		w.Close()
+
+		// Restore stdin right after the test.
+		defer func(v *os.File) { os.Stdin = v }(os.Stdin)
+		os.Stdin = r
+
+		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true))
+		require.Equal(t, lintErrExitCode, exitCode, "")
+	})
+}
+
+func TestCheckRulesWithRuleFiles(t *testing.T) {
+	t.Run("rules-good", func(t *testing.T) {
+		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules.yml")
+		require.Equal(t, successExitCode, exitCode, "")
+	})
+
+	t.Run("rules-bad", func(t *testing.T) {
+		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules-bad.yml")
+		require.Equal(t, failureExitCode, exitCode, "")
+	})
+
+	t.Run("rules-lint-fatal", func(t *testing.T) {
+		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true), "./testdata/prometheus-rules.lint.yml")
+		require.Equal(t, lintErrExitCode, exitCode, "")
+	})
+}
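Each stdin-based subtest above repeats the same pipe setup. A possible helper (not part of this commit, just a refactor sketch) that factors it out:

package main

import (
	"os"
	"testing"
)

// withStdinFrom feeds the contents of path to os.Stdin for the duration of
// fn, restoring the original stdin afterwards.
func withStdinFrom(t *testing.T, path string, fn func()) {
	t.Helper()
	data, err := os.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	r, w, err := os.Pipe()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := w.Write(data); err != nil {
		t.Error(err)
	}
	w.Close()

	// Swap stdin for the read end of the pipe.
	orig := os.Stdin
	defer func() { os.Stdin = orig }()
	os.Stdin = r

	fn()
}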
138 cmd/promtool/metrics.go Normal file
@ -0,0 +1,138 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/golang/snappy"
+	config_util "github.com/prometheus/common/config"
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/storage/remote"
+	"github.com/prometheus/prometheus/util/fmtutil"
+)
+
+// Push metrics to a prometheus remote write (for testing purpose only).
+func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int {
+	addressURL, err := url.Parse(url.String())
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		return failureExitCode
+	}
+
+	// build remote write client
+	writeClient, err := remote.NewWriteClient("remote-write", &remote.ClientConfig{
+		URL:     &config_util.URL{URL: addressURL},
+		Timeout: model.Duration(timeout),
+	})
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		return failureExitCode
+	}
+
+	// set custom tls config from httpConfigFilePath
+	// set custom headers to every request
+	client, ok := writeClient.(*remote.Client)
+	if !ok {
+		fmt.Fprintln(os.Stderr, fmt.Errorf("unexpected type %T", writeClient))
+		return failureExitCode
+	}
+	client.Client.Transport = &setHeadersTransport{
+		RoundTripper: roundTripper,
+		headers:      headers,
+	}
+
+	var data []byte
+	var failed bool
+
+	if len(files) == 0 {
+		data, err = io.ReadAll(os.Stdin)
+		if err != nil {
+			fmt.Fprintln(os.Stderr, " FAILED:", err)
+			return failureExitCode
+		}
+		fmt.Printf("Parsing standard input\n")
+		if parseAndPushMetrics(client, data, labels) {
+			fmt.Printf(" SUCCESS: metrics pushed to remote write.\n")
+			return successExitCode
+		}
+		return failureExitCode
+	}
+
+	for _, file := range files {
+		data, err = os.ReadFile(file)
+		if err != nil {
+			fmt.Fprintln(os.Stderr, " FAILED:", err)
+			failed = true
+			continue
+		}
+
+		fmt.Printf("Parsing metrics file %s\n", file)
+		if parseAndPushMetrics(client, data, labels) {
+			fmt.Printf(" SUCCESS: metrics file %s pushed to remote write.\n", file)
+			continue
+		}
+		failed = true
+	}
+
+	if failed {
+		return failureExitCode
+	}
+
+	return successExitCode
+}
+
+func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool {
+	metricsData, err := fmtutil.MetricTextToWriteRequest(bytes.NewReader(data), labels)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, " FAILED:", err)
+		return false
+	}
+
+	raw, err := metricsData.Marshal()
+	if err != nil {
+		fmt.Fprintln(os.Stderr, " FAILED:", err)
+		return false
+	}
+
+	// Encode the request body into snappy encoding.
+	compressed := snappy.Encode(nil, raw)
+	err = client.Store(context.Background(), compressed, 0)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, " FAILED:", err)
+		return false
+	}
+
+	return true
+}
+
+type setHeadersTransport struct {
+	http.RoundTripper
+	headers map[string]string
+}
+
+func (s *setHeadersTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	for key, value := range s.headers {
+		req.Header.Set(key, value)
+	}
+	return s.RoundTripper.RoundTrip(req)
+}
@ -68,7 +68,7 @@ func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient que
 }
 
 // loadGroups parses groups from a list of recording rule files.
-func (importer *ruleImporter) loadGroups(ctx context.Context, filenames []string) (errs []error) {
+func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) (errs []error) {
 	groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, filenames...)
 	if errs != nil {
 		return errs
@ -100,7 +100,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
 	startInMs := start.Unix() * int64(time.Second/time.Millisecond)
 	endInMs := end.Unix() * int64(time.Second/time.Millisecond)
 
-	for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock = startOfBlock + blockDuration {
+	for startOfBlock := blockDuration * (startInMs / blockDuration); startOfBlock <= endInMs; startOfBlock += blockDuration {
 		endOfBlock := startOfBlock + blockDuration - 1
 
 		currStart := max(startOfBlock/int64(time.Second/time.Millisecond), start.Unix())
@ -163,7 +163,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
|
|||||||
})
|
})
|
||||||
|
|
||||||
lb.Set(labels.MetricName, ruleName)
|
lb.Set(labels.MetricName, ruleName)
|
||||||
lbls := lb.Labels(labels.EmptyLabels())
|
lbls := lb.Labels()
|
||||||
|
|
||||||
for _, value := range sample.Values {
|
for _, value := range sample.Values {
|
||||||
if err := app.add(ctx, lbls, timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil {
|
if err := app.add(ctx, lbls, timestamp.FromTime(value.Timestamp.Time()), float64(value.Value)); err != nil {
|
||||||
|
@ -35,7 +35,7 @@ type mockQueryRangeAPI struct {
|
|||||||
samples model.Matrix
|
samples model.Matrix
|
||||||
}
|
}
|
||||||
|
|
||||||
func (mockAPI mockQueryRangeAPI) QueryRange(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) {
|
func (mockAPI mockQueryRangeAPI) QueryRange(_ context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) {
|
||||||
return mockAPI.samples, v1.Warnings{}, nil
|
return mockAPI.samples, v1.Warnings{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -124,10 +124,10 @@ func TestBackfillRuleIntegration(t *testing.T) {
|
|||||||
blocks := db.Blocks()
|
blocks := db.Blocks()
|
||||||
require.Equal(t, (i+1)*tt.expectedBlockCount, len(blocks))
|
require.Equal(t, (i+1)*tt.expectedBlockCount, len(blocks))
|
||||||
|
|
||||||
q, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64)
|
q, err := db.Querier(math.MinInt64, math.MaxInt64)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
selectedSeries := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
|
selectedSeries := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
|
||||||
var seriesCount, samplesCount int
|
var seriesCount, samplesCount int
|
||||||
for selectedSeries.Next() {
|
for selectedSeries.Next() {
|
||||||
seriesCount++
|
seriesCount++
|
||||||
@ -161,7 +161,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTestRuleImporter(ctx context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
|
func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
|
||||||
logger := log.NewNopLogger()
|
logger := log.NewNopLogger()
|
||||||
cfg := ruleImporterConfig{
|
cfg := ruleImporterConfig{
|
||||||
outputDir: tmpDir,
|
outputDir: tmpDir,
|
||||||
@ -248,11 +248,11 @@ func TestBackfillLabels(t *testing.T) {
|
|||||||
db, err := tsdb.Open(tmpDir, nil, nil, opts, nil)
|
db, err := tsdb.Open(tmpDir, nil, nil, opts, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
q, err := db.Querier(context.Background(), math.MinInt64, math.MaxInt64)
|
q, err := db.Querier(math.MinInt64, math.MaxInt64)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
t.Run("correct-labels", func(t *testing.T) {
|
t.Run("correct-labels", func(t *testing.T) {
|
||||||
selectedSeries := q.Select(false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
|
selectedSeries := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
|
||||||
for selectedSeries.Next() {
|
for selectedSeries.Next() {
|
||||||
series := selectedSeries.At()
|
series := selectedSeries.At()
|
||||||
expectedLabels := labels.FromStrings("__name__", "rulename", "name1", "value-from-rule")
|
expectedLabels := labels.FromStrings("__name__", "rulename", "name1", "value-from-rule")
|
||||||
|
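The importRule hunk above snaps the backfill window to block boundaries with integer division. A worked check of that arithmetic (a sketch; the 2h block duration is only an example value):

    // startOfBlock snaps down to the nearest multiple of blockDuration:
    // 7200000 * (10000000 / 7200000) = 7200000 * 1 = 7200000.
    const blockDuration int64 = 2 * 60 * 60 * 1000 // 2h in ms, example only
    startInMs := int64(10_000_000)
    startOfBlock := blockDuration * (startInMs / blockDuration)
    fmt.Println(startOfBlock) // 7200000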
15  cmd/promtool/testdata/no-test-group-interval.yml  (vendored, new file)
@@ -0,0 +1,15 @@
+tests:
+  - input_series:
+      - series: test
+        values: 0 1
+    promql_expr_test:
+      - expr: test
+        eval_time: 59s
+        exp_samples:
+          - value: 0
+            labels: test
+      - expr: test
+        eval_time: 1m
+        exp_samples:
+          - value: 1
+            labels: test
28  cmd/promtool/testdata/rules-bad.yml  (vendored, new file)
@@ -0,0 +1,28 @@
+# This is the rules file.
+
+groups:
+  - name: alerts
+    rules:
+      - alert: InstanceDown
+        expr: up == 0
+        for: 5m
+        labels:
+          severity: page
+        annotations:
+          summary: "Instance {{ $label.foo }} down"
+          description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes."
+      - alert: AlwaysFiring
+        expr: 1
+
+  - name: rules
+    rules:
+      - record: job:test:count_over_time1m
+        expr: sum without(instance) (count_over_time(test[1m]))
+
+      # A recording rule that doesn't depend on input series.
+      - record: fixed_data
+        expr: 1
+
+      # Subquery with default resolution test.
+      - record: suquery_interval_test
+        expr: count_over_time(up[5m:])
46  cmd/promtool/testdata/unittest.yml  (vendored)
@@ -10,6 +10,21 @@ tests:
       - series: test_full
         values: "0 0"
 
+      - series: test_repeat
+        values: "1x2"
+
+      - series: test_increase
+        values: "1+1x2"
+
+      - series: test_histogram
+        values: "{{schema:1 sum:-0.3 count:32.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}"
+
+      - series: test_histogram_repeat
+        values: "{{sum:3 count:2 buckets:[2]}}x2"
+
+      - series: test_histogram_increase
+        values: "{{sum:3 count:2 buckets:[2]}}+{{sum:1.3 count:1 buckets:[1]}}x2"
+
       - series: test_stale
         values: "0 stale"
 
@@ -31,6 +46,37 @@ tests:
         exp_samples:
           - value: 60
 
+      # Repeat & increase
+      - expr: test_repeat
+        eval_time: 2m
+        exp_samples:
+          - value: 1
+            labels: "test_repeat"
+      - expr: test_increase
+        eval_time: 2m
+        exp_samples:
+          - value: 3
+            labels: "test_increase"
+
+      # Histograms
+      - expr: test_histogram
+        eval_time: 1m
+        exp_samples:
+          - labels: "test_histogram"
+            histogram: "{{schema:1 sum:-0.3 count:32.1 z_bucket:7.1 z_bucket_w:0.05 buckets:[5.1 10 7] offset:-3 n_buckets:[4.1 5] n_offset:-5}}"
+
+      - expr: test_histogram_repeat
+        eval_time: 2m
+        exp_samples:
+          - labels: "test_histogram_repeat"
+            histogram: "{{count:2 sum:3 buckets:[2]}}"
+
+      - expr: test_histogram_increase
+        eval_time: 2m
+        exp_samples:
+          - labels: "test_histogram_increase"
+            histogram: "{{count:4 sum:5.6 buckets:[4]}}"
+
       # Ensure a value is stale as soon as it is marked as such.
       - expr: test_stale
         eval_time: 59s
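The `{{...}}` values above use the native-histogram series notation; as the unittest.go hunk further below shows, promtool feeds them through the PromQL series-description parser. A minimal sketch of that round trip (assumes the promql/parser and model/histogram packages are imported):

    // "{} " prepends an empty label set so only the value is described.
    _, values, err := parser.ParseSeriesDesc("{} {{count:2 sum:3 buckets:[2]}}")
    if err != nil || len(values) != 1 || values[0].Histogram == nil {
        // malformed histogram description
    }
    var fh *histogram.FloatHistogram = values[0].Histogram
    _ = fh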
@@ -16,33 +16,32 @@ package main
 import (
 	"bufio"
 	"context"
+	"errors"
 	"fmt"
 	"io"
-	"math"
 	"os"
 	"path/filepath"
 	"runtime"
 	"runtime/pprof"
-	"sort"
 	"strconv"
 	"strings"
 	"sync"
 	"text/tabwriter"
 	"time"
 
-	"github.com/prometheus/prometheus/promql/parser"
-	"github.com/prometheus/prometheus/storage"
-	"github.com/prometheus/prometheus/tsdb/chunkenc"
-	"github.com/prometheus/prometheus/tsdb/index"
-
 	"github.com/alecthomas/units"
 	"github.com/go-kit/log"
+	"golang.org/x/exp/slices"
 
 	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/prometheus/prometheus/tsdb/chunks"
 	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
 	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/index"
 )
 
 const timeDelta = 30000
@@ -398,28 +397,33 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
 	if err != nil {
 		return nil, nil, err
 	}
-	blocks, err := db.Blocks()
+
+	if blockID == "" {
+		blockID, err = db.LastBlockID()
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	b, err := db.Block(blockID)
 	if err != nil {
 		return nil, nil, err
 	}
-	var block tsdb.BlockReader
-	if blockID != "" {
-		for _, b := range blocks {
-			if b.Meta().ULID.String() == blockID {
-				block = b
-				break
-			}
-		}
-	} else if len(blocks) > 0 {
-		block = blocks[len(blocks)-1]
-	}
-	if block == nil {
-		return nil, nil, fmt.Errorf("block %s not found", blockID)
-	}
-	return db, block, nil
+
+	return db, b, nil
 }
 
-func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
+func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExtended bool, matchers string) error {
+	var (
+		selectors []*labels.Matcher
+		err       error
+	)
+	if len(matchers) > 0 {
+		selectors, err = parser.ParseMetricSelector(matchers)
+		if err != nil {
+			return err
+		}
+	}
 	db, block, err := openBlock(path, blockID)
 	if err != nil {
 		return err
@@ -432,14 +436,17 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
 	fmt.Printf("Block ID: %s\n", meta.ULID)
 	// Presume 1ms resolution that Prometheus uses.
 	fmt.Printf("Duration: %s\n", (time.Duration(meta.MaxTime-meta.MinTime) * 1e6).String())
-	fmt.Printf("Series: %d\n", meta.Stats.NumSeries)
+	fmt.Printf("Total Series: %d\n", meta.Stats.NumSeries)
+	if len(matchers) > 0 {
+		fmt.Printf("Matcher: %s\n", matchers)
+	}
 	ir, err := block.Index()
 	if err != nil {
 		return err
 	}
 	defer ir.Close()
 
-	allLabelNames, err := ir.LabelNames()
+	allLabelNames, err := ir.LabelNames(ctx, selectors...)
 	if err != nil {
 		return err
 	}
@@ -452,7 +459,16 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
 	postingInfos := []postingInfo{}
 
 	printInfo := func(postingInfos []postingInfo) {
-		sort.Slice(postingInfos, func(i, j int) bool { return postingInfos[i].metric > postingInfos[j].metric })
+		slices.SortFunc(postingInfos, func(a, b postingInfo) int {
+			switch {
+			case b.metric < a.metric:
+				return -1
+			case b.metric > a.metric:
+				return 1
+			default:
+				return 0
+			}
+		})
 
 		for i, pc := range postingInfos {
 			if i >= limit {
@@ -466,10 +482,30 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
 	labelpairsUncovered := map[string]uint64{}
 	labelpairsCount := map[string]uint64{}
 	entries := 0
-	p, err := ir.Postings("", "") // The special all key.
-	if err != nil {
-		return err
+	var (
+		p    index.Postings
+		refs []storage.SeriesRef
+	)
+	if len(matchers) > 0 {
+		p, err = tsdb.PostingsForMatchers(ctx, ir, selectors...)
+		if err != nil {
+			return err
+		}
+		// Expand refs first and cache in memory.
+		// So later we don't have to expand again.
+		refs, err = index.ExpandPostings(p)
+		if err != nil {
+			return err
+		}
+		fmt.Printf("Matched series: %d\n", len(refs))
+		p = index.NewListPostings(refs)
+	} else {
+		p, err = ir.Postings(ctx, "", "") // The special all key.
+		if err != nil {
+			return err
+		}
 	}
 
 	chks := []chunks.Meta{}
 	builder := labels.ScratchBuilder{}
 	for p.Next() {
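The matcher branch above leans on the fact that a Postings iterator is single-use: it expands the iterator into a slice of series refs once, then rebuilds cheap iterators from that slice. A sketch of the pattern using the same index helpers (fragment, as it would appear inside such a function):

    // Drain the postings into memory once...
    refs, err := index.ExpandPostings(p) // p is consumed here
    if err != nil {
        return err
    }
    // ...then replay them as often as needed without re-querying the index.
    firstPass := index.NewListPostings(refs)
    secondPass := index.NewListPostings(refs)
    _, _ = firstPass, secondPass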
@@ -518,7 +554,7 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
 
 	postingInfos = postingInfos[:0]
 	for _, n := range allLabelNames {
-		values, err := ir.SortedLabelValues(n)
+		values, err := ir.SortedLabelValues(ctx, n, selectors...)
 		if err != nil {
 			return err
 		}
@@ -534,7 +570,7 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
 
 	postingInfos = postingInfos[:0]
 	for _, n := range allLabelNames {
-		lv, err := ir.SortedLabelValues(n)
+		lv, err := ir.SortedLabelValues(ctx, n, selectors...)
 		if err != nil {
 			return err
 		}
@@ -544,15 +580,16 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
 	printInfo(postingInfos)
 
 	postingInfos = postingInfos[:0]
-	lv, err := ir.SortedLabelValues("__name__")
+	lv, err := ir.SortedLabelValues(ctx, "__name__", selectors...)
 	if err != nil {
 		return err
 	}
 	for _, n := range lv {
-		postings, err := ir.Postings("__name__", n)
+		postings, err := ir.Postings(ctx, "__name__", n)
 		if err != nil {
 			return err
 		}
+		postings = index.Intersect(postings, index.NewListPostings(refs))
 		count := 0
 		for postings.Next() {
 			count++
@@ -566,17 +603,24 @@ func analyzeBlock(path, blockID string, limit int, runExtended bool) error {
 	printInfo(postingInfos)
 
 	if runExtended {
-		return analyzeCompaction(block, ir)
+		return analyzeCompaction(ctx, block, ir, selectors)
 	}
 
 	return nil
 }
 
-func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err error) {
-	postingsr, err := indexr.Postings(index.AllPostingsKey())
+func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.IndexReader, matchers []*labels.Matcher) (err error) {
+	var postingsr index.Postings
+	if len(matchers) > 0 {
+		postingsr, err = tsdb.PostingsForMatchers(ctx, indexr, matchers...)
+	} else {
+		n, v := index.AllPostingsKey()
+		postingsr, err = indexr.Postings(ctx, n, v)
+	}
 	if err != nil {
 		return err
 	}
 
 	chunkr, err := block.Chunks()
 	if err != nil {
 		return err
@@ -585,10 +629,12 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err
 		err = tsdb_errors.NewMulti(err, chunkr.Close()).Err()
 	}()
 
-	const maxSamplesPerChunk = 120
-	nBuckets := 10
-	histogram := make([]int, nBuckets)
 	totalChunks := 0
+	floatChunkSamplesCount := make([]int, 0)
+	floatChunkSize := make([]int, 0)
+	histogramChunkSamplesCount := make([]int, 0)
+	histogramChunkSize := make([]int, 0)
+	histogramChunkBucketsCount := make([]int, 0)
 	var builder labels.ScratchBuilder
 	for postingsr.Next() {
 		var chks []chunks.Meta
@@ -598,34 +644,69 @@ func analyzeCompaction(block tsdb.BlockReader, indexr tsdb.IndexReader) (err err
 
 		for _, chk := range chks {
 			// Load the actual data of the chunk.
-			chk, err := chunkr.Chunk(chk)
+			chk, iterable, err := chunkr.ChunkOrIterable(chk)
 			if err != nil {
 				return err
 			}
-			chunkSize := math.Min(float64(chk.NumSamples()), maxSamplesPerChunk)
-			// Calculate the bucket for the chunk and increment it in the histogram.
-			bucket := int(math.Ceil(float64(nBuckets)*chunkSize/maxSamplesPerChunk)) - 1
-			histogram[bucket]++
+			// Chunks within blocks should not need to be re-written, so an
+			// iterable is not expected to be returned from the chunk reader.
+			if iterable != nil {
+				return errors.New("ChunkOrIterable should not return an iterable when reading a block")
+			}
+			switch chk.Encoding() {
+			case chunkenc.EncXOR:
+				floatChunkSamplesCount = append(floatChunkSamplesCount, chk.NumSamples())
+				floatChunkSize = append(floatChunkSize, len(chk.Bytes()))
+			case chunkenc.EncFloatHistogram:
+				histogramChunkSamplesCount = append(histogramChunkSamplesCount, chk.NumSamples())
+				histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
+				fhchk, ok := chk.(*chunkenc.FloatHistogramChunk)
+				if !ok {
+					return fmt.Errorf("chunk is not FloatHistogramChunk")
+				}
+				it := fhchk.Iterator(nil)
+				bucketCount := 0
+				for it.Next() == chunkenc.ValFloatHistogram {
+					_, f := it.AtFloatHistogram()
+					bucketCount += len(f.PositiveBuckets)
+					bucketCount += len(f.NegativeBuckets)
+				}
+				histogramChunkBucketsCount = append(histogramChunkBucketsCount, bucketCount)
+			case chunkenc.EncHistogram:
+				histogramChunkSamplesCount = append(histogramChunkSamplesCount, chk.NumSamples())
+				histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
+				hchk, ok := chk.(*chunkenc.HistogramChunk)
+				if !ok {
+					return fmt.Errorf("chunk is not HistogramChunk")
+				}
+				it := hchk.Iterator(nil)
+				bucketCount := 0
+				for it.Next() == chunkenc.ValHistogram {
+					_, f := it.AtHistogram()
+					bucketCount += len(f.PositiveBuckets)
+					bucketCount += len(f.NegativeBuckets)
+				}
+				histogramChunkBucketsCount = append(histogramChunkBucketsCount, bucketCount)
+			}
 			totalChunks++
 		}
 	}
 
 	fmt.Printf("\nCompaction analysis:\n")
-	fmt.Println("Fullness: Amount of samples in chunks (100% is 120 samples)")
-	// Normalize absolute counts to percentages and print them out.
-	for bucket, count := range histogram {
-		percentage := 100.0 * count / totalChunks
-		fmt.Printf("%7d%%: ", (bucket+1)*10)
-		for j := 0; j < percentage; j++ {
-			fmt.Printf("#")
-		}
-		fmt.Println()
-	}
+	fmt.Println()
+	displayHistogram("samples per float chunk", floatChunkSamplesCount, totalChunks)
+
+	displayHistogram("bytes per float chunk", floatChunkSize, totalChunks)
+
+	displayHistogram("samples per histogram chunk", histogramChunkSamplesCount, totalChunks)
+
+	displayHistogram("bytes per histogram chunk", histogramChunkSize, totalChunks)
+
+	displayHistogram("buckets per histogram chunk", histogramChunkBucketsCount, totalChunks)
 	return nil
 }
 
-func dumpSamples(path string, mint, maxt int64, match string) (err error) {
+func dumpSamples(ctx context.Context, path string, mint, maxt int64, match string) (err error) {
 	db, err := tsdb.OpenDBReadOnly(path, nil)
 	if err != nil {
 		return err
@@ -633,7 +714,7 @@ func dumpSamples(path string, mint, maxt int64, match string) (err error) {
 	defer func() {
 		err = tsdb_errors.NewMulti(err, db.Close()).Err()
 	}()
-	q, err := db.Querier(context.TODO(), mint, maxt)
+	q, err := db.Querier(mint, maxt)
 	if err != nil {
 		return err
 	}
@@ -643,7 +724,7 @@ func dumpSamples(ctx context.Context, path string, mint, maxt int64, match string) (err error) {
 	if err != nil {
 		return err
 	}
-	ss := q.Select(false, nil, matchers...)
+	ss := q.Select(ctx, false, nil, matchers...)
 
 	for ss.Next() {
 		series := ss.At()
@@ -653,13 +734,21 @@ func dumpSamples(ctx context.Context, path string, mint, maxt int64, match string) (err error) {
 			ts, val := it.At()
 			fmt.Printf("%s %g %d\n", lbs, val, ts)
 		}
+		for it.Next() == chunkenc.ValFloatHistogram {
+			ts, fh := it.AtFloatHistogram()
+			fmt.Printf("%s %s %d\n", lbs, fh.String(), ts)
+		}
+		for it.Next() == chunkenc.ValHistogram {
+			ts, h := it.AtHistogram()
+			fmt.Printf("%s %s %d\n", lbs, h.String(), ts)
+		}
 		if it.Err() != nil {
 			return ss.Err()
 		}
 	}
 
 	if ws := ss.Warnings(); len(ws) > 0 {
-		return tsdb_errors.NewMulti(ws...).Err()
+		return tsdb_errors.NewMulti(ws.AsErrors()...).Err()
 	}
 
 	if ss.Err() != nil {
@@ -689,3 +778,42 @@ func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxB
 
 	return checkErr(backfill(5000, inputFile.Bytes(), outputDir, humanReadable, quiet, maxBlockDuration))
 }
+
+func displayHistogram(dataType string, datas []int, total int) {
+	slices.Sort(datas)
+	start, end, step := generateBucket(datas[0], datas[len(datas)-1])
+	sum := 0
+	buckets := make([]int, (end-start)/step+1)
+	maxCount := 0
+	for _, c := range datas {
+		sum += c
+		buckets[(c-start)/step]++
+		if buckets[(c-start)/step] > maxCount {
+			maxCount = buckets[(c-start)/step]
+		}
+	}
+	avg := sum / len(datas)
+	fmt.Printf("%s (min/avg/max): %d/%d/%d\n", dataType, datas[0], avg, datas[len(datas)-1])
+	maxLeftLen := strconv.Itoa(len(fmt.Sprintf("%d", end)))
+	maxRightLen := strconv.Itoa(len(fmt.Sprintf("%d", end+step)))
+	maxCountLen := strconv.Itoa(len(fmt.Sprintf("%d", maxCount)))
+	for bucket, count := range buckets {
+		percentage := 100.0 * count / total
+		fmt.Printf("[%"+maxLeftLen+"d, %"+maxRightLen+"d]: %"+maxCountLen+"d %s\n", bucket*step+start+1, (bucket+1)*step+start, count, strings.Repeat("#", percentage))
+	}
+	fmt.Println()
+}
+
+func generateBucket(min, max int) (start, end, step int) {
+	s := (max - min) / 10
+
+	step = 10
+	for step < s && step <= 10000 {
+		step *= 10
+	}
+
+	start = min - min%step
+	end = max - max%step + step
+
+	return
+}
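A worked example of generateBucket, matching the unit test that follows: with min=101 and max=141, s = (141-101)/10 = 4, so step stays at 10; start = 101 - 101%10 = 100 and end = 141 - 141%10 + 10 = 150.

    start, end, step := generateBucket(101, 141)
    fmt.Println(start, end, step) // 100 150 10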
43  cmd/promtool/tsdb_test.go  (new file)
@@ -0,0 +1,43 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestGenerateBucket(t *testing.T) {
+	tcs := []struct {
+		min, max         int
+		start, end, step int
+	}{
+		{
+			min:   101,
+			max:   141,
+			start: 100,
+			end:   150,
+			step:  10,
+		},
+	}
+
+	for _, tc := range tcs {
+		start, end, step := generateBucket(tc.min, tc.max)
+
+		require.Equal(t, tc.start, start)
+		require.Equal(t, tc.end, end)
+		require.Equal(t, tc.step, step)
+	}
+}
@@ -30,6 +30,7 @@ import (
 	"github.com/prometheus/common/model"
 	"gopkg.in/yaml.v2"
 
+	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/promql/parser"
@@ -105,6 +106,9 @@ func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts, run *regexp.
 			continue
 		}
 
+		if t.Interval == 0 {
+			t.Interval = unitTestInp.EvaluationInterval
+		}
 		ers := t.test(evalInterval, groupOrderMap, queryOpts, unitTestInp.RuleFiles...)
 		if ers != nil {
 			errs = append(errs, ers...)
@@ -148,7 +152,7 @@ func resolveAndGlobFilepaths(baseDir string, utf *unitTestFile) error {
 		if err != nil {
 			return err
 		}
-		if len(m) <= 0 {
+		if len(m) == 0 {
 			fmt.Fprintln(os.Stderr, "  WARNING: no file match pattern", rf)
 		}
 		globbedFiles = append(globbedFiles, m...)
@@ -258,7 +262,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
 		g.Eval(suite.Context(), ts)
 		for _, r := range g.Rules() {
 			if r.LastError() != nil {
-				evalErrs = append(evalErrs, fmt.Errorf("    rule: %s, time: %s, err: %v",
+				evalErrs = append(evalErrs, fmt.Errorf("    rule: %s, time: %s, err: %w",
 					r.Name(), ts.Sub(time.Unix(0, 0).UTC()), r.LastError()))
 			}
 		}
@@ -364,14 +368,29 @@ Outer:
 			var gotSamples []parsedSample
 			for _, s := range got {
 				gotSamples = append(gotSamples, parsedSample{
 					Labels: s.Metric.Copy(),
-					Value:  s.V,
+					Value:  s.F,
+					Histogram: promql.HistogramTestExpression(s.H),
 				})
 			}
 
 			var expSamples []parsedSample
 			for _, s := range testCase.ExpSamples {
 				lb, err := parser.ParseMetric(s.Labels)
+				var hist *histogram.FloatHistogram
+				if err == nil && s.Histogram != "" {
+					_, values, parseErr := parser.ParseSeriesDesc("{} " + s.Histogram)
+					switch {
+					case parseErr != nil:
+						err = parseErr
+					case len(values) != 1:
+						err = fmt.Errorf("expected 1 value, got %d", len(values))
+					case values[0].Histogram == nil:
+						err = fmt.Errorf("expected histogram, got %v", values[0])
+					default:
+						hist = values[0].Histogram
+					}
+				}
 				if err != nil {
 					err = fmt.Errorf("labels %q: %w", s.Labels, err)
 					errs = append(errs, fmt.Errorf("    expr: %q, time: %s, err: %w", testCase.Expr,
@@ -379,8 +398,9 @@ Outer:
 					continue Outer
 				}
 				expSamples = append(expSamples, parsedSample{
 					Labels: lb,
 					Value:  s.Value,
+					Histogram: promql.HistogramTestExpression(hist),
 				})
 			}
 
@@ -452,7 +472,7 @@ func (tg *testGroup) maxEvalTime() time.Duration {
 }
 
 func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, qu storage.Queryable) (promql.Vector, error) {
-	q, err := engine.NewInstantQuery(qu, nil, qs, t)
+	q, err := engine.NewInstantQuery(ctx, qu, nil, qs, t)
 	if err != nil {
 		return nil, err
 	}
@@ -465,7 +485,8 @@ func query(ctx context.Context, qs string, t time.Time, engine *promql.Engine, q
 		return v, nil
 	case promql.Scalar:
 		return promql.Vector{promql.Sample{
-			Point:  promql.Point{T: v.T, V: v.V},
+			T:      v.T,
+			F:      v.V,
 			Metric: labels.Labels{},
 		}}, nil
 	default:
@@ -547,14 +568,16 @@ type promqlTestCase struct {
 }
 
 type sample struct {
 	Labels string  `yaml:"labels"`
 	Value  float64 `yaml:"value"`
+	Histogram string `yaml:"histogram"` // A non-empty string means Value is ignored.
 }
 
 // parsedSample is a sample with parsed Labels.
 type parsedSample struct {
 	Labels labels.Labels
 	Value  float64
+	Histogram string // TestExpression() of histogram.FloatHistogram
 }
 
 func parsedSamplesString(pss []parsedSample) string {
@@ -569,5 +592,8 @@ func parsedSamplesString(pss []parsedSample) string {
 }
 
 func (ps *parsedSample) String() string {
+	if ps.Histogram != "" {
+		return ps.Labels.String() + " " + ps.Histogram
+	}
 	return ps.Labels.String() + " " + strconv.FormatFloat(ps.Value, 'E', -1, 64)
 }
 
@@ -112,6 +112,16 @@ func TestRulesUnitTest(t *testing.T) {
 			},
 			want: 0,
 		},
+		{
+			name: "No test group interval",
+			args: args{
+				files: []string{"./testdata/no-test-group-interval.yml"},
+			},
+			queryOpts: promql.LazyLoaderOpts{
+				EnableNegativeOffset: true,
+			},
+			want: 0,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
229  config/config.go
@@ -19,6 +19,7 @@ import (
 	"net/url"
 	"os"
 	"path/filepath"
+	"sort"
 	"strings"
 	"time"
 
@@ -34,6 +35,7 @@ import (
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/relabel"
+	"github.com/prometheus/prometheus/storage/remote/azuread"
 )
 
 var (
@@ -142,17 +144,21 @@ var (
 		ScrapeInterval:     model.Duration(1 * time.Minute),
 		ScrapeTimeout:      model.Duration(10 * time.Second),
 		EvaluationInterval: model.Duration(1 * time.Minute),
+		// When native histogram feature flag is enabled, ScrapeProtocols default
+		// changes to DefaultNativeHistogramScrapeProtocols.
+		ScrapeProtocols: DefaultScrapeProtocols,
 	}
 
 	// DefaultScrapeConfig is the default scrape configuration.
 	DefaultScrapeConfig = ScrapeConfig{
-		// ScrapeTimeout and ScrapeInterval default to the
-		// configured globals.
+		// ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals.
+		ScrapeClassicHistograms: false,
 		MetricsPath:      "/metrics",
 		Scheme:           "http",
 		HonorLabels:      false,
 		HonorTimestamps:  true,
 		HTTPClientConfig: config.DefaultHTTPClientConfig,
+		EnableCompression: true,
 	}
 
 	// DefaultAlertmanagerConfig is the default alertmanager configuration.
@@ -173,16 +179,16 @@ var (
 
 	// DefaultQueueConfig is the default remote queue configuration.
 	DefaultQueueConfig = QueueConfig{
-		// With a maximum of 200 shards, assuming an average of 100ms remote write
-		// time and 500 samples per batch, we will be able to push 1M samples/s.
-		MaxShards:         200,
+		// With a maximum of 50 shards, assuming an average of 100ms remote write
+		// time and 2000 samples per batch, we will be able to push 1M samples/s.
+		MaxShards:         50,
 		MinShards:         1,
-		MaxSamplesPerSend: 500,
+		MaxSamplesPerSend: 2000,
 
-		// Each shard will have a max of 2500 samples pending in its channel, plus the pending
-		// samples that have been enqueued. Theoretically we should only ever have about 3000 samples
-		// per shard pending. At 200 shards that's 600k.
-		Capacity:          2500,
+		// Each shard will have a max of 10,000 samples pending in its channel, plus the pending
+		// samples that have been enqueued. Theoretically we should only ever have about 12,000 samples
+		// per shard pending. At 50 shards that's 600k.
+		Capacity:          10000,
 		BatchSendDeadline: model.Duration(5 * time.Second),
 
 		// Backoff times for retrying a batch of samples on recoverable errors.
@@ -194,7 +200,7 @@ var (
 	DefaultMetadataConfig = MetadataConfig{
 		Send:              true,
 		SendInterval:      model.Duration(1 * time.Minute),
-		MaxSamplesPerSend: 500,
+		MaxSamplesPerSend: 2000,
 	}
 
 	// DefaultRemoteReadConfig is the default remote read configuration.
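The 1M samples/s figure in the updated comment follows directly from the new defaults; a quick arithmetic check (a sketch, not part of the diff):

    // 50 shards * 2000 samples per batch / 0.1s per remote write
    // = 1,000,000 samples/s; per-shard backlog is 10,000 queued
    // plus ~2,000 in flight, i.e. roughly 12,000, or 600k across 50 shards.
    shards, batch, writeSeconds := 50.0, 2000.0, 0.1
    fmt.Println(shards * batch / writeSeconds) // 1e+06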
@@ -258,7 +264,7 @@ func (c Config) String() string {
 	return string(b)
 }
 
-// ScrapeConfigs returns the scrape configurations.
+// GetScrapeConfigs returns the scrape configurations.
 func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
 	scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs))
 
@@ -266,7 +272,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
 	for i, scfg := range c.ScrapeConfigs {
 		// We do these checks for library users that would not call Validate in
 		// Unmarshal.
-		if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil {
+		if err := scfg.Validate(c.GlobalConfig); err != nil {
 			return nil, err
 		}
 
@@ -293,7 +299,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
 			return nil, fileErr(filename, err)
 		}
 		for _, scfg := range cfg.ScrapeConfigs {
-			if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil {
+			if err := scfg.Validate(c.GlobalConfig); err != nil {
 				return nil, fileErr(filename, err)
 			}
 
@@ -342,7 +348,7 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	// Do global overrides and validate unique names.
 	jobNames := map[string]struct{}{}
 	for _, scfg := range c.ScrapeConfigs {
-		if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil {
+		if err := scfg.Validate(c.GlobalConfig); err != nil {
 			return err
 		}
 
@@ -383,12 +389,100 @@ type GlobalConfig struct {
 	ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
 	// The default timeout when scraping targets.
 	ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
+	// The protocols to negotiate during a scrape. It tells clients what
+	// protocol are accepted by Prometheus and with what weight (most wanted is first).
+	// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
+	// OpenMetricsText1.0.0, PrometheusText0.0.4.
+	ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
 	// How frequently to evaluate rules by default.
 	EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`
 	// File to which PromQL queries are logged.
 	QueryLogFile string `yaml:"query_log_file,omitempty"`
 	// The labels to add to any timeseries that this Prometheus instance scrapes.
 	ExternalLabels labels.Labels `yaml:"external_labels,omitempty"`
+	// An uncompressed response body larger than this many bytes will cause the
+	// scrape to fail. 0 means no limit.
+	BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"`
+	// More than this many samples post metric-relabeling will cause the scrape to
+	// fail. 0 means no limit.
+	SampleLimit uint `yaml:"sample_limit,omitempty"`
+	// More than this many targets after the target relabeling will cause the
+	// scrapes to fail. 0 means no limit.
+	TargetLimit uint `yaml:"target_limit,omitempty"`
+	// More than this many labels post metric-relabeling will cause the scrape to
+	// fail. 0 means no limit.
+	LabelLimit uint `yaml:"label_limit,omitempty"`
+	// More than this label name length post metric-relabeling will cause the
+	// scrape to fail. 0 means no limit.
+	LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"`
+	// More than this label value length post metric-relabeling will cause the
+	// scrape to fail. 0 means no limit.
+	LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
+	// Keep no more than this many dropped targets per job.
+	// 0 means no limit.
+	KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
+}
+
+// ScrapeProtocol represents supported protocol for scraping metrics.
+type ScrapeProtocol string
+
+// Validate returns error if given scrape protocol is not supported.
+func (s ScrapeProtocol) Validate() error {
+	if _, ok := ScrapeProtocolsHeaders[s]; !ok {
+		return fmt.Errorf("unknown scrape protocol %v, supported: %v",
+			s, func() (ret []string) {
+				for k := range ScrapeProtocolsHeaders {
+					ret = append(ret, string(k))
+				}
+				sort.Strings(ret)
+				return ret
+			}())
+	}
+	return nil
+}
+
+var (
+	PrometheusProto      ScrapeProtocol = "PrometheusProto"
+	PrometheusText0_0_4  ScrapeProtocol = "PrometheusText0.0.4"
+	OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1"
+	OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0"
+
+	ScrapeProtocolsHeaders = map[ScrapeProtocol]string{
+		PrometheusProto:      "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
+		PrometheusText0_0_4:  "text/plain;version=0.0.4",
+		OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1",
+		OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0",
+	}
+
+	DefaultScrapeProtocols = []ScrapeProtocol{
+		OpenMetricsText1_0_0,
+		OpenMetricsText0_0_1,
+		PrometheusText0_0_4,
+	}
+	DefaultNativeHistogramScrapeProtocols = []ScrapeProtocol{
+		PrometheusProto,
+		OpenMetricsText1_0_0,
+		OpenMetricsText0_0_1,
+		PrometheusText0_0_4,
+	}
+)
+
+// validateAcceptScrapeProtocols return errors if we see problems with accept scrape protocols option.
+func validateAcceptScrapeProtocols(sps []ScrapeProtocol) error {
+	if len(sps) == 0 {
+		return errors.New("scrape_protocols cannot be empty")
+	}
+	dups := map[string]struct{}{}
+	for _, sp := range sps {
+		if _, ok := dups[strings.ToLower(string(sp))]; ok {
+			return fmt.Errorf("duplicated protocol in scrape_protocols, got %v", sps)
+		}
+		if err := sp.Validate(); err != nil {
+			return fmt.Errorf("scrape_protocols: %w", err)
+		}
+		dups[strings.ToLower(string(sp))] = struct{}{}
+	}
+	return nil
 }
 
 // SetDirectory joins any relative file paths with dir.
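Taken together, Validate rejects unknown protocol names and validateAcceptScrapeProtocols additionally rejects empty or duplicated lists. A sketch of the expected behavior from inside the config package (outputs inferred from the code above):

    fmt.Println(PrometheusText0_0_4.Validate())                   // <nil>
    fmt.Println(ScrapeProtocol("PrometheusText9.9.9").Validate()) // unknown scrape protocol ...
    fmt.Println(validateAcceptScrapeProtocols(nil))               // scrape_protocols cannot be empty
    fmt.Println(validateAcceptScrapeProtocols([]ScrapeProtocol{
        OpenMetricsText1_0_0, OpenMetricsText1_0_0,
    })) // duplicated protocol in scrape_protocols ...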
@ -436,6 +530,14 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|||||||
if gc.EvaluationInterval == 0 {
|
if gc.EvaluationInterval == 0 {
|
||||||
gc.EvaluationInterval = DefaultGlobalConfig.EvaluationInterval
|
gc.EvaluationInterval = DefaultGlobalConfig.EvaluationInterval
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if gc.ScrapeProtocols == nil {
|
||||||
|
gc.ScrapeProtocols = DefaultGlobalConfig.ScrapeProtocols
|
||||||
|
}
|
||||||
|
if err := validateAcceptScrapeProtocols(gc.ScrapeProtocols); err != nil {
|
||||||
|
return fmt.Errorf("%w for global config", err)
|
||||||
|
}
|
||||||
|
|
||||||
*c = *gc
|
*c = *gc
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -446,7 +548,8 @@ func (c *GlobalConfig) isZero() bool {
|
|||||||
c.ScrapeInterval == 0 &&
|
c.ScrapeInterval == 0 &&
|
||||||
c.ScrapeTimeout == 0 &&
|
c.ScrapeTimeout == 0 &&
|
||||||
c.EvaluationInterval == 0 &&
|
c.EvaluationInterval == 0 &&
|
||||||
c.QueryLogFile == ""
|
c.QueryLogFile == "" &&
|
||||||
|
c.ScrapeProtocols == nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type ScrapeConfigs struct {
|
type ScrapeConfigs struct {
|
||||||
@ -461,34 +564,51 @@ type ScrapeConfig struct {
|
|||||||
HonorLabels bool `yaml:"honor_labels,omitempty"`
|
HonorLabels bool `yaml:"honor_labels,omitempty"`
|
||||||
// Indicator whether the scraped timestamps should be respected.
|
// Indicator whether the scraped timestamps should be respected.
|
||||||
HonorTimestamps bool `yaml:"honor_timestamps"`
|
HonorTimestamps bool `yaml:"honor_timestamps"`
|
||||||
|
// Indicator whether to track the staleness of the scraped timestamps.
|
||||||
|
TrackTimestampsStaleness bool `yaml:"track_timestamps_staleness"`
|
||||||
// A set of query parameters with which the target is scraped.
|
// A set of query parameters with which the target is scraped.
|
||||||
Params url.Values `yaml:"params,omitempty"`
|
Params url.Values `yaml:"params,omitempty"`
|
||||||
// How frequently to scrape the targets of this scrape config.
|
// How frequently to scrape the targets of this scrape config.
|
||||||
ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
|
ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"`
|
||||||
// The timeout for scraping targets of this config.
|
// The timeout for scraping targets of this config.
|
||||||
ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
|
ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"`
|
||||||
|
// The protocols to negotiate during a scrape. It tells clients what
|
||||||
|
// protocol are accepted by Prometheus and with what preference (most wanted is first).
|
||||||
|
// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
|
||||||
|
// OpenMetricsText1.0.0, PrometheusText0.0.4.
|
||||||
|
ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
|
||||||
|
// Whether to scrape a classic histogram that is also exposed as a native histogram.
|
||||||
|
ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"`
|
||||||
// The HTTP resource path on which to fetch metrics from targets.
|
// The HTTP resource path on which to fetch metrics from targets.
|
||||||
MetricsPath string `yaml:"metrics_path,omitempty"`
|
MetricsPath string `yaml:"metrics_path,omitempty"`
|
||||||
// The URL scheme with which to fetch metrics from targets.
|
// The URL scheme with which to fetch metrics from targets.
|
||||||
Scheme string `yaml:"scheme,omitempty"`
|
Scheme string `yaml:"scheme,omitempty"`
|
||||||
|
// Indicator whether to request compressed response from the target.
|
||||||
|
EnableCompression bool `yaml:"enable_compression"`
|
||||||
// An uncompressed response body larger than this many bytes will cause the
|
// An uncompressed response body larger than this many bytes will cause the
|
||||||
// scrape to fail. 0 means no limit.
|
// scrape to fail. 0 means no limit.
|
||||||
BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"`
|
BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"`
|
||||||
// More than this many samples post metric-relabeling will cause the scrape to
|
// More than this many samples post metric-relabeling will cause the scrape to
|
||||||
// fail.
|
// fail. 0 means no limit.
|
||||||
SampleLimit uint `yaml:"sample_limit,omitempty"`
|
SampleLimit uint `yaml:"sample_limit,omitempty"`
|
||||||
// More than this many targets after the target relabeling will cause the
|
// More than this many targets after the target relabeling will cause the
|
||||||
// scrapes to fail.
|
// scrapes to fail. 0 means no limit.
|
||||||
TargetLimit uint `yaml:"target_limit,omitempty"`
|
TargetLimit uint `yaml:"target_limit,omitempty"`
|
||||||
// More than this many labels post metric-relabeling will cause the scrape to
|
// More than this many labels post metric-relabeling will cause the scrape to
|
||||||
// fail.
|
// fail. 0 means no limit.
|
||||||
LabelLimit uint `yaml:"label_limit,omitempty"`
|
LabelLimit uint `yaml:"label_limit,omitempty"`
|
||||||
// More than this label name length post metric-relabeling will cause the
|
// More than this label name length post metric-relabeling will cause the
|
||||||
// scrape to fail.
|
// scrape to fail. 0 means no limit.
|
||||||
LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"`
|
LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"`
|
||||||
// More than this label value length post metric-relabeling will cause the
|
// More than this label value length post metric-relabeling will cause the
|
||||||
// scrape to fail.
|
// scrape to fail. 0 means no limit.
|
||||||
LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
|
LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
|
||||||
|
// More than this many buckets in a native histogram will cause the scrape to
|
||||||
|
// fail.
|
||||||
|
NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"`
|
||||||
|
// Keep no more than this many dropped targets per job.
|
||||||
|
// 0 means no limit.
|
||||||
|
KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
|
||||||
|
|
||||||
// We cannot do proper Go type embedding below as the parser will then parse
|
// We cannot do proper Go type embedding below as the parser will then parse
|
||||||
// values arbitrarily into the overflow maps of further-down types.
|
// values arbitrarily into the overflow maps of further-down types.
|
||||||
@@ -546,25 +666,55 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	return nil
 }

-func (c *ScrapeConfig) Validate(defaultInterval, defaultTimeout model.Duration) error {
+// Validate validates scrape config, but also fills relevant default values from global config if needed.
+func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
 	if c == nil {
 		return errors.New("empty or null scrape config section")
 	}
 	// First set the correct scrape interval, then check that the timeout
 	// (inferred or explicit) is not greater than that.
 	if c.ScrapeInterval == 0 {
-		c.ScrapeInterval = defaultInterval
+		c.ScrapeInterval = globalConfig.ScrapeInterval
 	}
 	if c.ScrapeTimeout > c.ScrapeInterval {
 		return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName)
 	}
 	if c.ScrapeTimeout == 0 {
-		if defaultTimeout > c.ScrapeInterval {
+		if globalConfig.ScrapeTimeout > c.ScrapeInterval {
 			c.ScrapeTimeout = c.ScrapeInterval
 		} else {
-			c.ScrapeTimeout = defaultTimeout
+			c.ScrapeTimeout = globalConfig.ScrapeTimeout
 		}
 	}
+	if c.BodySizeLimit == 0 {
+		c.BodySizeLimit = globalConfig.BodySizeLimit
+	}
+	if c.SampleLimit == 0 {
+		c.SampleLimit = globalConfig.SampleLimit
+	}
+	if c.TargetLimit == 0 {
+		c.TargetLimit = globalConfig.TargetLimit
+	}
+	if c.LabelLimit == 0 {
+		c.LabelLimit = globalConfig.LabelLimit
+	}
+	if c.LabelNameLengthLimit == 0 {
+		c.LabelNameLengthLimit = globalConfig.LabelNameLengthLimit
+	}
+	if c.LabelValueLengthLimit == 0 {
+		c.LabelValueLengthLimit = globalConfig.LabelValueLengthLimit
+	}
+	if c.KeepDroppedTargets == 0 {
+		c.KeepDroppedTargets = globalConfig.KeepDroppedTargets
+	}
+
+	if c.ScrapeProtocols == nil {
+		c.ScrapeProtocols = globalConfig.ScrapeProtocols
+	}
+	if err := validateAcceptScrapeProtocols(c.ScrapeProtocols); err != nil {
+		return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName)
+	}
+
 	return nil
 }

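The new Validate treats a zero value as "unset, inherit the global default". A minimal standalone sketch of that idea (the Globals/Job types here are illustrative, not the actual Prometheus ones):

package main

import "fmt"

type Globals struct{ SampleLimit, TargetLimit uint }

type Job struct{ SampleLimit, TargetLimit uint }

// fillDefaults copies each global value into the job only when the
// job-level field was left at its zero value.
func fillDefaults(j *Job, g Globals) {
	if j.SampleLimit == 0 {
		j.SampleLimit = g.SampleLimit
	}
	if j.TargetLimit == 0 {
		j.TargetLimit = g.TargetLimit
	}
}

func main() {
	j := Job{TargetLimit: 35} // explicit per-job override
	fillDefaults(&j, Globals{SampleLimit: 1500, TargetLimit: 30})
	fmt.Println(j.SampleLimit, j.TargetLimit) // 1500 35
}

One consequence of this zero-means-inherit scheme, visible in the test expectations further down, is that a job can override a global limit with another value but cannot explicitly set it back to 0 ("no limit") once a global default is in place.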
@@ -766,6 +916,7 @@ type AlertmanagerConfig struct {

 	ServiceDiscoveryConfigs discovery.Configs       `yaml:"-"`
 	HTTPClientConfig        config.HTTPClientConfig `yaml:",inline"`
+	SigV4Config             *sigv4.SigV4Config      `yaml:"sigv4,omitempty"`

 	// The URL scheme to use when talking to Alertmanagers.
 	Scheme string `yaml:"scheme,omitempty"`
@@ -801,6 +952,13 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
 		return err
 	}

+	httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil ||
+		c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
+
+	if httpClientConfigAuthEnabled && c.SigV4Config != nil {
+		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
+	}
+
 	// Check for users putting URLs in target groups.
 	if len(c.RelabelConfigs) == 0 {
 		if err := checkStaticTargets(c.ServiceDiscoveryConfigs); err != nil {
@@ -864,6 +1022,7 @@ type RemoteWriteConfig struct {
 	QueueConfig      QueueConfig            `yaml:"queue_config,omitempty"`
 	MetadataConfig   MetadataConfig         `yaml:"metadata_config,omitempty"`
 	SigV4Config      *sigv4.SigV4Config     `yaml:"sigv4,omitempty"`
+	AzureADConfig    *azuread.AzureADConfig `yaml:"azuread,omitempty"`
 }

 // SetDirectory joins any relative file paths with dir.
@@ -900,8 +1059,12 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
 	httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil ||
 		c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil

-	if httpClientConfigAuthEnabled && c.SigV4Config != nil {
-		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
+	if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) {
+		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
+	}
+
+	if c.SigV4Config != nil && c.AzureADConfig != nil {
+		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
 	}

 	return nil
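Both UnmarshalYAML hooks enforce "at most one auth mechanism" with pairwise nil checks. An equivalent way to express the same rule, shown here only as an illustration (not the code the PR uses), is to count the configured mechanisms:

package main

import (
	"errors"
	"fmt"
)

// atMostOneAuth returns an error when more than one of the supplied
// "is this mechanism configured?" flags is true.
func atMostOneAuth(configured ...bool) error {
	n := 0
	for _, c := range configured {
		if c {
			n++
		}
	}
	if n > 1 {
		return errors.New("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured")
	}
	return nil
}

func main() {
	// basic_auth and sigv4 both set -> error
	fmt.Println(atMostOneAuth(true, false, false, true, false))
}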
@@ -922,7 +1085,7 @@ func validateHeadersForTracing(headers map[string]string) error {
 func validateHeaders(headers map[string]string) error {
 	for header := range headers {
 		if strings.ToLower(header) == "authorization" {
-			return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter")
+			return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter")
 		}
 		if _, ok := reservedHeaders[strings.ToLower(header)]; ok {
 			return fmt.Errorf("%s is a reserved header. It must not be changed", header)
@@ -68,6 +68,15 @@ func mustParseURL(u string) *config.URL {
 	return &config.URL{URL: parsed}
 }

+const (
+	globBodySizeLimit         = 15 * units.MiB
+	globSampleLimit           = 1500
+	globTargetLimit           = 30
+	globLabelLimit            = 30
+	globLabelNameLengthLimit  = 200
+	globLabelValueLengthLimit = 200
+)
+
 var expectedConf = &Config{
 	GlobalConfig: GlobalConfig{
 		ScrapeInterval: model.Duration(15 * time.Second),
@@ -76,6 +85,14 @@ var expectedConf = &Config{
 		QueryLogFile: "",

 		ExternalLabels: labels.FromStrings("foo", "bar", "monitor", "codelab"),
+
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
 	},

 	RuleFiles: []string{
@@ -165,10 +182,18 @@ var expectedConf = &Config{
 	{
 		JobName: "prometheus",

 		HonorLabels:     true,
 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -261,11 +286,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-x",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(50 * time.Second),
 		ScrapeTimeout:   model.Duration(5 * time.Second),
-		BodySizeLimit:   10 * units.MiB,
-		SampleLimit:     1000,
+		EnableCompression:     true,
+		BodySizeLimit:         10 * units.MiB,
+		SampleLimit:           1000,
+		TargetLimit:           35,
+		LabelLimit:            35,
+		LabelNameLengthLimit:  210,
+		LabelValueLengthLimit: 210,
+		ScrapeProtocols:       []ScrapeProtocol{PrometheusText0_0_4},

 		HTTPClientConfig: config.HTTPClientConfig{
 			BasicAuth: &config.BasicAuth{
@@ -352,9 +383,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-y",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -363,6 +402,7 @@ var expectedConf = &Config{
 		ServiceDiscoveryConfigs: discovery.Configs{
 			&consul.SDConfig{
 				Server:      "localhost:1234",
+				PathPrefix:  "/consul",
 				Token:       "mysecret",
 				Services:    []string{"nginx", "cache", "mysql"},
 				ServiceTags: []string{"canary", "v1"},
@@ -398,9 +438,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-z",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   model.Duration(10 * time.Second),
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: "/metrics",
 		Scheme:      "http",
@@ -423,9 +471,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-kubernetes",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -454,9 +510,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-kubernetes-namespaces",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -485,9 +549,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-kuma",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -505,9 +577,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-marathon",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -534,9 +614,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-nomad",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -560,9 +648,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-ec2",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -593,9 +689,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-lightsail",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -616,9 +720,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-azure",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -642,9 +754,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-nerve",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -661,9 +781,17 @@ var expectedConf = &Config{
 	{
 		JobName: "0123service-xxx",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -683,9 +811,17 @@ var expectedConf = &Config{
 	{
 		JobName: "badfederation",

 		HonorTimestamps: false,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: "/federate",
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -705,9 +841,17 @@ var expectedConf = &Config{
 	{
 		JobName: "測試",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -727,9 +871,17 @@ var expectedConf = &Config{
 	{
 		JobName: "httpsd",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -746,9 +898,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-triton",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -773,9 +933,17 @@ var expectedConf = &Config{
 	{
 		JobName: "digitalocean-droplets",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -799,9 +967,17 @@ var expectedConf = &Config{
 	{
 		JobName: "docker",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -821,9 +997,17 @@ var expectedConf = &Config{
 	{
 		JobName: "dockerswarm",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -843,9 +1027,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-openstack",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -869,9 +1061,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-puppetdb",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -897,10 +1097,18 @@ var expectedConf = &Config{
 		},
 	},
 	{
 		JobName:         "hetzner",
 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -946,9 +1154,17 @@ var expectedConf = &Config{
 	{
 		JobName: "service-eureka",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -965,9 +1181,18 @@ var expectedConf = &Config{
 	{
 		JobName: "ovhcloud",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+
 		HTTPClientConfig: config.DefaultHTTPClientConfig,
 		MetricsPath:      DefaultScrapeConfig.MetricsPath,
 		Scheme:           DefaultScrapeConfig.Scheme,
@@ -994,9 +1219,18 @@ var expectedConf = &Config{
 	{
 		JobName: "scaleway",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+
 		HTTPClientConfig: config.DefaultHTTPClientConfig,
 		MetricsPath:      DefaultScrapeConfig.MetricsPath,
 		Scheme:           DefaultScrapeConfig.Scheme,
@@ -1029,9 +1263,17 @@ var expectedConf = &Config{
 	{
 		JobName: "linode-instances",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -1056,9 +1298,18 @@ var expectedConf = &Config{
 	{
 		JobName: "uyuni",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+
 		HTTPClientConfig: config.DefaultHTTPClientConfig,
 		MetricsPath:      DefaultScrapeConfig.MetricsPath,
 		Scheme:           DefaultScrapeConfig.Scheme,
@@ -1075,10 +1326,18 @@ var expectedConf = &Config{
 		},
 	},
 	{
 		JobName:         "ionos",
 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -1100,9 +1359,17 @@ var expectedConf = &Config{
 	{
 		JobName: "vultr",

 		HonorTimestamps: true,
 		ScrapeInterval:  model.Duration(15 * time.Second),
 		ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:     true,
+		BodySizeLimit:         globBodySizeLimit,
+		SampleLimit:           globSampleLimit,
+		TargetLimit:           globTargetLimit,
+		LabelLimit:            globLabelLimit,
+		LabelNameLengthLimit:  globLabelNameLengthLimit,
+		LabelValueLengthLimit: globLabelValueLengthLimit,
+		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,

 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -1523,7 +1790,7 @@ var expectedErrors = []struct {
 	},
 	{
 		filename: "remote_write_authorization_header.bad.yml",
-		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`,
+		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`,
 	},
 	{
 		filename: "remote_write_url_missing.bad.yml",
@@ -1541,6 +1808,14 @@ var expectedErrors = []struct {
 		filename: "ec2_filters_empty_values.bad.yml",
 		errMsg:   `EC2 SD configuration filter values cannot be empty`,
 	},
+	{
+		filename: "ec2_token_file.bad.yml",
+		errMsg:   `at most one of bearer_token & bearer_token_file must be configured`,
+	},
+	{
+		filename: "lightsail_token_file.bad.yml",
+		errMsg:   `at most one of bearer_token & bearer_token_file must be configured`,
+	},
 	{
 		filename: "section_key_dup.bad.yml",
 		errMsg:   "field scrape_configs already set in type config.plain",
@@ -1565,6 +1840,10 @@ var expectedErrors = []struct {
 		filename: "azure_authentication_method.bad.yml",
 		errMsg:   "unknown authentication_type \"invalid\". Supported types are \"OAuth\" or \"ManagedIdentity\"",
 	},
+	{
+		filename: "azure_bearertoken_basicauth.bad.yml",
+		errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
+	},
 	{
 		filename: "empty_scrape_config.bad.yml",
 		errMsg:   "empty or null scrape config section",
@@ -1617,6 +1896,10 @@ var expectedErrors = []struct {
 		filename: "puppetdb_no_scheme.bad.yml",
 		errMsg:   "URL scheme must be 'http' or 'https'",
 	},
+	{
+		filename: "puppetdb_token_file.bad.yml",
+		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
+	},
 	{
 		filename: "hetzner_role.bad.yml",
 		errMsg:   "unknown role",
@@ -1653,6 +1936,10 @@ var expectedErrors = []struct {
 		filename: "http_url_no_host.bad.yml",
 		errMsg:   "host is missing in URL",
 	},
+	{
+		filename: "http_token_file.bad.yml",
+		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
+	},
 	{
 		filename: "http_url_bad_scheme.bad.yml",
 		errMsg:   "URL scheme must be 'http' or 'https'",
@@ -1681,6 +1968,10 @@ var expectedErrors = []struct {
 		filename: "uyuni_no_server.bad.yml",
 		errMsg:   "Uyuni SD configuration requires server host",
 	},
+	{
+		filename: "uyuni_token_file.bad.yml",
+		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
+	},
 	{
 		filename: "ionos_datacenter.bad.yml",
 		errMsg:   "datacenter id can't be empty",
@@ -1697,6 +1988,14 @@ var expectedErrors = []struct {
 		filename: "scrape_config_files_glob.bad.yml",
 		errMsg:   `parsing YAML file testdata/scrape_config_files_glob.bad.yml: invalid scrape config file path "scrape_configs/*/*"`,
 	},
+	{
+		filename: "scrape_config_files_scrape_protocols.bad.yml",
+		errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4] for scrape config with job name "node"`,
+	},
+	{
+		filename: "scrape_config_files_scrape_protocols2.bad.yml",
+		errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols2.bad.yml: duplicated protocol in scrape_protocols, got [OpenMetricsText1.0.0 PrometheusProto OpenMetricsText1.0.0] for scrape config with job name "node"`,
+	},
 }

 func TestBadConfigs(t *testing.T) {
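The two new fixtures exercise validateAcceptScrapeProtocols, which (judging from the asserted error strings) rejects both unknown protocol names and duplicates. A standalone sketch of that check, reconstructed from the error messages rather than copied from the PR:

package main

import "fmt"

var supported = map[string]bool{
	"OpenMetricsText0.0.1": true,
	"OpenMetricsText1.0.0": true,
	"PrometheusProto":      true,
	"PrometheusText0.0.4":  true,
}

// validateProtocols rejects unknown names and duplicates, mirroring the
// two error cases asserted in the tests above.
func validateProtocols(ps []string) error {
	seen := map[string]bool{}
	for _, p := range ps {
		if !supported[p] {
			return fmt.Errorf("unknown scrape protocol %v", p)
		}
		if seen[p] {
			return fmt.Errorf("duplicated protocol in scrape_protocols, got %v", ps)
		}
		seen[p] = true
	}
	return nil
}

func main() {
	fmt.Println(validateProtocols([]string{"OpenMetricsText1.0.0", "PrometheusProto", "OpenMetricsText1.0.0"}))
}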
@@ -1786,13 +2085,16 @@ func TestEmptyGlobalBlock(t *testing.T) {
 func TestGetScrapeConfigs(t *testing.T) {
 	sc := func(jobName string, scrapeInterval, scrapeTimeout model.Duration) *ScrapeConfig {
 		return &ScrapeConfig{
 			JobName:         jobName,
 			HonorTimestamps: true,
 			ScrapeInterval:  scrapeInterval,
 			ScrapeTimeout:   scrapeTimeout,
-			MetricsPath:      "/metrics",
-			Scheme:           "http",
-			HTTPClientConfig: config.DefaultHTTPClientConfig,
+			ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
+
+			MetricsPath:       "/metrics",
+			Scheme:            "http",
+			EnableCompression: true,
+			HTTPClientConfig:  config.DefaultHTTPClientConfig,
 			ServiceDiscoveryConfigs: discovery.Configs{
 				discovery.StaticConfig{
 					{
@@ -1843,10 +2145,13 @@ func TestGetScrapeConfigs(t *testing.T) {
 			HonorTimestamps: true,
 			ScrapeInterval:  model.Duration(60 * time.Second),
 			ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+			ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,

 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,

+			EnableCompression: true,
+
 			HTTPClientConfig: config.HTTPClientConfig{
 				TLSConfig: config.TLSConfig{
 					CertFile: filepath.FromSlash("testdata/scrape_configs/valid_cert_file"),
@@ -1873,6 +2178,8 @@ func TestGetScrapeConfigs(t *testing.T) {
 			HonorTimestamps: true,
 			ScrapeInterval:  model.Duration(15 * time.Second),
 			ScrapeTimeout:   DefaultGlobalConfig.ScrapeTimeout,
+			ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
+
 			HTTPClientConfig: config.HTTPClientConfig{
 				TLSConfig: config.TLSConfig{
 					CertFile: filepath.FromSlash("testdata/valid_cert_file"),
@@ -1885,6 +2192,8 @@ func TestGetScrapeConfigs(t *testing.T) {
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,

+			EnableCompression: true,
+
 			ServiceDiscoveryConfigs: discovery.Configs{
 				&vultr.SDConfig{
 					HTTPClientConfig: config.HTTPClientConfig{
@@ -1937,3 +2246,16 @@ func kubernetesSDHostURL() config.URL {
 	tURL, _ := url.Parse("https://localhost:1234")
 	return config.URL{URL: tURL}
 }
+
+func TestScrapeConfigDisableCompression(t *testing.T) {
+	want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, log.NewNopLogger())
+	require.NoError(t, err)
+
+	out, err := yaml.Marshal(want)
+
+	require.NoError(t, err)
+	got := &Config{}
+	require.NoError(t, yaml.UnmarshalStrict(out, got))
+
+	require.Equal(t, false, got.ScrapeConfigs[0].EnableCompression)
+}
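This test round-trips the loaded config through YAML to check that an explicit enable_compression: false survives marshalling rather than being dropped as a zero value. A toy version of the same round-trip technique (illustrative struct, same gopkg.in/yaml.v2 API the test relies on):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type cfg struct {
	// A plain bool defaults to false, so the round-trip must keep the
	// key visible instead of silently omitting it.
	EnableCompression bool `yaml:"enable_compression"`
}

func main() {
	var c cfg
	_ = yaml.Unmarshal([]byte("enable_compression: false"), &c)
	out, _ := yaml.Marshal(c)
	fmt.Print(string(out)) // enable_compression: false
}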
11 config/testdata/azure_bearertoken_basicauth.bad.yml vendored Normal file
@@ -0,0 +1,11 @@
+scrape_configs:
+  - job_name: prometheus
+    azure_sd_configs:
+      - subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
+        tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2
+        client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C
+        client_secret: mysecret
+        bearer_token: 1234
+        basic_auth:
+          username: user
+          password: password
12 config/testdata/conf.good.yml vendored
@@ -2,6 +2,12 @@
 global:
   scrape_interval: 15s
   evaluation_interval: 30s
+  body_size_limit: 15MB
+  sample_limit: 1500
+  target_limit: 30
+  label_limit: 30
+  label_name_length_limit: 200
+  label_value_length_limit: 200
   # scrape_timeout is set to the global default (10s).

   external_labels:
@@ -108,9 +114,14 @@ scrape_configs:

     scrape_interval: 50s
     scrape_timeout: 5s
+    scrape_protocols: ["PrometheusText0.0.4"]

     body_size_limit: 10MB
     sample_limit: 1000
+    target_limit: 35
+    label_limit: 35
+    label_name_length_limit: 210
+    label_value_length_limit: 210

     metrics_path: /my_path
     scheme: https
@@ -151,6 +162,7 @@ scrape_configs:
     consul_sd_configs:
       - server: "localhost:1234"
         token: mysecret
+        path_prefix: /consul
         services: ["nginx", "cache", "mysql"]
         tags: ["canary", "v1"]
         node_meta:
6 config/testdata/ec2_token_file.bad.yml vendored Normal file
@@ -0,0 +1,6 @@
+scrape_configs:
+  - job_name: foo
+    ec2_sd_configs:
+      - region: us-east-1
+        bearer_token: foo
+        bearer_token_file: foo
6 config/testdata/http_token_file.bad.yml vendored Normal file
@@ -0,0 +1,6 @@
+scrape_configs:
+  - job_name: foo
+    http_sd_configs:
+      - url: http://foo
+        bearer_token: foo
+        bearer_token_file: foo
6 config/testdata/lightsail_token_file.bad.yml vendored Normal file
@@ -0,0 +1,6 @@
+scrape_configs:
+  - job_name: foo
+    lightsail_sd_configs:
+      - region: us-east-1
+        bearer_token: foo
+        bearer_token_file: foo
7 config/testdata/puppetdb_token_file.bad.yml vendored Normal file
@@ -0,0 +1,7 @@
+scrape_configs:
+  - job_name: puppetdb
+    puppetdb_sd_configs:
+      - url: http://puppet
+        query: 'resources { type = "Package" and title = "httpd" }'
+        bearer_token: foo
+        bearer_token_file: foo
5 config/testdata/scrape_config_disable_compression.good.yml vendored Normal file
@@ -0,0 +1,5 @@
+scrape_configs:
+  - job_name: prometheus
+    static_configs:
+      - targets: ['localhost:8080']
+    enable_compression: false
5 config/testdata/scrape_config_files_scrape_protocols.bad.yml vendored Normal file
@@ -0,0 +1,5 @@
+scrape_configs:
+  - job_name: node
+    scrape_protocols: ["prometheusproto"]
+    static_configs:
+      - targets: ['localhost:8080']
5 config/testdata/scrape_config_files_scrape_protocols2.bad.yml vendored Normal file
@@ -0,0 +1,5 @@
+scrape_configs:
+  - job_name: node
+    scrape_protocols: ["OpenMetricsText1.0.0", "PrometheusProto", "OpenMetricsText1.0.0"]
+    static_configs:
+      - targets: ['localhost:8080']
8 config/testdata/uyuni_token_file.bad.yml vendored Normal file
@@ -0,0 +1,8 @@
+scrape_configs:
+  - job_name: uyuni
+    uyuni_sd_configs:
+      - server: "server"
+        username: "username"
+        password: "password"
+        bearer_token: foo
+        bearer_token_file: foo
@@ -129,7 +129,7 @@ func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 			return errors.New("EC2 SD configuration filter values cannot be empty")
 		}
 	}
-	return nil
+	return c.HTTPClientConfig.Validate()
 }

 // EC2Discovery periodically performs EC2-SD requests. It implements
@@ -164,7 +164,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger) *EC2Discovery {
 	return d
 }

-func (d *EC2Discovery) ec2Client(ctx context.Context) (*ec2.EC2, error) {
+func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) {
 	if d.ec2 != nil {
 		return d.ec2, nil
 	}
@@ -109,7 +109,7 @@ func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
 		}
 		c.Region = region
 	}
-	return nil
+	return c.HTTPClientConfig.Validate()
 }

 // LightsailDiscovery periodically performs Lightsail-SD requests. It implements
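These SD configs previously returned nil unconditionally, so an invalid inline HTTP client section (for example bearer_token and bearer_token_file set together) slipped through; delegating to c.HTTPClientConfig.Validate() surfaces those errors, which is exactly what the new *_token_file.bad.yml fixtures above exercise. A minimal sketch of the delegation pattern with toy types (not the prometheus/common API):

package main

import (
	"errors"
	"fmt"
)

type HTTPConfig struct{ BearerToken, BearerTokenFile string }

// Validate rejects mutually exclusive credential sources.
func (c HTTPConfig) Validate() error {
	if c.BearerToken != "" && c.BearerTokenFile != "" {
		return errors.New("at most one of bearer_token & bearer_token_file must be configured")
	}
	return nil
}

type SDConfig struct{ HTTP HTTPConfig }

// Validate delegates to the embedded section instead of returning nil.
func (c SDConfig) Validate() error { return c.HTTP.Validate() }

func main() {
	bad := SDConfig{HTTP: HTTPConfig{BearerToken: "foo", BearerTokenFile: "foo"}}
	fmt.Println(bad.Validate())
}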
@@ -17,21 +17,27 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"math/rand"
 	"net"
 	"net/http"
 	"strings"
 	"sync"
 	"time"

-	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
-	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-10-01/network"
-	"github.com/Azure/go-autorest/autorest"
-	"github.com/Azure/go-autorest/autorest/adal"
-	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2"
+	cache "github.com/Code-Hex/go-generics-cache"
+	"github.com/Code-Hex/go-generics-cache/policy/lru"
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	config_util "github.com/prometheus/common/config"

 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/version"
@@ -68,7 +74,7 @@ var (
 	DefaultSDConfig = SDConfig{
 		Port:                 80,
 		RefreshInterval:      model.Duration(5 * time.Minute),
-		Environment:          azure.PublicCloud.Name,
+		Environment:          "AzurePublicCloud",
 		AuthenticationMethod: authMethodOAuth,
 		HTTPClientConfig:     config_util.DefaultHTTPClientConfig,
 	}
@@ -78,11 +84,37 @@ var (
 		Name: "prometheus_sd_azure_failures_total",
 		Help: "Number of Azure service discovery refresh failures.",
 	})
+	cacheHitCount = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_sd_azure_cache_hit_total",
+			Help: "Number of cache hit during refresh.",
+		})
 )

+var environments = map[string]cloud.Configuration{
+	"AZURECHINACLOUD":        cloud.AzureChina,
+	"AZURECLOUD":             cloud.AzurePublic,
+	"AZUREGERMANCLOUD":       cloud.AzurePublic,
+	"AZUREPUBLICCLOUD":       cloud.AzurePublic,
+	"AZUREUSGOVERNMENT":      cloud.AzureGovernment,
+	"AZUREUSGOVERNMENTCLOUD": cloud.AzureGovernment,
+}
+
+// CloudConfigurationFromName returns cloud configuration based on the common name specified.
+func CloudConfigurationFromName(name string) (cloud.Configuration, error) {
+	name = strings.ToUpper(name)
+	env, ok := environments[name]
+	if !ok {
+		return env, fmt.Errorf("There is no cloud configuration matching the name %q", name)
+	}
+
+	return env, nil
+}
+
func init() {
|
func init() {
|
||||||
discovery.RegisterConfig(&SDConfig{})
|
discovery.RegisterConfig(&SDConfig{})
|
||||||
prometheus.MustRegister(failuresCount)
|
prometheus.MustRegister(failuresCount)
|
||||||
|
prometheus.MustRegister(cacheHitCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SDConfig is the configuration for Azure based service discovery.
|
// SDConfig is the configuration for Azure based service discovery.
|
||||||
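Note: CloudConfigurationFromName replaces azure.EnvironmentFromName from the retired
go-autorest stack. A usage sketch, assuming the helper is imported from this commit's
discovery/azure package (the printed field is illustrative):

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/prometheus/discovery/azure"
    )

    func main() {
        // Case-insensitive: the helper upper-cases the name before the map lookup.
        cfg, err := azure.CloudConfigurationFromName("AzureChinaCloud")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(cfg.ActiveDirectoryAuthorityHost)
    }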
@@ -123,7 +155,6 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if err != nil {
 		return err
 	}
 
 	if err = validateAuthParam(c.SubscriptionID, "subscription_id"); err != nil {
 		return err
 	}

@@ -144,7 +175,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		return fmt.Errorf("unknown authentication_type %q. Supported types are %q or %q", c.AuthenticationMethod, authMethodOAuth, authMethodManagedIdentity)
 	}
 
-	return nil
+	return c.HTTPClientConfig.Validate()
 }
 
 type Discovery struct {

@@ -152,6 +183,7 @@ type Discovery struct {
 	logger log.Logger
 	cfg    *SDConfig
 	port   int
+	cache  *cache.Cache[string, *armnetwork.Interface]
 }
 
 // NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets.

@@ -159,99 +191,111 @@ func NewDiscovery(cfg *SDConfig, logger log.Logger) *Discovery {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
+	l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000)))
 	d := &Discovery{
 		cfg:    cfg,
 		port:   cfg.Port,
 		logger: logger,
+		cache:  l,
 	}
 
 	d.Discovery = refresh.NewDiscovery(
 		logger,
 		"azure",
 		time.Duration(cfg.RefreshInterval),
 		d.refresh,
 	)
 
 	return d
 }
 
 // azureClient represents multiple Azure Resource Manager providers.
 type azureClient struct {
-	nic    network.InterfacesClient
-	vm     compute.VirtualMachinesClient
-	vmss   compute.VirtualMachineScaleSetsClient
-	vmssvm compute.VirtualMachineScaleSetVMsClient
+	nic    *armnetwork.InterfacesClient
+	vm     *armcompute.VirtualMachinesClient
+	vmss   *armcompute.VirtualMachineScaleSetsClient
+	vmssvm *armcompute.VirtualMachineScaleSetVMsClient
+	logger log.Logger
 }
 
 // createAzureClient is a helper function for creating an Azure compute client to ARM.
 func createAzureClient(cfg SDConfig) (azureClient, error) {
-	env, err := azure.EnvironmentFromName(cfg.Environment)
+	cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment)
 	if err != nil {
 		return azureClient{}, err
 	}
 
-	activeDirectoryEndpoint := env.ActiveDirectoryEndpoint
-	resourceManagerEndpoint := env.ResourceManagerEndpoint
-
 	var c azureClient
 
-	var spt *adal.ServicePrincipalToken
-
-	switch cfg.AuthenticationMethod {
-	case authMethodManagedIdentity:
-		spt, err = adal.NewServicePrincipalTokenFromManagedIdentity(resourceManagerEndpoint, &adal.ManagedIdentityOptions{ClientID: cfg.ClientID})
-		if err != nil {
-			return azureClient{}, err
-		}
-	case authMethodOAuth:
-		oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, cfg.TenantID)
-		if err != nil {
-			return azureClient{}, err
-		}
-
-		spt, err = adal.NewServicePrincipalToken(*oauthConfig, cfg.ClientID, string(cfg.ClientSecret), resourceManagerEndpoint)
-		if err != nil {
-			return azureClient{}, err
-		}
+	telemetry := policy.TelemetryOptions{
+		ApplicationID: userAgent,
+	}
+
+	credential, err := newCredential(cfg, policy.ClientOptions{
+		Cloud:     cloudConfiguration,
+		Telemetry: telemetry,
+	})
+	if err != nil {
+		return azureClient{}, err
 	}
 
 	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "azure_sd")
 	if err != nil {
 		return azureClient{}, err
 	}
-	sender := autorest.DecorateSender(client)
-	preparer := autorest.WithUserAgent(userAgent)
-
-	bearerAuthorizer := autorest.NewBearerAuthorizer(spt)
-
-	c.vm = compute.NewVirtualMachinesClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
-	c.vm.Authorizer = bearerAuthorizer
-	c.vm.Sender = sender
-	c.vm.RequestInspector = preparer
-
-	c.nic = network.NewInterfacesClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
-	c.nic.Authorizer = bearerAuthorizer
-	c.nic.Sender = sender
-	c.nic.RequestInspector = preparer
-
-	c.vmss = compute.NewVirtualMachineScaleSetsClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
-	c.vmss.Authorizer = bearerAuthorizer
-	c.vmss.Sender = sender
-	c.vmss.RequestInspector = preparer
-
-	c.vmssvm = compute.NewVirtualMachineScaleSetVMsClientWithBaseURI(resourceManagerEndpoint, cfg.SubscriptionID)
-	c.vmssvm.Authorizer = bearerAuthorizer
-	c.vmssvm.Sender = sender
-	c.vmssvm.RequestInspector = preparer
+	options := &arm.ClientOptions{
+		ClientOptions: policy.ClientOptions{
+			Transport: client,
+			Cloud:     cloudConfiguration,
+			Telemetry: telemetry,
+		},
+	}
+
+	c.vm, err = armcompute.NewVirtualMachinesClient(cfg.SubscriptionID, credential, options)
+	if err != nil {
+		return azureClient{}, err
+	}
+
+	c.nic, err = armnetwork.NewInterfacesClient(cfg.SubscriptionID, credential, options)
+	if err != nil {
+		return azureClient{}, err
+	}
+
+	c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(cfg.SubscriptionID, credential, options)
+	if err != nil {
+		return azureClient{}, err
+	}
+
+	c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(cfg.SubscriptionID, credential, options)
+	if err != nil {
+		return azureClient{}, err
+	}
 
 	return c, nil
 }
 
-// azureResource represents a resource identifier in Azure.
-type azureResource struct {
-	Name          string
-	ResourceGroup string
+func newCredential(cfg SDConfig, policyClientOptions policy.ClientOptions) (azcore.TokenCredential, error) {
+	var credential azcore.TokenCredential
+	switch cfg.AuthenticationMethod {
+	case authMethodManagedIdentity:
+		options := &azidentity.ManagedIdentityCredentialOptions{ClientOptions: policyClientOptions, ID: azidentity.ClientID(cfg.ClientID)}
+		managedIdentityCredential, err := azidentity.NewManagedIdentityCredential(options)
+		if err != nil {
+			return nil, err
+		}
+		credential = azcore.TokenCredential(managedIdentityCredential)
+	case authMethodOAuth:
+		options := &azidentity.ClientSecretCredentialOptions{ClientOptions: policyClientOptions}
+		secretCredential, err := azidentity.NewClientSecretCredential(cfg.TenantID, cfg.ClientID, string(cfg.ClientSecret), options)
+		if err != nil {
+			return nil, err
+		}
+		credential = azcore.TokenCredential(secretCredential)
+	}
+	return credential, nil
 }
 
-// virtualMachine represents an Azure virtual machine (which can also be created by a VMSS)
+// virtualMachine represents an Azure virtual machine (which can also be created by a VMSS).
 type virtualMachine struct {
 	ID   string
 	Name string
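Note: newCredential above swaps adal service-principal tokens for azidentity
credentials. A hedged, self-contained sketch of the client-secret flow against the ARM
scope (all IDs and the secret are placeholders):

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
        "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    )

    func main() {
        // Mirrors the authMethodOAuth branch above; nil options use defaults.
        cred, err := azidentity.NewClientSecretCredential("tenant-id", "client-id", "client-secret", nil)
        if err != nil {
            log.Fatal(err)
        }
        // Any track-2 ARM client calls GetToken for you; done here to show the shape.
        tok, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
            Scopes: []string{"https://management.azure.com/.default"},
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(tok.ExpiresOn)
    }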
@@ -266,22 +310,17 @@ type virtualMachine struct {
 }
 
 // Create a new azureResource object from an ID string.
-func newAzureResourceFromID(id string, logger log.Logger) (azureResource, error) {
-	// Resource IDs have the following format.
-	// /subscriptions/SUBSCRIPTION_ID/resourceGroups/RESOURCE_GROUP/providers/PROVIDER/TYPE/NAME
-	// or if embedded resource then
-	// /subscriptions/SUBSCRIPTION_ID/resourceGroups/RESOURCE_GROUP/providers/PROVIDER/TYPE/NAME/TYPE/NAME
-	s := strings.Split(id, "/")
-	if len(s) != 9 && len(s) != 11 {
-		err := fmt.Errorf("invalid ID '%s'. Refusing to create azureResource", id)
-		level.Error(logger).Log("err", err)
-		return azureResource{}, err
-	}
-
-	return azureResource{
-		Name:          strings.ToLower(s[8]),
-		ResourceGroup: strings.ToLower(s[4]),
-	}, nil
+func newAzureResourceFromID(id string, logger log.Logger) (*arm.ResourceID, error) {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+	resourceID, err := arm.ParseResourceID(id)
+	if err != nil {
+		err := fmt.Errorf("invalid ID '%s': %w", id, err)
+		level.Error(logger).Log("err", err)
+		return &arm.ResourceID{}, err
+	}
+	return resourceID, nil
 }
 
 func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {

@@ -292,6 +331,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 		failuresCount.Inc()
 		return nil, fmt.Errorf("could not create Azure client: %w", err)
 	}
+	client.logger = d.logger
 
 	machines, err := client.getVMs(ctx, d.cfg.ResourceGroup)
 	if err != nil {

@@ -344,7 +384,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 				azureLabelMachineComputerName:  model.LabelValue(vm.ComputerName),
 				azureLabelMachineOSType:        model.LabelValue(vm.OsType),
 				azureLabelMachineLocation:      model.LabelValue(vm.Location),
-				azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroup),
+				azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroupName),
 				azureLabelMachineSize:          model.LabelValue(vm.Size),
 			}
 

@@ -359,18 +399,25 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 
 			// Get the IP address information via separate call to the network provider.
 			for _, nicID := range vm.NetworkInterfaces {
-				networkInterface, err := client.getNetworkInterfaceByID(ctx, nicID)
-				if err != nil {
-					if errors.Is(err, errorNotFound) {
-						level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err)
-					} else {
-						ch <- target{labelSet: nil, err: err}
+				var networkInterface *armnetwork.Interface
+				if v, ok := d.getFromCache(nicID); ok {
+					networkInterface = v
+					cacheHitCount.Add(1)
+				} else {
+					networkInterface, err = client.getNetworkInterfaceByID(ctx, nicID)
+					if err != nil {
+						if errors.Is(err, errorNotFound) {
+							level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err)
+						} else {
+							ch <- target{labelSet: nil, err: err}
+						}
+						// Get out of this routine because we cannot continue without a network interface.
+						return
 					}
-					// Get out of this routine because we cannot continue without a network interface.
-					return
+					d.addToCache(nicID, networkInterface)
 				}
 
-				if networkInterface.InterfacePropertiesFormat == nil {
+				if networkInterface.Properties == nil {
 					continue
 				}
 
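Note: the refresh loop now reads network interfaces through an LRU cache before calling
the API, counting hits in prometheus_sd_azure_cache_hit_total. A small self-contained
sketch of the same go-generics-cache calls (string values stand in for
*armnetwork.Interface):

    package main

    import (
        "fmt"
        "time"

        cache "github.com/Code-Hex/go-generics-cache"
        "github.com/Code-Hex/go-generics-cache/policy/lru"
    )

    func main() {
        // LRU cache keyed by NIC ID, capacity 5000 as in NewDiscovery above.
        c := cache.New(cache.AsLRU[string, string](lru.WithCapacity(5000)))
        c.Set("nic-1", "cached-interface", cache.WithExpiration(5*time.Minute))

        if v, ok := c.Get("nic-1"); ok {
            fmt.Println("cache hit:", v) // served without an API round-trip
        } else {
            fmt.Println("cache miss: fall back to getNetworkInterfaceByID")
        }
    }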
@@ -378,21 +425,21 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 				// This information is available via another API call however the Go SDK does not
 				// yet support this. On deallocated machines, this value happens to be nil so it
 				// is a cheap and easy way to determine if a machine is allocated or not.
-				if networkInterface.Primary == nil {
+				if networkInterface.Properties.Primary == nil {
 					level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name)
 					return
 				}
 
-				if *networkInterface.Primary {
-					for _, ip := range *networkInterface.IPConfigurations {
+				if *networkInterface.Properties.Primary {
+					for _, ip := range networkInterface.Properties.IPConfigurations {
 						// IPAddress is a field defined in PublicIPAddressPropertiesFormat,
 						// therefore we need to validate that both are not nil.
-						if ip.PublicIPAddress != nil && ip.PublicIPAddress.PublicIPAddressPropertiesFormat != nil && ip.PublicIPAddress.IPAddress != nil {
-							labels[azureLabelMachinePublicIP] = model.LabelValue(*ip.PublicIPAddress.IPAddress)
+						if ip.Properties != nil && ip.Properties.PublicIPAddress != nil && ip.Properties.PublicIPAddress.Properties != nil && ip.Properties.PublicIPAddress.Properties.IPAddress != nil {
+							labels[azureLabelMachinePublicIP] = model.LabelValue(*ip.Properties.PublicIPAddress.Properties.IPAddress)
 						}
-						if ip.PrivateIPAddress != nil {
-							labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.PrivateIPAddress)
-							address := net.JoinHostPort(*ip.PrivateIPAddress, fmt.Sprintf("%d", d.port))
+						if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
+							labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
+							address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
 							labels[model.AddressLabel] = model.LabelValue(address)
 							ch <- target{labelSet: labels, err: nil}
 							return

@@ -427,93 +474,84 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 
 func (client *azureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) {
 	var vms []virtualMachine
-	var result compute.VirtualMachineListResultPage
-	var err error
 	if len(resourceGroup) == 0 {
-		result, err = client.vm.ListAll(ctx)
+		pager := client.vm.NewListAllPager(nil)
+		for pager.More() {
+			nextResult, err := pager.NextPage(ctx)
+			if err != nil {
+				return nil, fmt.Errorf("could not list virtual machines: %w", err)
+			}
+			for _, vm := range nextResult.Value {
+				vms = append(vms, mapFromVM(*vm))
+			}
+		}
 	} else {
-		result, err = client.vm.List(ctx, resourceGroup)
-	}
-	if err != nil {
-		return nil, fmt.Errorf("could not list virtual machines: %w", err)
-	}
-	for result.NotDone() {
-		for _, vm := range result.Values() {
-			vms = append(vms, mapFromVM(vm))
-		}
-		err = result.NextWithContext(ctx)
-		if err != nil {
-			return nil, fmt.Errorf("could not list virtual machines: %w", err)
+		pager := client.vm.NewListPager(resourceGroup, nil)
+		for pager.More() {
+			nextResult, err := pager.NextPage(ctx)
+			if err != nil {
+				return nil, fmt.Errorf("could not list virtual machines: %w", err)
+			}
+			for _, vm := range nextResult.Value {
+				vms = append(vms, mapFromVM(*vm))
+			}
 		}
 	}
 
 	return vms, nil
 }
 
-type VmssListResultPage interface {
-	NextWithContext(ctx context.Context) (err error)
-	NotDone() bool
-	Values() []compute.VirtualMachineScaleSet
-}
-
-func (client *azureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]compute.VirtualMachineScaleSet, error) {
-	var scaleSets []compute.VirtualMachineScaleSet
-	var result VmssListResultPage
-	var err error
+func (client *azureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error) {
+	var scaleSets []armcompute.VirtualMachineScaleSet
 	if len(resourceGroup) == 0 {
-		var rtn compute.VirtualMachineScaleSetListWithLinkResultPage
-		rtn, err = client.vmss.ListAll(ctx)
-		if err != nil {
-			return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err)
+		pager := client.vmss.NewListAllPager(nil)
+		for pager.More() {
+			nextResult, err := pager.NextPage(ctx)
+			if err != nil {
+				return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err)
+			}
+			for _, vmss := range nextResult.Value {
+				scaleSets = append(scaleSets, *vmss)
+			}
 		}
-		result = &rtn
 	} else {
-		var rtn compute.VirtualMachineScaleSetListResultPage
-		rtn, err = client.vmss.List(ctx, resourceGroup)
-		if err != nil {
-			return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err)
-		}
-		result = &rtn
-	}
-
-	for result.NotDone() {
-		scaleSets = append(scaleSets, result.Values()...)
-		err = result.NextWithContext(ctx)
-		if err != nil {
-			return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err)
+		pager := client.vmss.NewListPager(resourceGroup, nil)
+		for pager.More() {
+			nextResult, err := pager.NextPage(ctx)
+			if err != nil {
+				return nil, fmt.Errorf("could not list virtual machine scale sets: %w", err)
+			}
+			for _, vmss := range nextResult.Value {
+				scaleSets = append(scaleSets, *vmss)
+			}
 		}
 	}
 
 	return scaleSets, nil
 }
 
-func (client *azureClient) getScaleSetVMs(ctx context.Context, scaleSet compute.VirtualMachineScaleSet) ([]virtualMachine, error) {
+func (client *azureClient) getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error) {
 	var vms []virtualMachine
 	// TODO do we really need to fetch the resourcegroup this way?
-	r, err := newAzureResourceFromID(*scaleSet.ID, nil)
+	r, err := newAzureResourceFromID(*scaleSet.ID, client.logger)
 	if err != nil {
 		return nil, fmt.Errorf("could not parse scale set ID: %w", err)
 	}
 
-	result, err := client.vmssvm.List(ctx, r.ResourceGroup, *(scaleSet.Name), "", "", "")
-	if err != nil {
-		return nil, fmt.Errorf("could not list virtual machine scale set vms: %w", err)
-	}
-	for result.NotDone() {
-		for _, vm := range result.Values() {
-			vms = append(vms, mapFromVMScaleSetVM(vm, *scaleSet.Name))
-		}
-		err = result.NextWithContext(ctx)
+	pager := client.vmssvm.NewListPager(r.ResourceGroupName, *(scaleSet.Name), nil)
+	for pager.More() {
+		nextResult, err := pager.NextPage(ctx)
 		if err != nil {
 			return nil, fmt.Errorf("could not list virtual machine scale set vms: %w", err)
 		}
+		for _, vmssvm := range nextResult.Value {
+			vms = append(vms, mapFromVMScaleSetVM(*vmssvm, *scaleSet.Name))
+		}
 	}
 
 	return vms, nil
}
 
-func mapFromVM(vm compute.VirtualMachine) virtualMachine {
-	osType := string(vm.StorageProfile.OsDisk.OsType)
+func mapFromVM(vm armcompute.VirtualMachine) virtualMachine {
+	osType := string(*vm.Properties.StorageProfile.OSDisk.OSType)
 	tags := map[string]*string{}
 	networkInterfaces := []string{}
 	var computerName string
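Note: the three listing helpers above all use the track-2 SDK's pager idiom in place of
the old NotDone()/NextWithContext() result pages. The shape, pulled out for reference (a
sketch, not new behaviour: ctx, vms and mapFromVM are assumed from the surrounding
getVMs, and client is an authenticated *armcompute.VirtualMachinesClient):

    pager := client.NewListAllPager(nil)
    for pager.More() {
        page, err := pager.NextPage(ctx) // one API round-trip per page
        if err != nil {
            return nil, fmt.Errorf("could not list virtual machines: %w", err)
        }
        for _, vm := range page.Value {
            vms = append(vms, mapFromVM(*vm)) // items are pointers in the new SDK
        }
    }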
@@ -523,18 +561,17 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine {
 		tags = vm.Tags
 	}
 
-	if vm.NetworkProfile != nil {
-		for _, vmNIC := range *(vm.NetworkProfile.NetworkInterfaces) {
-			networkInterfaces = append(networkInterfaces, *vmNIC.ID)
+	if vm.Properties != nil {
+		if vm.Properties.NetworkProfile != nil {
+			for _, vmNIC := range vm.Properties.NetworkProfile.NetworkInterfaces {
+				networkInterfaces = append(networkInterfaces, *vmNIC.ID)
+			}
 		}
-	}
-
-	if vm.VirtualMachineProperties != nil {
-		if vm.VirtualMachineProperties.OsProfile != nil && vm.VirtualMachineProperties.OsProfile.ComputerName != nil {
-			computerName = *(vm.VirtualMachineProperties.OsProfile.ComputerName)
+		if vm.Properties.OSProfile != nil && vm.Properties.OSProfile.ComputerName != nil {
+			computerName = *(vm.Properties.OSProfile.ComputerName)
 		}
-		if vm.VirtualMachineProperties.HardwareProfile != nil {
-			size = string(vm.VirtualMachineProperties.HardwareProfile.VMSize)
+		if vm.Properties.HardwareProfile != nil {
+			size = string(*vm.Properties.HardwareProfile.VMSize)
 		}
 	}
 

@@ -552,8 +589,8 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine {
 	}
 }
 
-func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName string) virtualMachine {
-	osType := string(vm.StorageProfile.OsDisk.OsType)
+func mapFromVMScaleSetVM(vm armcompute.VirtualMachineScaleSetVM, scaleSetName string) virtualMachine {
+	osType := string(*vm.Properties.StorageProfile.OSDisk.OSType)
 	tags := map[string]*string{}
 	networkInterfaces := []string{}
 	var computerName string

@@ -563,18 +600,17 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin
 		tags = vm.Tags
 	}
 
-	if vm.NetworkProfile != nil {
-		for _, vmNIC := range *(vm.NetworkProfile.NetworkInterfaces) {
-			networkInterfaces = append(networkInterfaces, *vmNIC.ID)
+	if vm.Properties != nil {
+		if vm.Properties.NetworkProfile != nil {
+			for _, vmNIC := range vm.Properties.NetworkProfile.NetworkInterfaces {
+				networkInterfaces = append(networkInterfaces, *vmNIC.ID)
+			}
 		}
-	}
-
-	if vm.VirtualMachineScaleSetVMProperties != nil {
-		if vm.VirtualMachineScaleSetVMProperties.OsProfile != nil && vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName != nil {
-			computerName = *(vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName)
+		if vm.Properties.OSProfile != nil && vm.Properties.OSProfile.ComputerName != nil {
+			computerName = *(vm.Properties.OSProfile.ComputerName)
 		}
-		if vm.VirtualMachineScaleSetVMProperties.HardwareProfile != nil {
-			size = string(vm.VirtualMachineScaleSetVMProperties.HardwareProfile.VMSize)
+		if vm.Properties.HardwareProfile != nil {
+			size = string(*vm.Properties.HardwareProfile.VMSize)
 		}
 	}
 

@@ -596,36 +632,36 @@ var errorNotFound = errors.New("network interface does not exist")
 
 // getNetworkInterfaceByID gets the network interface.
 // If a 404 is returned from the Azure API, `errorNotFound` is returned.
-// On all other errors, an autorest.DetailedError is returned.
-func (client *azureClient) getNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*network.Interface, error) {
-	result := network.Interface{}
-	queryParameters := map[string]interface{}{
-		"api-version": "2018-10-01",
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithBaseURL(client.nic.BaseURI),
-		autorest.WithPath(networkInterfaceID),
-		autorest.WithQueryParameters(queryParameters),
-		autorest.WithUserAgent(userAgent))
-	req, err := preparer.Prepare((&http.Request{}).WithContext(ctx))
+func (client *azureClient) getNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) {
+	r, err := newAzureResourceFromID(networkInterfaceID, client.logger)
 	if err != nil {
-		return nil, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", nil, "Failure preparing request")
+		return nil, fmt.Errorf("could not parse network interface ID: %w", err)
 	}
 
-	resp, err := client.nic.GetSender(req)
+	resp, err := client.nic.Get(ctx, r.ResourceGroupName, r.Name, nil)
 	if err != nil {
-		return nil, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure sending request")
-	}
-
-	result, err = client.nic.GetResponder(resp)
-	if err != nil {
-		if resp.StatusCode == http.StatusNotFound {
+		var responseError *azcore.ResponseError
+		if errors.As(err, &responseError) && responseError.StatusCode == http.StatusNotFound {
 			return nil, errorNotFound
 		}
-		return nil, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure responding to request")
+		return nil, fmt.Errorf("Failed to retrieve Interface %v with error: %w", networkInterfaceID, err)
 	}
 
-	return &result, nil
+	return &resp.Interface, nil
+}
+
+// addToCache will add the network interface information for the specified nicID.
+func (d *Discovery) addToCache(nicID string, netInt *armnetwork.Interface) {
+	random := rand.Int63n(int64(time.Duration(d.cfg.RefreshInterval * 3).Seconds()))
+	rs := time.Duration(random) * time.Second
+	exptime := time.Duration(d.cfg.RefreshInterval*10) + rs
+	d.cache.Set(nicID, netInt, cache.WithExpiration(exptime))
+	level.Debug(d.logger).Log("msg", "Adding nic", "nic", nicID, "time", exptime.Seconds())
+}
+
+// getFromCache will get the network Interface for the specified nicID
+// If the cache is disabled nothing will happen.
+func (d *Discovery) getFromCache(nicID string) (*armnetwork.Interface, bool) {
	net, found := d.cache.Get(nicID)
	return net, found
 }
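Note: with the track-2 SDK, a 404 is detected by unwrapping *azcore.ResponseError with
errors.As rather than inspecting autorest's response object. A runnable sketch of the
same classification (the manually built error stands in for what a client returns):

    package main

    import (
        "errors"
        "fmt"
        "net/http"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore"
    )

    // classify mirrors the 404 handling in getNetworkInterfaceByID above.
    func classify(err error) string {
        var respErr *azcore.ResponseError
        if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound {
            return "not found"
        }
        return "other error"
    }

    func main() {
        err := &azcore.ResponseError{StatusCode: http.StatusNotFound}
        fmt.Println(classify(err)) // not found
    }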
@@ -16,7 +16,8 @@ package azure
 import (
 	"testing"
 
-	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
 )

@@ -29,34 +30,36 @@ func TestMapFromVMWithEmptyTags(t *testing.T) {
 	id := "test"
 	name := "name"
 	size := "size"
+	vmSize := armcompute.VirtualMachineSizeTypes(size)
+	osType := armcompute.OperatingSystemTypesLinux
 	vmType := "type"
 	location := "westeurope"
 	computerName := "computer_name"
-	networkProfile := compute.NetworkProfile{
-		NetworkInterfaces: &[]compute.NetworkInterfaceReference{},
+	networkProfile := armcompute.NetworkProfile{
+		NetworkInterfaces: []*armcompute.NetworkInterfaceReference{},
 	}
-	properties := &compute.VirtualMachineProperties{
-		OsProfile: &compute.OSProfile{
+	properties := &armcompute.VirtualMachineProperties{
+		OSProfile: &armcompute.OSProfile{
 			ComputerName: &computerName,
 		},
-		StorageProfile: &compute.StorageProfile{
-			OsDisk: &compute.OSDisk{
-				OsType: "Linux",
+		StorageProfile: &armcompute.StorageProfile{
+			OSDisk: &armcompute.OSDisk{
+				OSType: &osType,
 			},
 		},
 		NetworkProfile: &networkProfile,
-		HardwareProfile: &compute.HardwareProfile{
-			VMSize: compute.VirtualMachineSizeTypes(size),
+		HardwareProfile: &armcompute.HardwareProfile{
+			VMSize: &vmSize,
 		},
 	}
 
-	testVM := compute.VirtualMachine{
+	testVM := armcompute.VirtualMachine{
 		ID:       &id,
 		Name:     &name,
 		Type:     &vmType,
 		Location: &location,
 		Tags:     nil,
-		VirtualMachineProperties: properties,
+		Properties: properties,
 	}
 
 	expectedVM := virtualMachine{

@@ -80,37 +83,39 @@ func TestMapFromVMWithTags(t *testing.T) {
 	id := "test"
 	name := "name"
 	size := "size"
+	vmSize := armcompute.VirtualMachineSizeTypes(size)
+	osType := armcompute.OperatingSystemTypesLinux
 	vmType := "type"
 	location := "westeurope"
 	computerName := "computer_name"
 	tags := map[string]*string{
 		"prometheus": new(string),
 	}
-	networkProfile := compute.NetworkProfile{
-		NetworkInterfaces: &[]compute.NetworkInterfaceReference{},
+	networkProfile := armcompute.NetworkProfile{
+		NetworkInterfaces: []*armcompute.NetworkInterfaceReference{},
 	}
-	properties := &compute.VirtualMachineProperties{
-		OsProfile: &compute.OSProfile{
+	properties := &armcompute.VirtualMachineProperties{
+		OSProfile: &armcompute.OSProfile{
 			ComputerName: &computerName,
 		},
-		StorageProfile: &compute.StorageProfile{
-			OsDisk: &compute.OSDisk{
-				OsType: "Linux",
+		StorageProfile: &armcompute.StorageProfile{
+			OSDisk: &armcompute.OSDisk{
+				OSType: &osType,
 			},
 		},
 		NetworkProfile: &networkProfile,
-		HardwareProfile: &compute.HardwareProfile{
-			VMSize: compute.VirtualMachineSizeTypes(size),
+		HardwareProfile: &armcompute.HardwareProfile{
+			VMSize: &vmSize,
 		},
 	}
 
-	testVM := compute.VirtualMachine{
+	testVM := armcompute.VirtualMachine{
 		ID:       &id,
 		Name:     &name,
 		Type:     &vmType,
 		Location: &location,
 		Tags:     tags,
-		VirtualMachineProperties: properties,
+		Properties: properties,
 	}
 
 	expectedVM := virtualMachine{

@@ -134,34 +139,36 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
 	id := "test"
 	name := "name"
 	size := "size"
+	vmSize := armcompute.VirtualMachineSizeTypes(size)
+	osType := armcompute.OperatingSystemTypesLinux
 	vmType := "type"
 	location := "westeurope"
 	computerName := "computer_name"
-	networkProfile := compute.NetworkProfile{
-		NetworkInterfaces: &[]compute.NetworkInterfaceReference{},
+	networkProfile := armcompute.NetworkProfile{
+		NetworkInterfaces: []*armcompute.NetworkInterfaceReference{},
 	}
-	properties := &compute.VirtualMachineScaleSetVMProperties{
-		OsProfile: &compute.OSProfile{
+	properties := &armcompute.VirtualMachineScaleSetVMProperties{
+		OSProfile: &armcompute.OSProfile{
 			ComputerName: &computerName,
 		},
-		StorageProfile: &compute.StorageProfile{
-			OsDisk: &compute.OSDisk{
-				OsType: "Linux",
+		StorageProfile: &armcompute.StorageProfile{
+			OSDisk: &armcompute.OSDisk{
+				OSType: &osType,
 			},
 		},
 		NetworkProfile: &networkProfile,
-		HardwareProfile: &compute.HardwareProfile{
-			VMSize: compute.VirtualMachineSizeTypes(size),
+		HardwareProfile: &armcompute.HardwareProfile{
+			VMSize: &vmSize,
 		},
 	}
 
-	testVM := compute.VirtualMachineScaleSetVM{
+	testVM := armcompute.VirtualMachineScaleSetVM{
 		ID:       &id,
 		Name:     &name,
 		Type:     &vmType,
 		Location: &location,
 		Tags:     nil,
-		VirtualMachineScaleSetVMProperties: properties,
+		Properties: properties,
 	}
 
 	scaleSet := "testSet"

@@ -187,37 +194,39 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
 	id := "test"
 	name := "name"
 	size := "size"
+	vmSize := armcompute.VirtualMachineSizeTypes(size)
+	osType := armcompute.OperatingSystemTypesLinux
 	vmType := "type"
 	location := "westeurope"
 	computerName := "computer_name"
 	tags := map[string]*string{
 		"prometheus": new(string),
 	}
-	networkProfile := compute.NetworkProfile{
-		NetworkInterfaces: &[]compute.NetworkInterfaceReference{},
+	networkProfile := armcompute.NetworkProfile{
+		NetworkInterfaces: []*armcompute.NetworkInterfaceReference{},
 	}
-	properties := &compute.VirtualMachineScaleSetVMProperties{
-		OsProfile: &compute.OSProfile{
+	properties := &armcompute.VirtualMachineScaleSetVMProperties{
+		OSProfile: &armcompute.OSProfile{
 			ComputerName: &computerName,
 		},
-		StorageProfile: &compute.StorageProfile{
-			OsDisk: &compute.OSDisk{
-				OsType: "Linux",
+		StorageProfile: &armcompute.StorageProfile{
+			OSDisk: &armcompute.OSDisk{
+				OSType: &osType,
 			},
 		},
 		NetworkProfile: &networkProfile,
-		HardwareProfile: &compute.HardwareProfile{
-			VMSize: compute.VirtualMachineSizeTypes(size),
+		HardwareProfile: &armcompute.HardwareProfile{
+			VMSize: &vmSize,
 		},
 	}
 
-	testVM := compute.VirtualMachineScaleSetVM{
 	  	ID:       &id,
 		Name:     &name,
 		Type:     &vmType,
 		Location: &location,
 		Tags:     tags,
-		VirtualMachineScaleSetVMProperties: properties,
+		Properties: properties,
 	}
 
 	scaleSet := "testSet"

@@ -242,18 +251,26 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
 func TestNewAzureResourceFromID(t *testing.T) {
 	for _, tc := range []struct {
 		id       string
-		expected azureResource
+		expected *arm.ResourceID
 	}{
 		{
-			id:       "/a/b/c/group/d/e/f/name",
-			expected: azureResource{"name", "group"},
+			id: "/subscriptions/SUBSCRIPTION_ID/resourceGroups/group/providers/PROVIDER/TYPE/name",
+			expected: &arm.ResourceID{
+				Name:              "name",
+				ResourceGroupName: "group",
+			},
 		},
 		{
-			id:       "/a/b/c/group/d/e/f/name/g/h",
-			expected: azureResource{"name", "group"},
+			id: "/subscriptions/SUBSCRIPTION_ID/resourceGroups/group/providers/PROVIDER/TYPE/name/TYPE/h",
+			expected: &arm.ResourceID{
+				Name:              "h",
+				ResourceGroupName: "group",
+			},
		},
 	} {
-		actual, _ := newAzureResourceFromID(tc.id, nil)
-		require.Equal(t, tc.expected, actual)
+		actual, err := newAzureResourceFromID(tc.id, nil)
+		require.Nil(t, err)
+		require.Equal(t, tc.expected.Name, actual.Name)
+		require.Equal(t, tc.expected.ResourceGroupName, actual.ResourceGroupName)
 	}
 }
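Note: the tests now expect *arm.ResourceID, whose parser enforces real resource-ID
shapes, which is why the fixtures grew from "/a/b/c/..." to full subscription paths. A
standalone example of the parser the code now relies on:

    package main

    import (
        "fmt"
        "log"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
    )

    func main() {
        // The SDK parser replaces the old hand-rolled strings.Split logic.
        id := "/subscriptions/SUBSCRIPTION_ID/resourceGroups/group/providers/PROVIDER/TYPE/name"
        r, err := arm.ParseResourceID(id)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(r.Name, r.ResourceGroupName) // name group
    }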
@@ -50,7 +50,7 @@ const (
 	tagsLabel = model.MetaLabelPrefix + "consul_tags"
 	// serviceLabel is the name of the label containing the service name.
 	serviceLabel = model.MetaLabelPrefix + "consul_service"
-	// healthLabel is the name of the label containing the health of the service instance
+	// healthLabel is the name of the label containing the health of the service instance.
 	healthLabel = model.MetaLabelPrefix + "consul_health"
 	// serviceAddressLabel is the name of the label containing the (optional) service address.
 	serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address"

@@ -111,6 +111,7 @@ func init() {
 // SDConfig is the configuration for Consul service discovery.
 type SDConfig struct {
 	Server     string        `yaml:"server,omitempty"`
+	PathPrefix string        `yaml:"path_prefix,omitempty"`
 	Token      config.Secret `yaml:"token,omitempty"`
 	Datacenter string        `yaml:"datacenter,omitempty"`
 	Namespace  string        `yaml:"namespace,omitempty"`

@@ -211,6 +212,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 
 	clientConf := &consul.Config{
 		Address:    conf.Server,
+		PathPrefix: conf.PathPrefix,
 		Scheme:     conf.Scheme,
 		Datacenter: conf.Datacenter,
 		Namespace:  conf.Namespace,
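Note: the new path_prefix option is handed straight to the Consul API client's
PathPrefix field, for agents served behind a reverse proxy under a sub-path. A sketch of
programmatic use (the endpoint and prefix are illustrative; the field name is taken from
the hunk above):

    package main

    import (
        "fmt"

        consul "github.com/hashicorp/consul/api"
    )

    func main() {
        cfg := consul.DefaultConfig()
        cfg.Address = "localhost:8500"
        cfg.PathPrefix = "/consul" // requests go to http://localhost:8500/consul/v1/...
        client, err := consul.NewClient(cfg)
        fmt.Println(client != nil, err)
    }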
@@ -21,7 +21,7 @@ import (
 	"testing"
 )
 
-// SDMock is the interface for the DigitalOcean mock
+// SDMock is the interface for the DigitalOcean mock.
 type SDMock struct {
 	t      *testing.T
 	Server *httptest.Server

@@ -35,18 +35,18 @@ func NewSDMock(t *testing.T) *SDMock {
 	}
 }
 
-// Endpoint returns the URI to the mock server
+// Endpoint returns the URI to the mock server.
 func (m *SDMock) Endpoint() string {
 	return m.Server.URL + "/"
 }
 
-// Setup creates the mock server
+// Setup creates the mock server.
 func (m *SDMock) Setup() {
 	m.Mux = http.NewServeMux()
 	m.Server = httptest.NewServer(m.Mux)
 }
 
-// ShutdownServer creates the mock server
+// ShutdownServer creates the mock server.
 func (m *SDMock) ShutdownServer() {
 	m.Server.Close()
 }
@@ -42,6 +42,8 @@ const (
 	dnsSrvRecordPortLabel  = dnsSrvRecordPrefix + "port"
 	dnsMxRecordPrefix      = model.MetaLabelPrefix + "dns_mx_record_"
 	dnsMxRecordTargetLabel = dnsMxRecordPrefix + "target"
+	dnsNsRecordPrefix      = model.MetaLabelPrefix + "dns_ns_record_"
+	dnsNsRecordTargetLabel = dnsNsRecordPrefix + "target"
 
 	// Constants for instrumentation.
 	namespace = "prometheus"

@@ -102,7 +104,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	}
 	switch strings.ToUpper(c.Type) {
 	case "SRV":
-	case "A", "AAAA", "MX":
+	case "A", "AAAA", "MX", "NS":
 		if c.Port == 0 {
 			return errors.New("a port is required in DNS-SD configs for all record types except SRV")
 		}

@@ -140,6 +142,8 @@ func NewDiscovery(conf SDConfig, logger log.Logger) *Discovery {
 		qtype = dns.TypeSRV
 	case "MX":
 		qtype = dns.TypeMX
+	case "NS":
+		qtype = dns.TypeNS
 	}
 	d := &Discovery{
 		names: conf.Names,

@@ -199,7 +203,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
 	}
 
 	for _, record := range response.Answer {
-		var target, dnsSrvRecordTarget, dnsSrvRecordPort, dnsMxRecordTarget model.LabelValue
+		var target, dnsSrvRecordTarget, dnsSrvRecordPort, dnsMxRecordTarget, dnsNsRecordTarget model.LabelValue
 
 		switch addr := record.(type) {
 		case *dns.SRV:

@@ -217,6 +221,13 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
 			addr.Mx = strings.TrimRight(addr.Mx, ".")
 
 			target = hostPort(addr.Mx, d.port)
+		case *dns.NS:
+			dnsNsRecordTarget = model.LabelValue(addr.Ns)
+
+			// Remove the final dot from rooted DNS names to make them look more usual.
+			addr.Ns = strings.TrimRight(addr.Ns, ".")
+
+			target = hostPort(addr.Ns, d.port)
 		case *dns.A:
 			target = hostPort(addr.A.String(), d.port)
 		case *dns.AAAA:

@@ -234,6 +245,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
 			dnsSrvRecordTargetLabel: dnsSrvRecordTarget,
 			dnsSrvRecordPortLabel:   dnsSrvRecordPort,
 			dnsMxRecordTargetLabel:  dnsMxRecordTarget,
+			dnsNsRecordTargetLabel:  dnsNsRecordTarget,
 		})
 	}
 

@@ -285,21 +297,22 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms
 	for _, lname := range conf.NameList(name) {
 		response, err := lookupFromAnyServer(lname, qtype, conf, logger)
 
-		if err != nil {
+		switch {
+		case err != nil:
 			// We can't go home yet, because a later name
 			// may give us a valid, successful answer. However
 			// we can no longer say "this name definitely doesn't
 			// exist", because we did not get that answer for
 			// at least one name.
 			allResponsesValid = false
-		} else if response.Rcode == dns.RcodeSuccess {
+		case response.Rcode == dns.RcodeSuccess:
 			// Outcome 1: GOLD!
 			return response, nil
 		}
 	}
 
 	if allResponsesValid {
-		// Outcome 2: everyone says NXDOMAIN, that's good enough for me
+		// Outcome 2: everyone says NXDOMAIN, that's good enough for me.
 		return &dns.Msg{}, nil
 	}
 	// Outcome 3: boned.
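Note: the new NS branch mirrors the existing MX handling: record the rooted name in the
meta label, trim the final dot, then build the target address. A standalone query sketch
with the same miekg/dns package (the resolver address and domain are illustrative):

    package main

    import (
        "fmt"
        "log"

        "github.com/miekg/dns"
    )

    func main() {
        m := new(dns.Msg)
        m.SetQuestion(dns.Fqdn("example.com"), dns.TypeNS)
        in, err := dns.Exchange(m, "8.8.8.8:53")
        if err != nil {
            log.Fatal(err)
        }
        for _, rr := range in.Answer {
            if ns, ok := rr.(*dns.NS); ok {
                fmt.Println(ns.Ns) // rooted name, e.g. "a.iana-servers.net."
            }
        }
    }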
@@ -81,6 +81,7 @@ func TestDNS(t *testing.T) {
 						"__meta_dns_srv_record_target": "",
 						"__meta_dns_srv_record_port":   "",
 						"__meta_dns_mx_record_target":  "",
+						"__meta_dns_ns_record_target":  "",
 					},
 				},
 			},

@@ -112,6 +113,7 @@ func TestDNS(t *testing.T) {
 						"__meta_dns_srv_record_target": "",
 						"__meta_dns_srv_record_port":   "",
 						"__meta_dns_mx_record_target":  "",
+						"__meta_dns_ns_record_target":  "",
 					},
 				},
 			},

@@ -143,6 +145,7 @@ func TestDNS(t *testing.T) {
 						"__meta_dns_srv_record_target": "db1.example.com.",
 						"__meta_dns_srv_record_port":   "3306",
 						"__meta_dns_mx_record_target":  "",
+						"__meta_dns_ns_record_target":  "",
 					},
 					{
 						"__address__":                  "db2.example.com:3306",

@@ -150,6 +153,7 @@ func TestDNS(t *testing.T) {
 						"__meta_dns_srv_record_target": "db2.example.com.",
 						"__meta_dns_srv_record_port":   "3306",
 						"__meta_dns_mx_record_target":  "",
+						"__meta_dns_ns_record_target":  "",
 					},
 				},
 			},

@@ -180,6 +184,7 @@ func TestDNS(t *testing.T) {
 						"__meta_dns_srv_record_target": "db1.example.com.",
 						"__meta_dns_srv_record_port":   "3306",
 						"__meta_dns_mx_record_target":  "",
+						"__meta_dns_ns_record_target":  "",
 					},
 				},
 			},

@@ -227,6 +232,7 @@ func TestDNS(t *testing.T) {
 						"__meta_dns_srv_record_target": "",
 						"__meta_dns_srv_record_port":   "",
 						"__meta_dns_mx_record_target":  "smtp1.example.com.",
+						"__meta_dns_ns_record_target":  "",
 					},
 					{
 						"__address__":                  "smtp2.example.com:25",

@@ -234,6 +240,7 @@ func TestDNS(t *testing.T) {
 						"__meta_dns_srv_record_target": "",
 						"__meta_dns_srv_record_port":   "",
 						"__meta_dns_mx_record_target":  "smtp2.example.com.",
+						"__meta_dns_ns_record_target":  "",
 					},
 				},
 			},
@@ -226,8 +226,8 @@ func (d *Discovery) watchFiles() {
 		panic("no watcher configured")
 	}
 	for _, p := range d.paths {
-		if idx := strings.LastIndex(p, "/"); idx > -1 {
-			p = p[:idx]
+		if dir, _ := filepath.Split(p); dir != "" {
+			p = dir
 		} else {
 			p = "./"
 		}
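Note: filepath.Split is OS-aware where strings.LastIndex(p, "/") was not: on Windows it
also understands backslashes and drive letters. A runnable example of the behaviour the
watcher now relies on (paths are illustrative):

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        dir, file := filepath.Split("/etc/prometheus/targets.json")
        fmt.Printf("dir=%q file=%q\n", dir, file) // dir="/etc/prometheus/" file="targets.json"

        dir, _ = filepath.Split("targets.json")
        fmt.Printf("dir=%q\n", dir) // "" -> the code above falls back to "./"
    }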
|
@ -22,7 +22,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
"github.com/go-kit/log"
|
||||||
"github.com/hetznercloud/hcloud-go/hcloud"
|
"github.com/hetznercloud/hcloud-go/v2/hcloud"
|
||||||
"github.com/prometheus/common/config"
|
"github.com/prometheus/common/config"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
"github.com/prometheus/common/version"
|
"github.com/prometheus/common/version"
|
||||||
@ -59,7 +59,7 @@ type hcloudDiscovery struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
|
// newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
|
||||||
func newHcloudDiscovery(conf *SDConfig, logger log.Logger) (*hcloudDiscovery, error) {
|
func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) {
|
||||||
d := &hcloudDiscovery{
|
d := &hcloudDiscovery{
|
||||||
port: conf.Port,
|
port: conf.Port,
|
||||||
}
|
}
|
||||||
@ -91,7 +91,7 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
|
|||||||
targets := make([]model.LabelSet, len(servers))
|
targets := make([]model.LabelSet, len(servers))
|
||||||
for i, server := range servers {
|
for i, server := range servers {
|
||||||
labels := model.LabelSet{
|
labels := model.LabelSet{
|
||||||
hetznerLabelRole: model.LabelValue(hetznerRoleHcloud),
|
hetznerLabelRole: model.LabelValue(HetznerRoleHcloud),
|
||||||
hetznerLabelServerID: model.LabelValue(fmt.Sprintf("%d", server.ID)),
|
hetznerLabelServerID: model.LabelValue(fmt.Sprintf("%d", server.ID)),
|
||||||
hetznerLabelServerName: model.LabelValue(server.Name),
|
hetznerLabelServerName: model.LabelValue(server.Name),
|
||||||
hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name),
|
hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name),
|
||||||
discovery/hetzner/hetzner.go
@@ -20,7 +20,7 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/hetznercloud/hcloud-go/hcloud"
+	"github.com/hetznercloud/hcloud-go/v2/hcloud"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 
@@ -57,7 +57,7 @@ type SDConfig struct {
 
 	RefreshInterval model.Duration `yaml:"refresh_interval"`
 	Port            int            `yaml:"port"`
-	Role            role           `yaml:"role"`
+	Role            Role           `yaml:"role"`
 	hcloudEndpoint  string // For tests only.
 	robotEndpoint   string // For tests only.
 }
@@ -74,26 +74,26 @@ type refresher interface {
 	refresh(context.Context) ([]*targetgroup.Group, error)
 }
 
-// role is the role of the target within the Hetzner Ecosystem.
-type role string
+// Role is the Role of the target within the Hetzner Ecosystem.
+type Role string
 
 // The valid options for role.
 const (
 	// Hetzner Robot Role (Dedicated Server)
 	// https://robot.hetzner.com
-	hetznerRoleRobot role = "robot"
+	HetznerRoleRobot Role = "robot"
 	// Hetzner Cloud Role
 	// https://console.hetzner.cloud
-	hetznerRoleHcloud role = "hcloud"
+	HetznerRoleHcloud Role = "hcloud"
 )
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *role) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if err := unmarshal((*string)(c)); err != nil {
 		return err
 	}
 	switch *c {
-	case hetznerRoleRobot, hetznerRoleHcloud:
+	case HetznerRoleRobot, HetznerRoleHcloud:
 		return nil
 	default:
 		return fmt.Errorf("unknown role %q", *c)
@@ -143,12 +143,12 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) {
 
 func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {
 	switch conf.Role {
-	case hetznerRoleHcloud:
+	case HetznerRoleHcloud:
 		if conf.hcloudEndpoint == "" {
 			conf.hcloudEndpoint = hcloud.Endpoint
 		}
 		return newHcloudDiscovery(conf, l)
-	case hetznerRoleRobot:
+	case HetznerRoleRobot:
 		if conf.robotEndpoint == "" {
 			conf.robotEndpoint = "https://robot-ws.your-server.de"
 		}
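Note: the hetzner role type and its constants are now exported, so the YAML validation above becomes part of the package's public surface. A self-contained sketch of how the exported type behaves when unmarshaling (the main package and test inputs here are illustrative; only Role, HetznerRoleRobot, HetznerRoleHcloud, and UnmarshalYAML come from this diff):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Role mirrors the exported type from discovery/hetzner.
type Role string

const (
	HetznerRoleRobot  Role = "robot"
	HetznerRoleHcloud Role = "hcloud"
)

// UnmarshalYAML rejects anything but the two valid roles.
func (c *Role) UnmarshalYAML(unmarshal func(interface{}) error) error {
	if err := unmarshal((*string)(c)); err != nil {
		return err
	}
	switch *c {
	case HetznerRoleRobot, HetznerRoleHcloud:
		return nil
	default:
		return fmt.Errorf("unknown role %q", *c)
	}
}

func main() {
	var r Role
	fmt.Println(yaml.Unmarshal([]byte("hcloud"), &r), r) // <nil> hcloud
	fmt.Println(yaml.Unmarshal([]byte("vps"), &r))       // unknown role "vps"
}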
discovery/hetzner/mock_test.go
@@ -20,7 +20,7 @@ import (
 	"testing"
 )
 
-// SDMock is the interface for the Hetzner Cloud mock
+// SDMock is the interface for the Hetzner Cloud mock.
 type SDMock struct {
 	t      *testing.T
 	Server *httptest.Server
@@ -34,19 +34,19 @@ func NewSDMock(t *testing.T) *SDMock {
 	}
 }
 
-// Endpoint returns the URI to the mock server
+// Endpoint returns the URI to the mock server.
 func (m *SDMock) Endpoint() string {
 	return m.Server.URL + "/"
 }
 
-// Setup creates the mock server
+// Setup creates the mock server.
 func (m *SDMock) Setup() {
 	m.Mux = http.NewServeMux()
 	m.Server = httptest.NewServer(m.Mux)
 	m.t.Cleanup(m.Server.Close)
}
 
-// ShutdownServer creates the mock server
+// ShutdownServer creates the mock server.
 func (m *SDMock) ShutdownServer() {
 	m.Server.Close()
 }
discovery/hetzner/robot.go
@@ -51,7 +51,7 @@ type robotDiscovery struct {
 }
 
 // newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets.
-func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, error) {
+func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) {
 	d := &robotDiscovery{
 		port:     conf.Port,
 		endpoint: conf.robotEndpoint,
@@ -69,7 +69,7 @@ func newRobotDiscovery(conf *SDConfig, logger log.Logger) (*robotDiscovery, error) {
 	return d, nil
 }
 
-func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
 	req, err := http.NewRequest("GET", d.endpoint+"/server", nil)
 	if err != nil {
 		return nil, err
@@ -105,7 +105,7 @@ func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	targets := make([]model.LabelSet, len(servers))
 	for i, server := range servers {
 		labels := model.LabelSet{
-			hetznerLabelRole:       model.LabelValue(hetznerRoleRobot),
+			hetznerLabelRole:       model.LabelValue(HetznerRoleRobot),
 			hetznerLabelServerID:   model.LabelValue(strconv.Itoa(server.Server.ServerNumber)),
 			hetznerLabelServerName: model.LabelValue(server.Server.ServerName),
 			hetznerLabelDatacenter: model.LabelValue(strings.ToLower(server.Server.Dc)),
discovery/http/http.go
@@ -99,7 +99,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if parsedURL.Host == "" {
 		return fmt.Errorf("host is missing in URL")
 	}
-	return nil
+	return c.HTTPClientConfig.Validate()
 }
 
 const httpSDURLLabel = model.MetaLabelPrefix + "url"
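With this change, HTTP SD rejects invalid client settings while the configuration is being parsed instead of failing later at request time. A minimal sketch of the pattern, assuming common/config's HTTPClientConfig and its Validate method; the sdConfig type and the exact error wording are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/common/config"
	"gopkg.in/yaml.v2"
)

// sdConfig is a hypothetical, trimmed-down stand-in for the real SDConfig.
type sdConfig struct {
	URL              string                  `yaml:"url"`
	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
}

func (c *sdConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type plain sdConfig // avoid recursing into this method
	if err := unmarshal((*plain)(c)); err != nil {
		return err
	}
	// Surface invalid client settings at load time instead of at scrape time.
	return c.HTTPClientConfig.Validate()
}

func main() {
	bad := []byte("url: http://localhost\nbearer_token: x\nbearer_token_file: /tmp/t\n")
	var c sdConfig
	// Prints a non-nil validation error (wording may differ by library version).
	fmt.Println(yaml.Unmarshal(bad, &c))
}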
discovery/ionos/ionos.go
@@ -14,10 +14,10 @@
 package ionos
 
 import (
+	"errors"
 	"time"
 
 	"github.com/go-kit/log"
-	"github.com/pkg/errors"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 
discovery/ionos/server.go
@@ -60,7 +60,7 @@ type serverDiscovery struct {
 	datacenterID string
 }
 
-func newServerDiscovery(conf *SDConfig, logger log.Logger) (*serverDiscovery, error) {
+func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) {
 	d := &serverDiscovery{
 		port:         conf.Port,
 		datacenterID: conf.DatacenterID,
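The ionos package drops github.com/pkg/errors in favor of the standard library. The usual mechanical translation, shown as a sketch (errAuth and load are illustrative names):

package main

import (
	"errors"
	"fmt"
)

var errAuth = errors.New("missing authentication token")

func load() error {
	// Before: return pkgerrors.Wrap(errAuth, "ionos_sd")
	// After: wrap with %w so errors.Is/As can still see the cause.
	return fmt.Errorf("ionos_sd: %w", errAuth)
}

func main() {
	err := load()
	fmt.Println(err)                     // ionos_sd: missing authentication token
	fmt.Println(errors.Is(err, errAuth)) // true
}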
discovery/kubernetes/client_metrics.go
@@ -45,7 +45,7 @@ var (
 		[]string{"endpoint"},
 	)
 
-	// Definition of metrics for client-go workflow metrics provider
+	// Definition of metrics for client-go workflow metrics provider.
 	clientGoWorkqueueDepthMetricVec = prometheus.NewGaugeVec(
 		prometheus.GaugeOpts{
 			Namespace: workqueueMetricsNamespace,
@@ -106,7 +106,7 @@ func (noopMetric) Dec() {}
 func (noopMetric) Observe(float64) {}
 func (noopMetric) Set(float64)     {}
 
-// Definition of client-go metrics adapters for HTTP requests observation
+// Definition of client-go metrics adapters for HTTP requests observation.
 type clientGoRequestMetricAdapter struct{}
 
 func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer) {
@@ -122,15 +122,15 @@ func (f *clientGoRequestMetricAdapter) Register(registerer prometheus.Registerer) {
 	)
 }
 
-func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) {
+func (clientGoRequestMetricAdapter) Increment(_ context.Context, code, _, _ string) {
 	clientGoRequestResultMetricVec.WithLabelValues(code).Inc()
 }
 
-func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) {
+func (clientGoRequestMetricAdapter) Observe(_ context.Context, _ string, u url.URL, latency time.Duration) {
 	clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
 }
 
-// Definition of client-go workqueue metrics provider definition
+// Definition of client-go workqueue metrics provider definition.
 type clientGoWorkqueueMetricsProvider struct{}
 
 func (f *clientGoWorkqueueMetricsProvider) Register(registerer prometheus.Registerer) {
@@ -169,7 +169,7 @@ func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) {
 	return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name)
 }
 
-func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
+func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(string) workqueue.CounterMetric {
 	// Retries are not used so the metric is omitted.
 	return noopMetric{}
 }
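These adapters must keep satisfying client-go's interfaces even though they ignore most parameters; blank identifiers (or dropping names entirely) document that and quiet unused-parameter linters. A small illustrative example of the idiom, with invented names:

package main

import "fmt"

// counter is a stand-in for an interface imposed by a library.
type counter interface {
	Increment(code, method, host string)
}

type codeOnlyCounter struct{ byCode map[string]int }

// Only code is used; the blank identifiers make that explicit.
func (c codeOnlyCounter) Increment(code, _, _ string) {
	c.byCode[code]++
}

func main() {
	c := codeOnlyCounter{byCode: map[string]int{}}
	var iface counter = c
	iface.Increment("200", "GET", "example.com")
	fmt.Println(c.byCode) // map[200:1]
}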
discovery/kubernetes/endpoints.go
@@ -28,7 +28,6 @@ import (
 	"k8s.io/client-go/util/workqueue"
 
 	"github.com/prometheus/prometheus/discovery/targetgroup"
-	"github.com/prometheus/prometheus/util/strutil"
 )
 
 var (
@@ -247,9 +246,6 @@ func endpointsSourceFromNamespaceAndName(namespace, name string) string {
 }
 
 const (
-	endpointsLabelPrefix        = metaLabelPrefix + "endpoints_label_"
-	endpointsLabelPresentPrefix = metaLabelPrefix + "endpoints_labelpresent_"
-	endpointsNameLabel          = metaLabelPrefix + "endpoints_name"
 	endpointNodeName   = metaLabelPrefix + "endpoint_node_name"
 	endpointHostname   = metaLabelPrefix + "endpoint_hostname"
 	endpointReadyLabel = metaLabelPrefix + "endpoint_ready"
@@ -264,16 +260,11 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
 		Source: endpointsSource(eps),
 	}
 	tg.Labels = model.LabelSet{
 		namespaceLabel: lv(eps.Namespace),
-		endpointsNameLabel: lv(eps.Name),
 	}
 	e.addServiceLabels(eps.Namespace, eps.Name, tg)
 	// Add endpoints labels metadata.
-	for k, v := range eps.Labels {
-		ln := strutil.SanitizeLabelName(k)
-		tg.Labels[model.LabelName(endpointsLabelPrefix+ln)] = lv(v)
-		tg.Labels[model.LabelName(endpointsLabelPresentPrefix+ln)] = presentValue
-	}
+	addObjectMetaLabels(tg.Labels, eps.ObjectMeta, RoleEndpoint)
 
 	type podEntry struct {
 		pod *apiv1.Pod
@@ -304,7 +295,11 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
 		}
 
 		if e.withNodeMetadata {
-			target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName)
+			if addr.NodeName != nil {
+				target = addNodeLabels(target, e.nodeInf, e.logger, addr.NodeName)
+			} else if addr.TargetRef != nil && addr.TargetRef.Kind == "Node" {
+				target = addNodeLabels(target, e.nodeInf, e.logger, &addr.TargetRef.Name)
+			}
 		}
 
 		pod := e.resolvePodRef(addr.TargetRef)
@@ -384,18 +379,21 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
 					continue
 				}
 
-				a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
-				ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
+				// PodIP can be empty when a pod is starting or has been evicted.
+				if len(pe.pod.Status.PodIP) != 0 {
+					a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
+					ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
 
 					target := model.LabelSet{
 						model.AddressLabel:            lv(a),
 						podContainerNameLabel:         lv(c.Name),
 						podContainerImageLabel:        lv(c.Image),
 						podContainerPortNameLabel:     lv(cport.Name),
 						podContainerPortNumberLabel:   lv(ports),
 						podContainerPortProtocolLabel: lv(string(cport.Protocol)),
+					}
+					tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
 				}
-				tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
 			}
 		}
 	}
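The new guard matters because net.JoinHostPort happily builds an address from an empty host, which would produce an unscrapable __address__ such as ":9000" for pods that are still starting or already evicted. A quick demonstration:

package main

import (
	"fmt"
	"net"
)

func main() {
	// An empty PodIP still yields a syntactically joined address.
	fmt.Println(net.JoinHostPort("", "9000")) // ":9000" — not a usable target
	fmt.Println(net.JoinHostPort("10.0.0.7", "9000")) // "10.0.0.7:9000"
}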
@ -457,13 +455,7 @@ func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.L
|
|||||||
|
|
||||||
node := obj.(*apiv1.Node)
|
node := obj.(*apiv1.Node)
|
||||||
// Allocate one target label for the node name,
|
// Allocate one target label for the node name,
|
||||||
// and two target labels for each node label.
|
nodeLabelset := make(model.LabelSet)
|
||||||
nodeLabelset := make(model.LabelSet, 1+2*len(node.GetLabels()))
|
addObjectMetaLabels(nodeLabelset, node.ObjectMeta, RoleNode)
|
||||||
nodeLabelset[nodeNameLabel] = lv(*nodeName)
|
|
||||||
for k, v := range node.GetLabels() {
|
|
||||||
ln := strutil.SanitizeLabelName(k)
|
|
||||||
nodeLabelset[model.LabelName(nodeLabelPrefix+ln)] = lv(v)
|
|
||||||
nodeLabelset[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue
|
|
||||||
}
|
|
||||||
return tg.Merge(nodeLabelset)
|
return tg.Merge(nodeLabelset)
|
||||||
}
|
}
|
||||||
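addObjectMetaLabels itself is outside this diff; judging from the label names the tests below now expect, it centralizes the per-object name, label, labelpresent, annotation, and annotationpresent metadata for a given role. A hedged sketch of its likely shape — every name and signature here is an assumption, not the committed helper:

package main

import (
	"fmt"
	"strings"
)

// sanitize is a stand-in for strutil.SanitizeLabelName.
func sanitize(name string) string {
	return strings.Map(func(r rune) rune {
		if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '_' {
			return r
		}
		return '_'
	}, name)
}

// addObjectMetaLabelsSketch mirrors what the helper plausibly does per role.
func addObjectMetaLabelsSketch(ls map[string]string, name string, labels, annotations map[string]string, role string) {
	prefix := "__meta_kubernetes_" + role + "_"
	ls[prefix+"name"] = name
	for k, v := range labels {
		ls[prefix+"label_"+sanitize(k)] = v
		ls[prefix+"labelpresent_"+sanitize(k)] = "true"
	}
	for k, v := range annotations {
		ls[prefix+"annotation_"+sanitize(k)] = v
		ls[prefix+"annotationpresent_"+sanitize(k)] = "true"
	}
}

func main() {
	ls := map[string]string{}
	addObjectMetaLabelsSketch(ls, "testendpoints", nil, map[string]string{"test.annotation": "test"}, "endpoints")
	fmt.Println(ls["__meta_kubernetes_endpoints_annotation_test_annotation"]) // test
}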
discovery/kubernetes/endpoints_test.go
@@ -32,6 +32,9 @@ func makeEndpoints() *v1.Endpoints {
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "testendpoints",
 			Namespace: "default",
+			Annotations: map[string]string{
+				"test.annotation": "test",
+			},
 		},
 		Subsets: []v1.EndpointSubset{
 			{
@@ -69,6 +72,24 @@ func makeEndpoints() *v1.Endpoints {
 					},
 				},
 			},
+			{
+				Addresses: []v1.EndpointAddress{
+					{
+						IP: "6.7.8.9",
+						TargetRef: &v1.ObjectReference{
+							Kind: "Node",
+							Name: "barbaz",
+						},
+					},
+				},
+				Ports: []v1.EndpointPort{
+					{
+						Name:     "testport",
+						Port:     9002,
+						Protocol: v1.ProtocolTCP,
+					},
+				},
+			},
 		},
 	}
 }
@@ -106,10 +127,20 @@ func TestEndpointsDiscoveryBeforeRun(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":         "false",
 				},
+				{
+					"__address__": "6.7.8.9:9002",
+					"__meta_kubernetes_endpoint_address_target_kind": "Node",
+					"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+					"__meta_kubernetes_endpoint_port_name":           "testport",
+					"__meta_kubernetes_endpoint_port_protocol":       "TCP",
+					"__meta_kubernetes_endpoint_ready":               "true",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_namespace":      "default",
 				"__meta_kubernetes_endpoints_name": "testendpoints",
+				"__meta_kubernetes_endpoints_annotation_test_annotation":        "test",
+				"__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true",
 			},
 			Source: "endpoints/default/testendpoints",
 		},
@@ -398,13 +429,23 @@ func TestEndpointsDiscoveryWithService(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":         "false",
 				},
+				{
+					"__address__": "6.7.8.9:9002",
+					"__meta_kubernetes_endpoint_address_target_kind": "Node",
+					"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+					"__meta_kubernetes_endpoint_port_name":           "testport",
+					"__meta_kubernetes_endpoint_port_protocol":       "TCP",
+					"__meta_kubernetes_endpoint_ready":               "true",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_namespace":                     "default",
 				"__meta_kubernetes_endpoints_name":                "testendpoints",
 				"__meta_kubernetes_service_label_app_name":        "test",
 				"__meta_kubernetes_service_labelpresent_app_name": "true",
 				"__meta_kubernetes_service_name":                  "testendpoints",
+				"__meta_kubernetes_endpoints_annotation_test_annotation":        "test",
+				"__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true",
 			},
 			Source: "endpoints/default/testendpoints",
 		},
@@ -466,15 +507,25 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":         "false",
 				},
+				{
+					"__address__": "6.7.8.9:9002",
+					"__meta_kubernetes_endpoint_address_target_kind": "Node",
+					"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+					"__meta_kubernetes_endpoint_port_name":           "testport",
+					"__meta_kubernetes_endpoint_port_protocol":       "TCP",
+					"__meta_kubernetes_endpoint_ready":               "true",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_namespace":                      "default",
 				"__meta_kubernetes_endpoints_name":                 "testendpoints",
 				"__meta_kubernetes_service_label_app_name":         "svc",
 				"__meta_kubernetes_service_labelpresent_app_name":  "true",
 				"__meta_kubernetes_service_name":                   "testendpoints",
 				"__meta_kubernetes_service_label_component":        "testing",
 				"__meta_kubernetes_service_labelpresent_component": "true",
+				"__meta_kubernetes_endpoints_annotation_test_annotation":        "test",
+				"__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true",
 			},
 			Source: "endpoints/default/testendpoints",
 		},
@@ -484,8 +535,10 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) {
 
 func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
 	metadataConfig := AttachMetadataConfig{Node: true}
-	nodeLabels := map[string]string{"az": "us-east1"}
-	node := makeNode("foobar", "", "", nodeLabels, nil)
+	nodeLabels1 := map[string]string{"az": "us-east1"}
+	nodeLabels2 := map[string]string{"az": "us-west2"}
+	node1 := makeNode("foobar", "", "", nodeLabels1, nil)
+	node2 := makeNode("barbaz", "", "", nodeLabels2, nil)
 	svc := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "testendpoints",
@@ -495,7 +548,7 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
 		},
 	}
-	n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), svc, node)
+	n, _ := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), svc, node1, node2)
 
 	k8sDiscoveryTest{
 		discovery: n,
@@ -526,13 +579,26 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":         "false",
 				},
+				{
+					"__address__": "6.7.8.9:9002",
+					"__meta_kubernetes_endpoint_address_target_kind": "Node",
+					"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+					"__meta_kubernetes_endpoint_port_name":           "testport",
+					"__meta_kubernetes_endpoint_port_protocol":       "TCP",
+					"__meta_kubernetes_endpoint_ready":               "true",
+					"__meta_kubernetes_node_label_az":                "us-west2",
+					"__meta_kubernetes_node_labelpresent_az":         "true",
+					"__meta_kubernetes_node_name":                    "barbaz",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_namespace":                     "default",
 				"__meta_kubernetes_endpoints_name":                "testendpoints",
 				"__meta_kubernetes_service_label_app_name":        "test",
 				"__meta_kubernetes_service_labelpresent_app_name": "true",
 				"__meta_kubernetes_service_name":                  "testendpoints",
+				"__meta_kubernetes_endpoints_annotation_test_annotation":        "test",
+				"__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true",
 			},
 			Source: "endpoints/default/testendpoints",
 		},
@@ -541,8 +607,10 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
 }
 
 func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
-	nodeLabels := map[string]string{"az": "us-east1"}
-	nodes := makeNode("foobar", "", "", nodeLabels, nil)
+	nodeLabels1 := map[string]string{"az": "us-east1"}
+	nodeLabels2 := map[string]string{"az": "us-west2"}
+	node1 := makeNode("foobar", "", "", nodeLabels1, nil)
+	node2 := makeNode("barbaz", "", "", nodeLabels2, nil)
 	metadataConfig := AttachMetadataConfig{Node: true}
 	svc := &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
@@ -553,13 +621,13 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 		},
 	}
-	n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), nodes, svc)
+	n, c := makeDiscoveryWithMetadata(RoleEndpoint, NamespaceDiscovery{}, metadataConfig, makeEndpoints(), node1, node2, svc)
 
 	k8sDiscoveryTest{
 		discovery: n,
 		afterStart: func() {
-			nodes.Labels["az"] = "eu-central1"
-			c.CoreV1().Nodes().Update(context.Background(), nodes, metav1.UpdateOptions{})
+			node1.Labels["az"] = "eu-central1"
+			c.CoreV1().Nodes().Update(context.Background(), node1, metav1.UpdateOptions{})
 		},
 		expectedMaxItems: 2,
 		expectedRes: map[string]*targetgroup.Group{
@@ -572,7 +640,7 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_name":     "testport",
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":         "true",
-					"__meta_kubernetes_node_label_az":          "eu-central1",
+					"__meta_kubernetes_node_label_az":          "us-east1",
 					"__meta_kubernetes_node_labelpresent_az":   "true",
 					"__meta_kubernetes_node_name":              "foobar",
 				},
@@ -588,13 +656,26 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":         "false",
 				},
+				{
+					"__address__": "6.7.8.9:9002",
+					"__meta_kubernetes_endpoint_address_target_kind": "Node",
+					"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+					"__meta_kubernetes_endpoint_port_name":           "testport",
+					"__meta_kubernetes_endpoint_port_protocol":       "TCP",
+					"__meta_kubernetes_endpoint_ready":               "true",
+					"__meta_kubernetes_node_label_az":                "us-west2",
+					"__meta_kubernetes_node_labelpresent_az":         "true",
+					"__meta_kubernetes_node_name":                    "barbaz",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_namespace":                     "default",
 				"__meta_kubernetes_endpoints_name":                "testendpoints",
 				"__meta_kubernetes_service_label_app_name":        "test",
 				"__meta_kubernetes_service_labelpresent_app_name": "true",
 				"__meta_kubernetes_service_name":                  "testendpoints",
+				"__meta_kubernetes_endpoints_annotation_test_annotation":        "test",
+				"__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true",
 			},
 			Source: "endpoints/default/testendpoints",
 		},
@@ -699,13 +780,23 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":         "false",
 				},
+				{
+					"__address__": "6.7.8.9:9002",
+					"__meta_kubernetes_endpoint_address_target_kind": "Node",
+					"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+					"__meta_kubernetes_endpoint_port_name":           "testport",
+					"__meta_kubernetes_endpoint_port_protocol":       "TCP",
+					"__meta_kubernetes_endpoint_ready":               "true",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_namespace":      "ns1",
 				"__meta_kubernetes_endpoints_name": "testendpoints",
-				"__meta_kubernetes_service_label_app":        "app1",
-				"__meta_kubernetes_service_labelpresent_app": "true",
-				"__meta_kubernetes_service_name":             "testendpoints",
+				"__meta_kubernetes_endpoints_annotation_test_annotation":        "test",
+				"__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true",
+				"__meta_kubernetes_service_label_app":                           "app1",
+				"__meta_kubernetes_service_labelpresent_app":                    "true",
+				"__meta_kubernetes_service_name":                                "testendpoints",
 			},
 			Source: "endpoints/ns1/testendpoints",
 		},
@@ -815,13 +906,66 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) {
 					"__meta_kubernetes_endpoint_port_protocol": "TCP",
 					"__meta_kubernetes_endpoint_ready":         "false",
 				},
+				{
+					"__address__": "6.7.8.9:9002",
+					"__meta_kubernetes_endpoint_address_target_kind": "Node",
+					"__meta_kubernetes_endpoint_address_target_name": "barbaz",
+					"__meta_kubernetes_endpoint_port_name":           "testport",
+					"__meta_kubernetes_endpoint_port_protocol":       "TCP",
+					"__meta_kubernetes_endpoint_ready":               "true",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_namespace":      "own-ns",
 				"__meta_kubernetes_endpoints_name": "testendpoints",
+				"__meta_kubernetes_endpoints_annotation_test_annotation":        "test",
+				"__meta_kubernetes_endpoints_annotationpresent_test_annotation": "true",
 			},
 			Source: "endpoints/own-ns/testendpoints",
 		},
 	},
 }.Run(t)
 }
 
+func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) {
+	ep := makeEndpoints()
+	ep.Namespace = "ns"
+
+	pod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "testpod",
+			Namespace: "ns",
+			UID:       types.UID("deadbeef"),
+		},
+		Spec: v1.PodSpec{
+			NodeName: "testnode",
+			Containers: []v1.Container{
+				{
+					Name:  "p1",
+					Image: "p1:latest",
+					Ports: []v1.ContainerPort{
+						{
+							Name:          "mainport",
+							ContainerPort: 9000,
+							Protocol:      v1.ProtocolTCP,
+						},
+					},
+				},
+			},
+		},
+		Status: v1.PodStatus{},
+	}
+
+	objs := []runtime.Object{
+		ep,
+		pod,
+	}
+
+	n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{IncludeOwnNamespace: true}, objs...)
+
+	k8sDiscoveryTest{
+		discovery:        n,
+		expectedMaxItems: 0,
+		expectedRes:      map[string]*targetgroup.Group{},
+	}.Run(t)
+}
discovery/kubernetes/endpointslice.go
@@ -15,6 +15,7 @@ package kubernetes
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"net"
 	"strconv"
@@ -183,7 +184,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 		cacheSyncs = append(cacheSyncs, e.nodeInf.HasSynced)
 	}
 	if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
-		if ctx.Err() != context.Canceled {
+		if !errors.Is(ctx.Err(), context.Canceled) {
 			level.Error(e.logger).Log("msg", "endpointslice informer unable to sync cache")
 		}
 		return
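ctx.Err() happens to return the sentinel directly, so == would work here, but errors.Is is the robust comparison once errors may be wrapped. A short demonstration of the difference:

package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	wrapped := fmt.Errorf("sync aborted: %w", context.Canceled)
	fmt.Println(wrapped == context.Canceled)          // false: not the same value
	fmt.Println(errors.Is(wrapped, context.Canceled)) // true: unwraps the chain
}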
@@ -252,7 +253,6 @@ func endpointSliceSourceFromNamespaceAndName(namespace, name string) string {
 }
 
 const (
-	endpointSliceNameLabel         = metaLabelPrefix + "endpointslice_name"
 	endpointSliceAddressTypeLabel  = metaLabelPrefix + "endpointslice_address_type"
 	endpointSlicePortNameLabel     = metaLabelPrefix + "endpointslice_port_name"
 	endpointSlicePortProtocolLabel = metaLabelPrefix + "endpointslice_port_protocol"
@@ -274,9 +274,11 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgroup.Group {
 	}
 	tg.Labels = model.LabelSet{
 		namespaceLabel:                lv(eps.namespace()),
-		endpointSliceNameLabel:        lv(eps.name()),
 		endpointSliceAddressTypeLabel: lv(eps.addressType()),
 	}
+
+	addObjectMetaLabels(tg.Labels, eps.getObjectMeta(), RoleEndpointSlice)
+
 	e.addServiceLabels(eps, tg)
 
 	type podEntry struct {
@@ -300,7 +302,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgroup.Group {
 		}
 
 		if port.protocol() != nil {
-			target[endpointSlicePortProtocolLabel] = lv(string(*port.protocol()))
+			target[endpointSlicePortProtocolLabel] = lv(*port.protocol())
 		}
 
 		if port.port() != nil {
@@ -339,7 +341,11 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgroup.Group {
 		}
 
 		if e.withNodeMetadata {
-			target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename())
+			if ep.targetRef() != nil && ep.targetRef().Kind == "Node" {
+				target = addNodeLabels(target, e.nodeInf, e.logger, &ep.targetRef().Name)
+			} else {
+				target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename())
+			}
 		}
 
 		pod := e.resolvePodRef(ep.targetRef())
@@ -412,18 +418,21 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgroup.Group {
 					continue
 				}
 
-				a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
-				ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
+				// PodIP can be empty when a pod is starting or has been evicted.
+				if len(pe.pod.Status.PodIP) != 0 {
+					a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
+					ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
 
 					target := model.LabelSet{
 						model.AddressLabel:            lv(a),
 						podContainerNameLabel:         lv(c.Name),
 						podContainerImageLabel:        lv(c.Image),
 						podContainerPortNameLabel:     lv(cport.Name),
 						podContainerPortNumberLabel:   lv(ports),
 						podContainerPortProtocolLabel: lv(string(cport.Protocol)),
+					}
+					tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
 				}
-				tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
 			}
 		}
 	}
discovery/kubernetes/endpointslice_adaptor.go
@@ -17,11 +17,13 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	v1 "k8s.io/api/discovery/v1"
 	"k8s.io/api/discovery/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-// endpointSliceAdaptor is an adaptor for the different EndpointSlice versions
+// endpointSliceAdaptor is an adaptor for the different EndpointSlice versions.
 type endpointSliceAdaptor interface {
 	get() interface{}
+	getObjectMeta() metav1.ObjectMeta
 	name() string
 	namespace() string
 	addressType() string
@@ -53,7 +55,7 @@ type endpointSliceEndpointConditionsAdaptor interface {
 	terminating() *bool
 }
 
-// Adaptor for k8s.io/api/discovery/v1
+// Adaptor for k8s.io/api/discovery/v1.
 type endpointSliceAdaptorV1 struct {
 	endpointSlice *v1.EndpointSlice
 }
@@ -66,6 +68,10 @@ func (e *endpointSliceAdaptorV1) get() interface{} {
 	return e.endpointSlice
 }
 
+func (e *endpointSliceAdaptorV1) getObjectMeta() metav1.ObjectMeta {
+	return e.endpointSlice.ObjectMeta
+}
+
 func (e *endpointSliceAdaptorV1) name() string {
 	return e.endpointSlice.ObjectMeta.Name
 }
@@ -102,7 +108,7 @@ func (e *endpointSliceAdaptorV1) labelServiceName() string {
 	return v1.LabelServiceName
 }
 
-// Adaptor for k8s.io/api/discovery/v1beta1
+// Adaptor for k8s.io/api/discovery/v1beta1.
 type endpointSliceAdaptorV1Beta1 struct {
 	endpointSlice *v1beta1.EndpointSlice
 }
@@ -115,6 +121,10 @@ func (e *endpointSliceAdaptorV1Beta1) get() interface{} {
 	return e.endpointSlice
 }
 
+func (e *endpointSliceAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta {
+	return e.endpointSlice.ObjectMeta
+}
+
 func (e *endpointSliceAdaptorV1Beta1) name() string {
 	return e.endpointSlice.Name
 }
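getObjectMeta() extends the adaptor so callers can attach object metadata without knowing which EndpointSlice API version backs it. The same idea in a compact, illustrative form (stand-in types, not the real adaptor):

package main

import "fmt"

// objectMeta is a stand-in for metav1.ObjectMeta.
type objectMeta struct {
	Name        string
	Annotations map[string]string
}

// adaptor mirrors the getObjectMeta() addition: one accessor, two API versions.
type adaptor interface {
	getObjectMeta() objectMeta
}

type v1Slice struct{ meta objectMeta }
type v1beta1Slice struct{ meta objectMeta }

func (s v1Slice) getObjectMeta() objectMeta      { return s.meta }
func (s v1beta1Slice) getObjectMeta() objectMeta { return s.meta }

// attachMeta works on any version through the adaptor.
func attachMeta(a adaptor) string {
	return a.getObjectMeta().Name
}

func main() {
	fmt.Println(attachMeta(v1Slice{objectMeta{Name: "testendpoints"}}))
	fmt.Println(attachMeta(v1beta1Slice{objectMeta{Name: "testendpoints"}}))
}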
|
@ -52,6 +52,9 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
|
|||||||
Labels: map[string]string{
|
Labels: map[string]string{
|
||||||
v1.LabelServiceName: "testendpoints",
|
v1.LabelServiceName: "testendpoints",
|
||||||
},
|
},
|
||||||
|
Annotations: map[string]string{
|
||||||
|
"test.annotation": "test",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
AddressType: v1.AddressTypeIPv4,
|
AddressType: v1.AddressTypeIPv4,
|
||||||
Ports: []v1.EndpointPort{
|
Ports: []v1.EndpointPort{
|
||||||
@ -90,6 +93,17 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
|
|||||||
Serving: boolptr(true),
|
Serving: boolptr(true),
|
||||||
Terminating: boolptr(true),
|
Terminating: boolptr(true),
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
Addresses: []string{"4.5.6.7"},
|
||||||
|
Conditions: v1.EndpointConditions{
|
||||||
|
Ready: boolptr(true),
|
||||||
|
Serving: boolptr(true),
|
||||||
|
Terminating: boolptr(false),
|
||||||
|
},
|
||||||
|
TargetRef: &corev1.ObjectReference{
|
||||||
|
Kind: "Node",
|
||||||
|
Name: "barbaz",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -103,6 +117,9 @@ func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice {
|
|||||||
Labels: map[string]string{
|
Labels: map[string]string{
|
||||||
v1beta1.LabelServiceName: "testendpoints",
|
v1beta1.LabelServiceName: "testendpoints",
|
||||||
},
|
},
|
||||||
|
Annotations: map[string]string{
|
||||||
|
"test.annotation": "test",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
AddressType: v1beta1.AddressTypeIPv4,
|
AddressType: v1beta1.AddressTypeIPv4,
|
||||||
Ports: []v1beta1.EndpointPort{
|
Ports: []v1beta1.EndpointPort{
|
||||||
@ -130,6 +147,17 @@ func makeEndpointSliceV1beta1() *v1beta1.EndpointSlice {
|
|||||||
Serving: boolptr(true),
|
Serving: boolptr(true),
|
||||||
Terminating: boolptr(true),
|
Terminating: boolptr(true),
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
Addresses: []string{"4.5.6.7"},
|
||||||
|
Conditions: v1beta1.EndpointConditions{
|
||||||
|
Ready: boolptr(true),
|
||||||
|
Serving: boolptr(true),
|
||||||
|
Terminating: boolptr(false),
|
||||||
|
},
|
||||||
|
TargetRef: &corev1.ObjectReference{
|
||||||
|
Kind: "Node",
|
||||||
|
Name: "barbaz",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -183,11 +211,27 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
|
|||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.5.6.7:9000",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Labels: model.LabelSet{
|
Labels: model.LabelSet{
|
||||||
"__meta_kubernetes_endpointslice_address_type": "IPv4",
|
"__meta_kubernetes_endpointslice_address_type": "IPv4",
|
||||||
"__meta_kubernetes_namespace": "default",
|
"__meta_kubernetes_namespace": "default",
|
||||||
"__meta_kubernetes_endpointslice_name": "testendpoints",
|
"__meta_kubernetes_endpointslice_name": "testendpoints",
|
||||||
|
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints",
|
||||||
|
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_annotation_test_annotation": "test",
|
||||||
|
"__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true",
|
||||||
},
|
},
|
||||||
Source: "endpointslice/default/testendpoints",
|
Source: "endpointslice/default/testendpoints",
|
||||||
},
|
},
|
||||||
@ -233,11 +277,26 @@ func TestEndpointSliceDiscoveryBeforeRunV1beta1(t *testing.T) {
|
|||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.5.6.7:9000",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Labels: model.LabelSet{
|
Labels: model.LabelSet{
|
||||||
"__meta_kubernetes_endpointslice_address_type": "IPv4",
|
"__meta_kubernetes_endpointslice_address_type": "IPv4",
|
||||||
"__meta_kubernetes_namespace": "default",
|
"__meta_kubernetes_namespace": "default",
|
||||||
"__meta_kubernetes_endpointslice_name": "testendpoints",
|
"__meta_kubernetes_endpointslice_name": "testendpoints",
|
||||||
|
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints",
|
||||||
|
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_annotation_test_annotation": "test",
|
||||||
|
"__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true",
|
||||||
},
|
},
|
||||||
Source: "endpointslice/default/testendpoints",
|
Source: "endpointslice/default/testendpoints",
|
||||||
},
|
},
|
||||||
@ -419,11 +478,27 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) {
|
|||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.5.6.7:9000",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Labels: map[model.LabelName]model.LabelValue{
|
Labels: map[model.LabelName]model.LabelValue{
|
||||||
"__meta_kubernetes_endpointslice_address_type": "IPv4",
|
"__meta_kubernetes_endpointslice_address_type": "IPv4",
|
||||||
"__meta_kubernetes_endpointslice_name": "testendpoints",
|
"__meta_kubernetes_endpointslice_name": "testendpoints",
|
||||||
"__meta_kubernetes_namespace": "default",
|
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints",
|
||||||
|
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_annotation_test_annotation": "test",
|
||||||
|
"__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true",
|
||||||
|
"__meta_kubernetes_namespace": "default",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -503,11 +578,27 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
|
|||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.5.6.7:9000",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Labels: model.LabelSet{
|
Labels: model.LabelSet{
|
||||||
"__meta_kubernetes_endpointslice_address_type": "IPv4",
|
"__meta_kubernetes_endpointslice_address_type": "IPv4",
|
||||||
"__meta_kubernetes_endpointslice_name": "testendpoints",
|
"__meta_kubernetes_endpointslice_name": "testendpoints",
|
||||||
"__meta_kubernetes_namespace": "default",
|
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints",
|
||||||
|
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_annotation_test_annotation": "test",
|
||||||
|
"__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true",
|
||||||
|
"__meta_kubernetes_namespace": "default",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -576,11 +667,27 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
|
|||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.5.6.7:9000",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Labels: model.LabelSet{
|
Labels: model.LabelSet{
|
||||||
"__meta_kubernetes_endpointslice_address_type": "IPv4",
|
"__meta_kubernetes_endpointslice_address_type": "IPv4",
|
||||||
"__meta_kubernetes_endpointslice_name": "testendpoints",
|
"__meta_kubernetes_endpointslice_name": "testendpoints",
|
||||||
"__meta_kubernetes_namespace": "default",
|
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints",
|
||||||
|
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_annotation_test_annotation": "test",
|
||||||
|
"__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true",
|
||||||
|
"__meta_kubernetes_namespace": "default",
|
||||||
},
|
},
|
||||||
Source: "endpointslice/default/testendpoints",
|
Source: "endpointslice/default/testendpoints",
|
||||||
},
|
},
|
||||||
@ -644,14 +751,30 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
|
|||||||
"__meta_kubernetes_endpointslice_port_name": "testport",
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.5.6.7:9000",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_kind": "Node",
|
||||||
|
"__meta_kubernetes_endpointslice_address_target_name": "barbaz",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
|
||||||
|
"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
|
||||||
|
"__meta_kubernetes_endpointslice_port": "9000",
|
||||||
|
"__meta_kubernetes_endpointslice_port_app_protocol": "http",
|
||||||
|
"__meta_kubernetes_endpointslice_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpointslice_port_protocol": "TCP",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Labels: model.LabelSet{
|
Labels: model.LabelSet{
|
||||||
"__meta_kubernetes_endpointslice_address_type": "IPv4",
|
"__meta_kubernetes_endpointslice_address_type": "IPv4",
|
||||||
"__meta_kubernetes_endpointslice_name": "testendpoints",
|
"__meta_kubernetes_endpointslice_name": "testendpoints",
|
||||||
"__meta_kubernetes_namespace": "default",
|
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "testendpoints",
|
||||||
"__meta_kubernetes_service_label_app_name": "test",
|
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
|
||||||
"__meta_kubernetes_service_labelpresent_app_name": "true",
|
"__meta_kubernetes_endpointslice_annotation_test_annotation": "test",
|
||||||
"__meta_kubernetes_service_name": "testendpoints",
|
"__meta_kubernetes_endpointslice_annotationpresent_test_annotation": "true",
|
||||||
|
"__meta_kubernetes_namespace": "default",
|
||||||
|
"__meta_kubernetes_service_label_app_name": "test",
|
||||||
|
"__meta_kubernetes_service_labelpresent_app_name": "true",
|
||||||
|
"__meta_kubernetes_service_name": "testendpoints",
|
||||||
},
|
},
|
||||||
Source: "endpointslice/default/testendpoints",
|
Source: "endpointslice/default/testendpoints",
|
||||||
},
|
},
|
||||||
@@ -728,16 +851,32 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_protocol":     "TCP",
 					"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":              "9000",
+					"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+					"__meta_kubernetes_endpointslice_port_name":         "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":     "TCP",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",
 				"__meta_kubernetes_endpointslice_name":         "testendpoints",
-				"__meta_kubernetes_namespace":                      "default",
-				"__meta_kubernetes_service_label_app_name":         "svc",
-				"__meta_kubernetes_service_label_component":        "testing",
-				"__meta_kubernetes_service_labelpresent_app_name":  "true",
-				"__meta_kubernetes_service_labelpresent_component": "true",
-				"__meta_kubernetes_service_name":                   "testendpoints",
+				"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name":        "testendpoints",
+				"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
+				"__meta_kubernetes_endpointslice_annotation_test_annotation":              "test",
+				"__meta_kubernetes_endpointslice_annotationpresent_test_annotation":       "true",
+				"__meta_kubernetes_namespace":                      "default",
+				"__meta_kubernetes_service_label_app_name":         "svc",
+				"__meta_kubernetes_service_label_component":        "testing",
+				"__meta_kubernetes_service_labelpresent_app_name":  "true",
+				"__meta_kubernetes_service_labelpresent_component": "true",
+				"__meta_kubernetes_service_name":                   "testendpoints",
 			},
 			Source: "endpointslice/default/testendpoints",
 		},
@@ -747,7 +886,8 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
 
 func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
 	metadataConfig := AttachMetadataConfig{Node: true}
-	nodeLabels := map[string]string{"az": "us-east1"}
+	nodeLabels1 := map[string]string{"az": "us-east1"}
+	nodeLabels2 := map[string]string{"az": "us-west2"}
 	svc := &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "testendpoints",
@@ -757,7 +897,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
 			},
 		},
 	}
-	objs := []runtime.Object{makeEndpointSliceV1(), makeNode("foobar", "", "", nodeLabels, nil), svc}
+	objs := []runtime.Object{makeEndpointSliceV1(), makeNode("foobar", "", "", nodeLabels1, nil), makeNode("barbaz", "", "", nodeLabels2, nil), svc}
 	n, _ := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...)
 
 	k8sDiscoveryTest{
@@ -804,14 +944,33 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_name":     "testport",
 					"__meta_kubernetes_endpointslice_port_protocol": "TCP",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":              "9000",
+					"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+					"__meta_kubernetes_endpointslice_port_name":         "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":     "TCP",
+					"__meta_kubernetes_node_label_az":                   "us-west2",
+					"__meta_kubernetes_node_labelpresent_az":            "true",
+					"__meta_kubernetes_node_name":                       "barbaz",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",
 				"__meta_kubernetes_endpointslice_name":         "testendpoints",
-				"__meta_kubernetes_namespace":                     "default",
-				"__meta_kubernetes_service_label_app_name":        "test",
-				"__meta_kubernetes_service_labelpresent_app_name": "true",
-				"__meta_kubernetes_service_name":                  "testendpoints",
+				"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name":        "testendpoints",
+				"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
+				"__meta_kubernetes_endpointslice_annotation_test_annotation":              "test",
+				"__meta_kubernetes_endpointslice_annotationpresent_test_annotation":       "true",
+				"__meta_kubernetes_namespace":                     "default",
+				"__meta_kubernetes_service_label_app_name":        "test",
+				"__meta_kubernetes_service_labelpresent_app_name": "true",
+				"__meta_kubernetes_service_name":                  "testendpoints",
 			},
 			Source: "endpointslice/default/testendpoints",
 		},
@@ -821,7 +980,8 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 
 func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 	metadataConfig := AttachMetadataConfig{Node: true}
-	nodeLabels := map[string]string{"az": "us-east1"}
+	nodeLabels1 := map[string]string{"az": "us-east1"}
+	nodeLabels2 := map[string]string{"az": "us-west2"}
 	svc := &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "testendpoints",
@@ -831,16 +991,17 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 			},
 		},
 	}
-	node := makeNode("foobar", "", "", nodeLabels, nil)
-	objs := []runtime.Object{makeEndpointSliceV1(), node, svc}
+	node1 := makeNode("foobar", "", "", nodeLabels1, nil)
+	node2 := makeNode("barbaz", "", "", nodeLabels2, nil)
+	objs := []runtime.Object{makeEndpointSliceV1(), node1, node2, svc}
 	n, c := makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, metadataConfig, objs...)
 
 	k8sDiscoveryTest{
 		discovery:        n,
 		expectedMaxItems: 2,
 		afterStart: func() {
-			node.Labels["az"] = "us-central1"
-			c.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{})
+			node1.Labels["az"] = "us-central1"
+			c.CoreV1().Nodes().Update(context.Background(), node1, metav1.UpdateOptions{})
 		},
 		expectedRes: map[string]*targetgroup.Group{
 			"endpointslice/default/testendpoints": {
@@ -859,7 +1020,7 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 					"__meta_kubernetes_endpointslice_port_name":         "testport",
 					"__meta_kubernetes_endpointslice_port_protocol":     "TCP",
-					"__meta_kubernetes_node_label_az":                   "us-central1",
+					"__meta_kubernetes_node_label_az":                   "us-east1",
 					"__meta_kubernetes_node_labelpresent_az":            "true",
 					"__meta_kubernetes_node_name":                       "foobar",
 				},
@@ -883,14 +1044,33 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_name":     "testport",
 					"__meta_kubernetes_endpointslice_port_protocol": "TCP",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":              "9000",
+					"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+					"__meta_kubernetes_endpointslice_port_name":         "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":     "TCP",
+					"__meta_kubernetes_node_label_az":                   "us-west2",
+					"__meta_kubernetes_node_labelpresent_az":            "true",
+					"__meta_kubernetes_node_name":                       "barbaz",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",
 				"__meta_kubernetes_endpointslice_name":         "testendpoints",
-				"__meta_kubernetes_namespace":                     "default",
-				"__meta_kubernetes_service_label_app_name":        "test",
-				"__meta_kubernetes_service_labelpresent_app_name": "true",
-				"__meta_kubernetes_service_name":                  "testendpoints",
+				"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name":        "testendpoints",
+				"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
+				"__meta_kubernetes_endpointslice_annotation_test_annotation":              "test",
+				"__meta_kubernetes_endpointslice_annotationpresent_test_annotation":       "true",
+				"__meta_kubernetes_namespace":                     "default",
+				"__meta_kubernetes_service_label_app_name":        "test",
+				"__meta_kubernetes_service_labelpresent_app_name": "true",
+				"__meta_kubernetes_service_name":                  "testendpoints",
 			},
 			Source: "endpointslice/default/testendpoints",
 		},
@@ -1007,14 +1187,30 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_protocol":     "TCP",
 					"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":              "9000",
+					"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+					"__meta_kubernetes_endpointslice_port_name":         "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":     "TCP",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",
 				"__meta_kubernetes_endpointslice_name":         "testendpoints",
-				"__meta_kubernetes_namespace":                "ns1",
-				"__meta_kubernetes_service_label_app":        "app1",
-				"__meta_kubernetes_service_labelpresent_app": "true",
-				"__meta_kubernetes_service_name":             "testendpoints",
+				"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name":        "testendpoints",
+				"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
+				"__meta_kubernetes_endpointslice_annotation_test_annotation":              "test",
+				"__meta_kubernetes_endpointslice_annotationpresent_test_annotation":       "true",
+				"__meta_kubernetes_namespace":                "ns1",
+				"__meta_kubernetes_service_label_app":        "app1",
+				"__meta_kubernetes_service_labelpresent_app": "true",
+				"__meta_kubernetes_service_name":             "testendpoints",
 			},
 			Source: "endpointslice/ns1/testendpoints",
 		},
@@ -1139,14 +1335,73 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
 					"__meta_kubernetes_endpointslice_port_protocol":     "TCP",
 					"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				},
+				{
+					"__address__": "4.5.6.7:9000",
+					"__meta_kubernetes_endpointslice_address_target_kind":             "Node",
+					"__meta_kubernetes_endpointslice_address_target_name":             "barbaz",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_ready":       "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_serving":     "true",
+					"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+					"__meta_kubernetes_endpointslice_port":              "9000",
+					"__meta_kubernetes_endpointslice_port_app_protocol": "http",
+					"__meta_kubernetes_endpointslice_port_name":         "testport",
+					"__meta_kubernetes_endpointslice_port_protocol":     "TCP",
+				},
 			},
 			Labels: model.LabelSet{
 				"__meta_kubernetes_endpointslice_address_type": "IPv4",
 				"__meta_kubernetes_endpointslice_name":         "testendpoints",
 				"__meta_kubernetes_namespace":                  "own-ns",
+				"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name":        "testendpoints",
+				"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
+				"__meta_kubernetes_endpointslice_annotation_test_annotation":              "test",
+				"__meta_kubernetes_endpointslice_annotationpresent_test_annotation":       "true",
 			},
 			Source: "endpointslice/own-ns/testendpoints",
 		},
 	},
 }.Run(t)
 }
 
+func TestEndpointSliceDiscoveryEmptyPodStatus(t *testing.T) {
+	ep := makeEndpointSliceV1()
+	ep.Namespace = "ns"
+
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "testpod",
+			Namespace: "ns",
+			UID:       types.UID("deadbeef"),
+		},
+		Spec: corev1.PodSpec{
+			NodeName: "testnode",
+			Containers: []corev1.Container{
+				{
+					Name:  "p1",
+					Image: "p1:latest",
+					Ports: []corev1.ContainerPort{
+						{
+							Name:          "mainport",
+							ContainerPort: 9000,
+							Protocol:      corev1.ProtocolTCP,
+						},
+					},
+				},
+			},
+		},
+		Status: corev1.PodStatus{},
+	}
+
+	objs := []runtime.Object{
+		ep,
+		pod,
+	}
+
+	n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{IncludeOwnNamespace: true}, objs...)
+
+	k8sDiscoveryTest{
+		discovery:        n,
+		expectedMaxItems: 0,
+		expectedRes:      map[string]*targetgroup.Group{},
+	}.Run(t)
+}
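Note on the test hunks above: the repeated fixture changes encode the two behavior changes of this commit for EndpointSlice discovery. First, an endpoint whose targetRef points at a Node (the new 4.5.6.7:9000 target with __meta_kubernetes_endpointslice_address_target_kind "Node") now yields a target. Second, the slice object's own labels and annotations are now surfaced as __meta_kubernetes_endpointslice_label_* / _annotation_* meta labels. A minimal, self-contained sketch of the kind of fixture that exercises both - the repo's actual makeEndpointSliceV1 helper is not shown in these hunks, so this is an editor's illustration, not the test's code:

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	v1 "k8s.io/api/discovery/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func boolptr(b bool) *bool { return &b }

    // exampleEndpointSlice carries its own labels/annotations (source of the new
    // endpointslice_* meta labels) and one endpoint backed by a Node, "barbaz",
    // which is what produces the added 4.5.6.7:9000 target above.
    func exampleEndpointSlice() *v1.EndpointSlice {
    	name, port, protocol, appProtocol := "testport", int32(9000), corev1.ProtocolTCP, "http"
    	return &v1.EndpointSlice{
    		ObjectMeta: metav1.ObjectMeta{
    			Name:        "testendpoints",
    			Namespace:   "default",
    			Labels:      map[string]string{v1.LabelServiceName: "testendpoints"},
    			Annotations: map[string]string{"test.annotation": "test"},
    		},
    		AddressType: v1.AddressTypeIPv4,
    		Ports: []v1.EndpointPort{
    			{Name: &name, Port: &port, Protocol: &protocol, AppProtocol: &appProtocol},
    		},
    		Endpoints: []v1.Endpoint{{
    			Addresses:  []string{"4.5.6.7"},
    			Conditions: v1.EndpointConditions{Ready: boolptr(true), Serving: boolptr(true), Terminating: boolptr(false)},
    			TargetRef:  &corev1.ObjectReference{Kind: "Node", Name: "barbaz"},
    		}},
    	}
    }

    func main() {
    	fmt.Println(exampleEndpointSlice().Name) // testendpoints
    }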
@@ -28,7 +28,6 @@ import (
 	"k8s.io/client-go/util/workqueue"
 
 	"github.com/prometheus/prometheus/discovery/targetgroup"
-	"github.com/prometheus/prometheus/util/strutil"
 )
 
 var (
@@ -143,37 +142,22 @@ func ingressSourceFromNamespaceAndName(namespace, name string) string {
 }
 
 const (
-	ingressNameLabel               = metaLabelPrefix + "ingress_name"
-	ingressLabelPrefix             = metaLabelPrefix + "ingress_label_"
-	ingressLabelPresentPrefix      = metaLabelPrefix + "ingress_labelpresent_"
-	ingressAnnotationPrefix        = metaLabelPrefix + "ingress_annotation_"
-	ingressAnnotationPresentPrefix = metaLabelPrefix + "ingress_annotationpresent_"
-	ingressSchemeLabel             = metaLabelPrefix + "ingress_scheme"
-	ingressHostLabel               = metaLabelPrefix + "ingress_host"
-	ingressPathLabel               = metaLabelPrefix + "ingress_path"
-	ingressClassNameLabel          = metaLabelPrefix + "ingress_class_name"
+	ingressSchemeLabel    = metaLabelPrefix + "ingress_scheme"
+	ingressHostLabel      = metaLabelPrefix + "ingress_host"
+	ingressPathLabel      = metaLabelPrefix + "ingress_path"
+	ingressClassNameLabel = metaLabelPrefix + "ingress_class_name"
 )
 
 func ingressLabels(ingress ingressAdaptor) model.LabelSet {
 	// Each label and annotation will create two key-value pairs in the map.
-	ls := make(model.LabelSet, 2*(len(ingress.labels())+len(ingress.annotations()))+2)
-	ls[ingressNameLabel] = lv(ingress.name())
+	ls := make(model.LabelSet)
 	ls[namespaceLabel] = lv(ingress.namespace())
 	if cls := ingress.ingressClassName(); cls != nil {
 		ls[ingressClassNameLabel] = lv(*cls)
 	}
 
-	for k, v := range ingress.labels() {
-		ln := strutil.SanitizeLabelName(k)
-		ls[model.LabelName(ingressLabelPrefix+ln)] = lv(v)
-		ls[model.LabelName(ingressLabelPresentPrefix+ln)] = presentValue
-	}
-
-	for k, v := range ingress.annotations() {
-		ln := strutil.SanitizeLabelName(k)
-		ls[model.LabelName(ingressAnnotationPrefix+ln)] = lv(v)
-		ls[model.LabelName(ingressAnnotationPresentPrefix+ln)] = presentValue
-	}
+	addObjectMetaLabels(ls, ingress.getObjectMeta(), RoleIngress)
+
 	return ls
 }
@@ -16,10 +16,12 @@ package kubernetes
 import (
 	v1 "k8s.io/api/networking/v1"
 	"k8s.io/api/networking/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-// ingressAdaptor is an adaptor for the different Ingress versions
+// ingressAdaptor is an adaptor for the different Ingress versions.
 type ingressAdaptor interface {
+	getObjectMeta() metav1.ObjectMeta
 	name() string
 	namespace() string
 	labels() map[string]string
@@ -34,7 +36,7 @@ type ingressRuleAdaptor interface {
 	host() string
 }
 
-// Adaptor for networking.k8s.io/v1
+// Adaptor for networking.k8s.io/v1.
 type ingressAdaptorV1 struct {
 	ingress *v1.Ingress
 }
@@ -43,11 +45,12 @@ func newIngressAdaptorFromV1(ingress *v1.Ingress) ingressAdaptor {
 	return &ingressAdaptorV1{ingress: ingress}
 }
 
-func (i *ingressAdaptorV1) name() string                   { return i.ingress.Name }
-func (i *ingressAdaptorV1) namespace() string              { return i.ingress.Namespace }
-func (i *ingressAdaptorV1) labels() map[string]string      { return i.ingress.Labels }
-func (i *ingressAdaptorV1) annotations() map[string]string { return i.ingress.Annotations }
-func (i *ingressAdaptorV1) ingressClassName() *string      { return i.ingress.Spec.IngressClassName }
+func (i *ingressAdaptorV1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta }
+func (i *ingressAdaptorV1) name() string                     { return i.ingress.Name }
+func (i *ingressAdaptorV1) namespace() string                { return i.ingress.Namespace }
+func (i *ingressAdaptorV1) labels() map[string]string        { return i.ingress.Labels }
+func (i *ingressAdaptorV1) annotations() map[string]string   { return i.ingress.Annotations }
+func (i *ingressAdaptorV1) ingressClassName() *string        { return i.ingress.Spec.IngressClassName }
 
 func (i *ingressAdaptorV1) tlsHosts() []string {
 	var hosts []string
@@ -87,7 +90,7 @@ func (i *ingressRuleAdaptorV1) paths() []string {
 
 func (i *ingressRuleAdaptorV1) host() string { return i.rule.Host }
 
-// Adaptor for networking.k8s.io/v1beta1
+// Adaptor for networking.k8s.io/v1beta1.
 type ingressAdaptorV1Beta1 struct {
 	ingress *v1beta1.Ingress
 }
@@ -95,12 +98,12 @@ type ingressAdaptorV1Beta1 struct {
 func newIngressAdaptorFromV1beta1(ingress *v1beta1.Ingress) ingressAdaptor {
 	return &ingressAdaptorV1Beta1{ingress: ingress}
 }
+func (i *ingressAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta }
 func (i *ingressAdaptorV1Beta1) name() string                     { return i.ingress.Name }
 func (i *ingressAdaptorV1Beta1) namespace() string                { return i.ingress.Namespace }
 func (i *ingressAdaptorV1Beta1) labels() map[string]string        { return i.ingress.Labels }
 func (i *ingressAdaptorV1Beta1) annotations() map[string]string   { return i.ingress.Annotations }
 func (i *ingressAdaptorV1Beta1) ingressClassName() *string        { return i.ingress.Spec.IngressClassName }
 
 func (i *ingressAdaptorV1Beta1) tlsHosts() []string {
 	var hosts []string
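The getObjectMeta() additions above give both Ingress API versions a single way to hand their ObjectMeta to shared label code. A standalone illustration of the adaptor shape (the type names here are illustrative stand-ins, not the file's unexported adaptors):

    package main

    import (
    	"fmt"

    	v1 "k8s.io/api/networking/v1"
    	"k8s.io/api/networking/v1beta1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // Both Ingress versions expose ObjectMeta through one interface method, so
    // label/annotation plumbing no longer needs per-version accessors.
    type metaProvider interface{ getObjectMeta() metav1.ObjectMeta }

    type adaptorV1 struct{ ing *v1.Ingress }
    type adaptorV1Beta1 struct{ ing *v1beta1.Ingress }

    func (a adaptorV1) getObjectMeta() metav1.ObjectMeta      { return a.ing.ObjectMeta }
    func (a adaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { return a.ing.ObjectMeta }

    func main() {
    	ing := &v1.Ingress{ObjectMeta: metav1.ObjectMeta{Name: "demo", Labels: map[string]string{"team": "obs"}}}
    	var p metaProvider = adaptorV1{ing: ing}
    	fmt.Println(p.getObjectMeta().Name, p.getObjectMeta().Labels["team"]) // demo obs
    }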
@@ -23,6 +23,8 @@ import (
 	"sync"
 	"time"
 
+	"github.com/prometheus/prometheus/util/strutil"
+
 	disv1beta1 "k8s.io/api/discovery/v1beta1"
 
 	"github.com/go-kit/log"
@@ -63,9 +65,9 @@ const (
 )
 
 var (
-	// Http header
+	// Http header.
 	userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
-	// Custom events metric
+	// Custom events metric.
 	eventCount = prometheus.NewCounterVec(
 		prometheus.CounterOpts{
 			Namespace: metricsNamespace,
@@ -74,7 +76,7 @@ var (
 		},
 		[]string{"role", "event"},
 	)
-	// DefaultSDConfig is the default Kubernetes SD configuration
+	// DefaultSDConfig is the default Kubernetes SD configuration.
 	DefaultSDConfig = SDConfig{
 		HTTPClientConfig: config.DefaultHTTPClientConfig,
 	}
@@ -299,12 +301,13 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
 		err          error
 		ownNamespace string
 	)
-	if conf.KubeConfig != "" {
+	switch {
+	case conf.KubeConfig != "":
 		kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig)
 		if err != nil {
 			return nil, err
 		}
-	} else if conf.APIServer.URL == nil {
+	case conf.APIServer.URL == nil:
 		// Use the Kubernetes provided pod service account
 		// as described in https://kubernetes.io/docs/admin/service-accounts-admin/
 		kcfg, err = rest.InClusterConfig()
@@ -324,7 +327,7 @@ func New(l log.Logger, conf *SDConfig) (*Discovery, error) {
 		}
 
 		level.Info(l).Log("msg", "Using pod service account via in-cluster config")
-	} else {
+	default:
 		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd")
 		if err != nil {
 			return nil, err
@@ -760,15 +763,21 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
 	indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
 		e, ok := obj.(*apiv1.Endpoints)
 		if !ok {
-			return nil, fmt.Errorf("object is not a pod")
+			return nil, fmt.Errorf("object is not endpoints")
 		}
 		var nodes []string
 		for _, target := range e.Subsets {
 			for _, addr := range target.Addresses {
-				if addr.NodeName == nil {
-					continue
+				if addr.TargetRef != nil {
+					switch addr.TargetRef.Kind {
+					case "Pod":
+						if addr.NodeName != nil {
+							nodes = append(nodes, *addr.NodeName)
+						}
+					case "Node":
+						nodes = append(nodes, addr.TargetRef.Name)
+					}
 				}
-				nodes = append(nodes, *addr.NodeName)
 			}
 		}
 		return nodes, nil
@@ -788,17 +797,29 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer {
 		switch e := obj.(type) {
 		case *disv1.EndpointSlice:
 			for _, target := range e.Endpoints {
-				if target.NodeName == nil {
-					continue
+				if target.TargetRef != nil {
+					switch target.TargetRef.Kind {
+					case "Pod":
+						if target.NodeName != nil {
+							nodes = append(nodes, *target.NodeName)
+						}
+					case "Node":
+						nodes = append(nodes, target.TargetRef.Name)
+					}
 				}
-				nodes = append(nodes, *target.NodeName)
 			}
 		case *disv1beta1.EndpointSlice:
 			for _, target := range e.Endpoints {
-				if target.NodeName == nil {
-					continue
+				if target.TargetRef != nil {
+					switch target.TargetRef.Kind {
+					case "Pod":
+						if target.NodeName != nil {
+							nodes = append(nodes, *target.NodeName)
+						}
+					case "Node":
+						nodes = append(nodes, target.TargetRef.Name)
+					}
 				}
-				nodes = append(nodes, *target.NodeName)
 			}
 		default:
 			return nil, fmt.Errorf("object is not an endpointslice")
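The two indexer hunks above are the functional core of this change: an Endpoints or EndpointSlice object is now indexed under a node both when an address is backed by a Pod with a known NodeName and when it points directly at a Node via targetRef, which is what lets attach_metadata resolve node labels for Node-backed endpoints. A standalone sketch of that dispatch for the Endpoints case:

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    )

    // nodesForAddresses mirrors the index function's new logic: a Pod-backed
    // address contributes its NodeName, a Node-backed address contributes the
    // Node's own name from TargetRef.
    func nodesForAddresses(addrs []corev1.EndpointAddress) []string {
    	var nodes []string
    	for _, addr := range addrs {
    		if addr.TargetRef == nil {
    			continue
    		}
    		switch addr.TargetRef.Kind {
    		case "Pod":
    			if addr.NodeName != nil {
    				nodes = append(nodes, *addr.NodeName)
    			}
    		case "Node":
    			nodes = append(nodes, addr.TargetRef.Name)
    		}
    	}
    	return nodes
    }

    func main() {
    	n := "foobar"
    	addrs := []corev1.EndpointAddress{
    		{IP: "1.2.3.4", NodeName: &n, TargetRef: &corev1.ObjectReference{Kind: "Pod", Name: "testpod"}},
    		{IP: "4.5.6.7", TargetRef: &corev1.ObjectReference{Kind: "Node", Name: "barbaz"}},
    	}
    	fmt.Println(nodesForAddresses(addrs)) // [foobar barbaz]
    }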
@@ -824,3 +845,19 @@ func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) {
 	// https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-25
 	return semVer.Major() >= 1 && semVer.Minor() >= 21, nil
 }
+
+func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, role Role) {
+	labelSet[model.LabelName(metaLabelPrefix+string(role)+"_name")] = lv(objectMeta.Name)
+
+	for k, v := range objectMeta.Labels {
+		ln := strutil.SanitizeLabelName(k)
+		labelSet[model.LabelName(metaLabelPrefix+string(role)+"_label_"+ln)] = lv(v)
+		labelSet[model.LabelName(metaLabelPrefix+string(role)+"_labelpresent_"+ln)] = presentValue
+	}
+
+	for k, v := range objectMeta.Annotations {
+		ln := strutil.SanitizeLabelName(k)
+		labelSet[model.LabelName(metaLabelPrefix+string(role)+"_annotation_"+ln)] = lv(v)
+		labelSet[model.LabelName(metaLabelPrefix+string(role)+"_annotationpresent_"+ln)] = presentValue
+	}
+}
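addObjectMetaLabels above is the new shared helper that the ingress, node, pod, and service roles now delegate to - which is why their per-role label/annotation loops, name-label constants, and strutil imports disappear in the other hunks of this commit. A self-contained sketch of what it emits, using the real strutil.SanitizeLabelName but an inlined prefix and plain types in place of the package's unexported metaLabelPrefix/lv/presentValue:

    package main

    import (
    	"fmt"

    	"github.com/prometheus/common/model"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    	"github.com/prometheus/prometheus/util/strutil"
    )

    // metaLabels shows the helper's output shape: the object name plus a value
    // label and a "present" marker per label/annotation, with characters that
    // are invalid in Prometheus label names sanitized to underscores.
    func metaLabels(role string, om metav1.ObjectMeta) model.LabelSet {
    	const prefix = "__meta_kubernetes_"
    	ls := model.LabelSet{model.LabelName(prefix + role + "_name"): model.LabelValue(om.Name)}
    	for k, v := range om.Labels {
    		ln := strutil.SanitizeLabelName(k)
    		ls[model.LabelName(prefix+role+"_label_"+ln)] = model.LabelValue(v)
    		ls[model.LabelName(prefix+role+"_labelpresent_"+ln)] = "true"
    	}
    	for k, v := range om.Annotations {
    		ln := strutil.SanitizeLabelName(k)
    		ls[model.LabelName(prefix+role+"_annotation_"+ln)] = model.LabelValue(v)
    		ls[model.LabelName(prefix+role+"_annotationpresent_"+ln)] = "true"
    	}
    	return ls
    }

    func main() {
    	om := metav1.ObjectMeta{Name: "web", Labels: map[string]string{"app.kubernetes.io/name": "web"}}
    	for k, v := range metaLabels("pod", om) {
    		fmt.Println(k, "=", v)
    	}
    	// Output (in some order):
    	// __meta_kubernetes_pod_name = web
    	// __meta_kubernetes_pod_label_app_kubernetes_io_name = web
    	// __meta_kubernetes_pod_labelpresent_app_kubernetes_io_name = true
    }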
@@ -152,33 +152,18 @@ func nodeSourceFromName(name string) string {
 }
 
 const (
-	nodeNameLabel               = metaLabelPrefix + "node_name"
-	nodeProviderIDLabel         = metaLabelPrefix + "node_provider_id"
-	nodeLabelPrefix             = metaLabelPrefix + "node_label_"
-	nodeLabelPresentPrefix      = metaLabelPrefix + "node_labelpresent_"
-	nodeAnnotationPrefix        = metaLabelPrefix + "node_annotation_"
-	nodeAnnotationPresentPrefix = metaLabelPrefix + "node_annotationpresent_"
-	nodeAddressPrefix           = metaLabelPrefix + "node_address_"
+	nodeProviderIDLabel = metaLabelPrefix + "node_provider_id"
+	nodeAddressPrefix   = metaLabelPrefix + "node_address_"
 )
 
 func nodeLabels(n *apiv1.Node) model.LabelSet {
 	// Each label and annotation will create two key-value pairs in the map.
-	ls := make(model.LabelSet, 2*(len(n.Labels)+len(n.Annotations))+1)
+	ls := make(model.LabelSet)
 
-	ls[nodeNameLabel] = lv(n.Name)
 	ls[nodeProviderIDLabel] = lv(n.Spec.ProviderID)
 
-	for k, v := range n.Labels {
-		ln := strutil.SanitizeLabelName(k)
-		ls[model.LabelName(nodeLabelPrefix+ln)] = lv(v)
-		ls[model.LabelName(nodeLabelPresentPrefix+ln)] = presentValue
-	}
-
-	for k, v := range n.Annotations {
-		ln := strutil.SanitizeLabelName(k)
-		ls[model.LabelName(nodeAnnotationPrefix+ln)] = lv(v)
-		ls[model.LabelName(nodeAnnotationPresentPrefix+ln)] = presentValue
-	}
+	addObjectMetaLabels(ls, n.ObjectMeta, RoleNode)
+
 	return ls
 }
@@ -209,7 +194,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group {
 	return tg
 }
 
-// nodeAddresses returns the provided node's address, based on the priority:
+// nodeAddress returns the provided node's address, based on the priority:
 // 1. NodeInternalIP
 // 2. NodeInternalDNS
 // 3. NodeExternalIP
@@ -217,7 +202,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group {
 // 5. NodeLegacyHostIP
 // 6. NodeHostName
 //
-// Derived from k8s.io/kubernetes/pkg/util/node/node.go
+// Derived from k8s.io/kubernetes/pkg/util/node/node.go.
 func nodeAddress(node *apiv1.Node) (string, map[apiv1.NodeAddressType][]string, error) {
 	m := map[apiv1.NodeAddressType][]string{}
 	for _, a := range node.Status.Addresses {
@@ -30,7 +30,6 @@ import (
 	"k8s.io/client-go/util/workqueue"
 
 	"github.com/prometheus/prometheus/discovery/targetgroup"
-	"github.com/prometheus/prometheus/util/strutil"
 )
 
 const nodeIndex = "node"
@@ -180,7 +179,6 @@ func convertToPod(o interface{}) (*apiv1.Pod, error) {
 }
 
 const (
-	podNameLabel          = metaLabelPrefix + "pod_name"
 	podIPLabel            = metaLabelPrefix + "pod_ip"
 	podContainerNameLabel = metaLabelPrefix + "pod_container_name"
 	podContainerIDLabel   = metaLabelPrefix + "pod_container_id"
@@ -191,10 +189,6 @@ const (
 	podContainerIsInit         = metaLabelPrefix + "pod_container_init"
 	podReadyLabel              = metaLabelPrefix + "pod_ready"
 	podPhaseLabel              = metaLabelPrefix + "pod_phase"
-	podLabelPrefix             = metaLabelPrefix + "pod_label_"
-	podLabelPresentPrefix      = metaLabelPrefix + "pod_labelpresent_"
-	podAnnotationPrefix        = metaLabelPrefix + "pod_annotation_"
-	podAnnotationPresentPrefix = metaLabelPrefix + "pod_annotationpresent_"
 	podNodeNameLabel           = metaLabelPrefix + "pod_node_name"
 	podHostIPLabel             = metaLabelPrefix + "pod_host_ip"
 	podUID                     = metaLabelPrefix + "pod_uid"
@@ -215,7 +209,6 @@ func GetControllerOf(controllee metav1.Object) *metav1.OwnerReference {
 
 func podLabels(pod *apiv1.Pod) model.LabelSet {
 	ls := model.LabelSet{
-		podNameLabel:  lv(pod.ObjectMeta.Name),
 		podIPLabel:    lv(pod.Status.PodIP),
 		podReadyLabel: podReady(pod),
 		podPhaseLabel: lv(string(pod.Status.Phase)),
@@ -224,6 +217,8 @@ func podLabels(pod *apiv1.Pod) model.LabelSet {
 		podUID: lv(string(pod.ObjectMeta.UID)),
 	}
 
+	addObjectMetaLabels(ls, pod.ObjectMeta, RolePod)
+
 	createdBy := GetControllerOf(pod)
 	if createdBy != nil {
 		if createdBy.Kind != "" {
@@ -234,18 +229,6 @@ func podLabels(pod *apiv1.Pod) model.LabelSet {
 		}
 	}
 
-	for k, v := range pod.Labels {
-		ln := strutil.SanitizeLabelName(k)
-		ls[model.LabelName(podLabelPrefix+ln)] = lv(v)
-		ls[model.LabelName(podLabelPresentPrefix+ln)] = presentValue
-	}
-
-	for k, v := range pod.Annotations {
-		ln := strutil.SanitizeLabelName(k)
-		ls[model.LabelName(podAnnotationPrefix+ln)] = lv(v)
-		ls[model.LabelName(podAnnotationPresentPrefix+ln)] = presentValue
-	}
-
 	return ls
 }
@@ -28,7 +28,6 @@ import (
 	"k8s.io/client-go/util/workqueue"
 
 	"github.com/prometheus/prometheus/discovery/targetgroup"
-	"github.com/prometheus/prometheus/util/strutil"
 )
 
 var (
@@ -147,38 +146,20 @@ func serviceSourceFromNamespaceAndName(namespace, name string) string {
 }
 
 const (
-	serviceNameLabel               = metaLabelPrefix + "service_name"
-	serviceLabelPrefix             = metaLabelPrefix + "service_label_"
-	serviceLabelPresentPrefix      = metaLabelPrefix + "service_labelpresent_"
-	serviceAnnotationPrefix        = metaLabelPrefix + "service_annotation_"
-	serviceAnnotationPresentPrefix = metaLabelPrefix + "service_annotationpresent_"
-	servicePortNameLabel           = metaLabelPrefix + "service_port_name"
-	servicePortNumberLabel         = metaLabelPrefix + "service_port_number"
-	servicePortProtocolLabel       = metaLabelPrefix + "service_port_protocol"
-	serviceClusterIPLabel          = metaLabelPrefix + "service_cluster_ip"
-	serviceLoadBalancerIP          = metaLabelPrefix + "service_loadbalancer_ip"
-	serviceExternalNameLabel       = metaLabelPrefix + "service_external_name"
-	serviceType                    = metaLabelPrefix + "service_type"
+	servicePortNameLabel     = metaLabelPrefix + "service_port_name"
+	servicePortNumberLabel   = metaLabelPrefix + "service_port_number"
+	servicePortProtocolLabel = metaLabelPrefix + "service_port_protocol"
+	serviceClusterIPLabel    = metaLabelPrefix + "service_cluster_ip"
+	serviceLoadBalancerIP    = metaLabelPrefix + "service_loadbalancer_ip"
+	serviceExternalNameLabel = metaLabelPrefix + "service_external_name"
+	serviceType              = metaLabelPrefix + "service_type"
 )
 
 func serviceLabels(svc *apiv1.Service) model.LabelSet {
-	// Each label and annotation will create two key-value pairs in the map.
-	ls := make(model.LabelSet, 2*(len(svc.Labels)+len(svc.Annotations))+2)
-
-	ls[serviceNameLabel] = lv(svc.Name)
+	ls := make(model.LabelSet)
 	ls[namespaceLabel] = lv(svc.Namespace)
+	addObjectMetaLabels(ls, svc.ObjectMeta, RoleService)
 
-	for k, v := range svc.Labels {
-		ln := strutil.SanitizeLabelName(k)
-		ls[model.LabelName(serviceLabelPrefix+ln)] = lv(v)
-		ls[model.LabelName(serviceLabelPresentPrefix+ln)] = presentValue
-	}
-
-	for k, v := range svc.Annotations {
-		ln := strutil.SanitizeLabelName(k)
-		ls[model.LabelName(serviceAnnotationPrefix+ln)] = lv(v)
-		ls[model.LabelName(serviceAnnotationPresentPrefix+ln)] = presentValue
-	}
 	return ls
 }
@@ -137,14 +137,12 @@ type Manager struct {
 	triggerSend chan struct{}
 }
 
-// Run starts the background processing
+// Run starts the background processing.
 func (m *Manager) Run() error {
 	go m.sender()
-	for range m.ctx.Done() {
-		m.cancelDiscoverers()
-		return m.ctx.Err()
-	}
-	return nil
+	<-m.ctx.Done()
+	m.cancelDiscoverers()
+	return m.ctx.Err()
 }
 
 // SyncCh returns a read only channel used by all the clients to receive target updates.
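Why `for range m.ctx.Done()` had to go (here and in the identical discovery/manager.go hunk further down): Done() returns a channel that is only ever closed, never sent on, and ranging over a closed, empty channel executes the body zero times - so the old loop fell through to `return nil` without ever calling cancelDiscoverers(). A plain receive blocks until cancellation and then runs the cleanup. A runnable illustration:

    package main

    import (
    	"context"
    	"fmt"
    )

    func main() {
    	ctx, cancel := context.WithCancel(context.Background())
    	cancel()

    	ran := false
    	for range ctx.Done() { // body never runs: the channel is closed, not sent on
    		ran = true
    	}
    	fmt.Println("range body ran:", ran) // false

    	<-ctx.Done()                   // returns immediately once the context is canceled
    	fmt.Println("err:", ctx.Err()) // context canceled
    }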
|
@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
|
|||||||
case tgs := <-provUpdates:
|
case tgs := <-provUpdates:
|
||||||
discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
|
discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
|
||||||
for _, got := range discoveryManager.allGroups() {
|
for _, got := range discoveryManager.allGroups() {
|
||||||
assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string {
|
assertEqualGroups(t, got, tc.expectedTargets[x])
|
||||||
return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v",
|
|
||||||
x,
|
|
||||||
got,
|
|
||||||
expected)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) {
|
func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
// Need to sort by the groups's source as the received order is not guaranteed.
|
// Need to sort by the groups's source as the received order is not guaranteed.
|
||||||
@ -1079,9 +1074,7 @@ func TestCoordinationWithReceiver(t *testing.T) {
|
|||||||
if _, ok := tgs[k]; !ok {
|
if _, ok := tgs[k]; !ok {
|
||||||
t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs)
|
t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs)
|
||||||
}
|
}
|
||||||
assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string {
|
assertEqualGroups(t, tgs[k], expected.tgs[k])
|
||||||
return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
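On the assertEqualGroups hunks above: dropping the msg callback only makes sense if the helper's failure output no longer needs a caller-supplied formatter. The helper body is outside these hunks, so the likely reading - an inference from the call sites, not something the diff shows - is that it now relies on the built-in diff output of a require.Equal-style assertion.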
@@ -254,7 +254,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
 		oldStr := oldTyp.String()
 		newStr := newTyp.String()
 		for i, s := range e.Errors {
-			e.Errors[i] = strings.Replace(s, oldStr, newStr, -1)
+			e.Errors[i] = strings.ReplaceAll(s, oldStr, newStr)
 		}
 	}
 	return err
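strings.ReplaceAll(s, old, new) is defined in the standard library as strings.Replace(s, old, new, -1), so the hunk above is purely cosmetic. For the record:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	s := "type A, want A"
    	fmt.Println(strings.Replace(s, "A", "B", -1)) // type B, want B
    	fmt.Println(strings.ReplaceAll(s, "A", "B"))  // type B, want B
    }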
@@ -51,6 +51,7 @@ const (
 	linodeLabelStatus          = linodeLabel + "status"
 	linodeLabelTags            = linodeLabel + "tags"
 	linodeLabelGroup           = linodeLabel + "group"
+	linodeLabelGPUs            = linodeLabel + "gpus"
 	linodeLabelHypervisor      = linodeLabel + "hypervisor"
 	linodeLabelBackups         = linodeLabel + "backups"
 	linodeLabelSpecsDiskBytes  = linodeLabel + "specs_disk_bytes"
@@ -249,20 +250,20 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, error) {
 			if detailedIP.Address != ip.String() {
 				continue
 			}
-			if detailedIP.Public && publicIPv4 == "" {
+			switch {
+			case detailedIP.Public && publicIPv4 == "":
 				publicIPv4 = detailedIP.Address
 
 				if detailedIP.RDNS != "" && detailedIP.RDNS != "null" {
 					publicIPv4RDNS = detailedIP.RDNS
 				}
-			} else if !detailedIP.Public && privateIPv4 == "" {
+			case !detailedIP.Public && privateIPv4 == "":
 				privateIPv4 = detailedIP.Address
 
 				if detailedIP.RDNS != "" && detailedIP.RDNS != "null" {
 					privateIPv4RDNS = detailedIP.RDNS
 				}
-			} else {
+			default:
 				extraIPs = append(extraIPs, detailedIP.Address)
 			}
 		}
@@ -302,12 +303,13 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, error) {
 			linodeLabelType:   model.LabelValue(instance.Type),
 			linodeLabelStatus: model.LabelValue(instance.Status),
 			linodeLabelGroup:  model.LabelValue(instance.Group),
+			linodeLabelGPUs:   model.LabelValue(fmt.Sprintf("%d", instance.Specs.GPUs)),
 			linodeLabelHypervisor: model.LabelValue(instance.Hypervisor),
 			linodeLabelBackups:    model.LabelValue(backupsStatus),
-			linodeLabelSpecsDiskBytes:   model.LabelValue(fmt.Sprintf("%d", instance.Specs.Disk<<20)),
-			linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", instance.Specs.Memory<<20)),
+			linodeLabelSpecsDiskBytes:   model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Disk)<<20)),
+			linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Memory)<<20)),
 			linodeLabelSpecsVCPUs:         model.LabelValue(fmt.Sprintf("%d", instance.Specs.VCPUs)),
-			linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", instance.Specs.Transfer<<20)),
+			linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Transfer)<<20)),
 		}
 
 		addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10))
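The int64() conversions above fix integer-width arithmetic, assuming - as the surrounding code implies - that the linodego Specs fields are plain ints holding megabytes: on a platform where int is 32 bits, value<<20 (MB to bytes) wraps for anything at or above 2 GiB. Converting to int64 before the shift keeps the arithmetic 64-bit everywhere. A runnable illustration using int32 as a stand-in for a 32-bit int:

    package main

    import "fmt"

    func main() {
    	disk := int32(81920) // 80 GiB expressed in MB
    	fmt.Println(disk << 20)        // 0: the high bits are shifted away in 32-bit arithmetic
    	fmt.Println(int64(disk) << 20) // 85899345920, the value the test data above expects
    }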
@@ -85,6 +85,7 @@ func TestLinodeSDRefresh(t *testing.T) {
 			"__meta_linode_status":           model.LabelValue("running"),
 			"__meta_linode_tags":             model.LabelValue(",monitoring,"),
 			"__meta_linode_group":            model.LabelValue(""),
+			"__meta_linode_gpus":             model.LabelValue("0"),
 			"__meta_linode_hypervisor":       model.LabelValue("kvm"),
 			"__meta_linode_backups":          model.LabelValue("disabled"),
 			"__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"),
@@ -109,6 +110,7 @@ func TestLinodeSDRefresh(t *testing.T) {
 			"__meta_linode_status":           model.LabelValue("running"),
 			"__meta_linode_tags":             model.LabelValue(",monitoring,"),
 			"__meta_linode_group":            model.LabelValue(""),
+			"__meta_linode_gpus":             model.LabelValue("0"),
 			"__meta_linode_hypervisor":       model.LabelValue("kvm"),
 			"__meta_linode_backups":          model.LabelValue("disabled"),
 			"__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"),
@@ -132,6 +134,7 @@ func TestLinodeSDRefresh(t *testing.T) {
 			"__meta_linode_status":           model.LabelValue("running"),
 			"__meta_linode_tags":             model.LabelValue(",monitoring,"),
 			"__meta_linode_group":            model.LabelValue(""),
+			"__meta_linode_gpus":             model.LabelValue("0"),
 			"__meta_linode_hypervisor":       model.LabelValue("kvm"),
 			"__meta_linode_backups":          model.LabelValue("disabled"),
 			"__meta_linode_specs_disk_bytes": model.LabelValue("53687091200"),
@@ -155,6 +158,7 @@ func TestLinodeSDRefresh(t *testing.T) {
 			"__meta_linode_status":           model.LabelValue("running"),
 			"__meta_linode_tags":             model.LabelValue(",monitoring,"),
 			"__meta_linode_group":            model.LabelValue(""),
+			"__meta_linode_gpus":             model.LabelValue("0"),
 			"__meta_linode_hypervisor":       model.LabelValue("kvm"),
 			"__meta_linode_backups":          model.LabelValue("disabled"),
 			"__meta_linode_specs_disk_bytes": model.LabelValue("26843545600"),
@@ -20,7 +20,7 @@ import (
 	"testing"
 )
 
-// SDMock is the interface for the Linode mock
+// SDMock is the interface for the Linode mock.
 type SDMock struct {
 	t      *testing.T
 	Server *httptest.Server
@@ -34,18 +34,18 @@ func NewSDMock(t *testing.T) *SDMock {
 	}
 }
 
-// Endpoint returns the URI to the mock server
+// Endpoint returns the URI to the mock server.
 func (m *SDMock) Endpoint() string {
 	return m.Server.URL + "/"
 }
 
-// Setup creates the mock server
+// Setup creates the mock server.
 func (m *SDMock) Setup() {
 	m.Mux = http.NewServeMux()
 	m.Server = httptest.NewServer(m.Mux)
}
 
-// ShutdownServer creates the mock server
+// ShutdownServer creates the mock server.
 func (m *SDMock) ShutdownServer() {
 	m.Server.Close()
 }
@@ -92,7 +92,7 @@ type Provider struct {
 	newSubs map[string]struct{}
 }
 
-// Discoverer return the Discoverer of the provider
+// Discoverer return the Discoverer of the provider.
 func (p *Provider) Discoverer() Discoverer {
 	return p.d
 }
@@ -180,11 +180,9 @@ func (m *Manager) Providers() []*Provider {
 // Run starts the background processing.
 func (m *Manager) Run() error {
 	go m.sender()
-	for range m.ctx.Done() {
-		m.cancelDiscoverers()
-		return m.ctx.Err()
-	}
-	return nil
+	<-m.ctx.Done()
+	m.cancelDiscoverers()
+	return m.ctx.Err()
 }
 
 // SyncCh returns a read only channel used by all the clients to receive target updates.
|
@ -686,12 +686,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
|
|||||||
case tgs := <-provUpdates:
|
case tgs := <-provUpdates:
|
||||||
discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
|
discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
|
||||||
for _, got := range discoveryManager.allGroups() {
|
for _, got := range discoveryManager.allGroups() {
|
||||||
assertEqualGroups(t, got, tc.expectedTargets[x], func(got, expected string) string {
|
assertEqualGroups(t, got, tc.expectedTargets[x])
|
||||||
return fmt.Sprintf("%d: \ntargets mismatch \ngot: %v \nexpected: %v",
|
|
||||||
x,
|
|
||||||
got,
|
|
||||||
expected)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -699,7 +694,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group, msg func(got, expected string) string) {
|
func assertEqualGroups(t *testing.T, got, expected []*targetgroup.Group) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
// Need to sort by the groups's source as the received order is not guaranteed.
|
// Need to sort by the groups's source as the received order is not guaranteed.
|
||||||
@ -1129,7 +1124,7 @@ type lockStaticConfig struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s lockStaticConfig) Name() string { return "lockstatic" }
|
func (s lockStaticConfig) Name() string { return "lockstatic" }
|
||||||
func (s lockStaticConfig) NewDiscoverer(options DiscovererOptions) (Discoverer, error) {
|
func (s lockStaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
|
||||||
return (lockStaticDiscoverer)(s), nil
|
return (lockStaticDiscoverer)(s), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1330,9 +1325,7 @@ func TestCoordinationWithReceiver(t *testing.T) {
|
|||||||
if _, ok := tgs[k]; !ok {
|
if _, ok := tgs[k]; !ok {
|
||||||
t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs)
|
t.Fatalf("step %d: target group not found: %s\ngot: %#v", i, k, tgs)
|
||||||
}
|
}
|
||||||
assertEqualGroups(t, tgs[k], expected.tgs[k], func(got, expected string) string {
|
assertEqualGroups(t, tgs[k], expected.tgs[k])
|
||||||
return fmt.Sprintf("step %d: targets mismatch \ngot: %q \nexpected: %q", i, got, expected)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1399,7 +1392,7 @@ func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) {
|
|||||||
|
|
||||||
// TestTargetSetTargetGroupsUpdateDuringApplyConfig is used to detect races when
|
// TestTargetSetTargetGroupsUpdateDuringApplyConfig is used to detect races when
|
||||||
// ApplyConfig happens at the same time as targets update.
|
// ApplyConfig happens at the same time as targets update.
|
||||||
func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
|
func TestTargetSetTargetGroupsUpdateDuringApplyConfig(*testing.T) {
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
discoveryManager := NewManager(ctx, log.NewNopLogger())
|
discoveryManager := NewManager(ctx, log.NewNopLogger())
|
||||||
|
@ -48,7 +48,7 @@ const (
|
|||||||
// imageLabel is the label that is used for the docker image running the service.
|
// imageLabel is the label that is used for the docker image running the service.
|
||||||
imageLabel model.LabelName = metaLabelPrefix + "image"
|
imageLabel model.LabelName = metaLabelPrefix + "image"
|
||||||
// portIndexLabel is the integer port index when multiple ports are defined;
|
// portIndexLabel is the integer port index when multiple ports are defined;
|
||||||
// e.g. PORT1 would have a value of '1'
|
// e.g. PORT1 would have a value of '1'.
|
||||||
portIndexLabel model.LabelName = metaLabelPrefix + "port_index"
|
portIndexLabel model.LabelName = metaLabelPrefix + "port_index"
|
||||||
// taskLabel contains the mesos task name of the app instance.
|
// taskLabel contains the mesos task name of the app instance.
|
||||||
taskLabel model.LabelName = metaLabelPrefix + "task"
|
taskLabel model.LabelName = metaLabelPrefix + "task"
|
||||||
@@ -106,14 +106,16 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	if len(c.AuthToken) > 0 && len(c.AuthTokenFile) > 0 {
 		return errors.New("marathon_sd: at most one of auth_token & auth_token_file must be configured")
 	}
-	if c.HTTPClientConfig.BasicAuth != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) {
-		return errors.New("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured")
-	}
-	if (len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0) && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) {
-		return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured")
-	}
-	if c.HTTPClientConfig.Authorization != nil && (len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0) {
-		return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured")
+	if len(c.AuthToken) > 0 || len(c.AuthTokenFile) > 0 {
+		switch {
+		case c.HTTPClientConfig.BasicAuth != nil:
+			return errors.New("marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured")
+		case len(c.HTTPClientConfig.BearerToken) > 0 || len(c.HTTPClientConfig.BearerTokenFile) > 0:
+			return errors.New("marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured")
+		case c.HTTPClientConfig.Authorization != nil:
+			return errors.New("marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured")
+		}
 	}
 	return c.HTTPClientConfig.Validate()
 }
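The rewrite above hoists the shared condition, auth_token or auth_token_file being set, out of three separate if statements and lists the mutually exclusive options as switch cases. A minimal sketch of the same validation shape, using hypothetical config fields rather than the real SDConfig:

package main

import (
	"errors"
	"fmt"
)

// config is a hypothetical stand-in for an SD configuration with several
// mutually exclusive credential settings.
type config struct {
	AuthToken string
	BasicAuth bool
	Bearer    string
}

// validate checks the shared "an auth token is set" condition once, then
// reports the first conflicting option via a switch.
func (c config) validate() error {
	if c.AuthToken != "" {
		switch {
		case c.BasicAuth:
			return errors.New("at most one of basic_auth & auth_token must be configured")
		case c.Bearer != "":
			return errors.New("at most one of bearer_token & auth_token must be configured")
		}
	}
	return nil
}

func main() {
	fmt.Println(config{AuthToken: "t", BasicAuth: true}.validate())
}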
@@ -136,9 +138,10 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) {
 		return nil, err
 	}
 
-	if len(conf.AuthToken) > 0 {
+	switch {
+	case len(conf.AuthToken) > 0:
 		rt, err = newAuthTokenRoundTripper(conf.AuthToken, rt)
-	} else if len(conf.AuthTokenFile) > 0 {
+	case len(conf.AuthTokenFile) > 0:
 		rt, err = newAuthTokenFileRoundTripper(conf.AuthTokenFile, rt)
 	}
 	if err != nil {
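newAuthTokenRoundTripper and newAuthTokenFileRoundTripper wrap the HTTP transport so every outgoing request carries the Marathon auth token. A rough sketch of that decorator pattern, assuming the wrapper only injects a header (the type name and header format here are illustrative, not the actual implementation):

package main

import "net/http"

// authTokenRoundTripper is an illustrative wrapper that injects a static
// token into each request before delegating to the next RoundTripper.
type authTokenRoundTripper struct {
	token string
	next  http.RoundTripper
}

func (rt *authTokenRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	// Clone before mutating: RoundTrippers must not modify the caller's request.
	req = req.Clone(req.Context())
	req.Header.Set("Authorization", "token="+rt.token)
	return rt.next.RoundTrip(req)
}

func main() {
	client := &http.Client{
		Transport: &authTokenRoundTripper{token: "secret", next: http.DefaultTransport},
	}
	_ = client // use client.Get(...) as usual; every call now carries the token
}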
@@ -400,19 +403,20 @@ func targetsForApp(app *app) []model.LabelSet {
 	var labels []map[string]string
 	var prefix string
 
-	if len(app.Container.PortMappings) != 0 {
+	switch {
+	case len(app.Container.PortMappings) != 0:
 		// In Marathon 1.5.x the "container.docker.portMappings" object was moved
 		// to "container.portMappings".
 		ports, labels = extractPortMapping(app.Container.PortMappings, app.isContainerNet())
 		prefix = portMappingLabelPrefix
 
-	} else if len(app.Container.Docker.PortMappings) != 0 {
+	case len(app.Container.Docker.PortMappings) != 0:
 		// Prior to Marathon 1.5 the port mappings could be found at the path
 		// "container.docker.portMappings".
 		ports, labels = extractPortMapping(app.Container.Docker.PortMappings, app.isContainerNet())
 		prefix = portMappingLabelPrefix
 
-	} else if len(app.PortDefinitions) != 0 {
+	case len(app.PortDefinitions) != 0:
 		// PortDefinitions deprecates the "ports" array and can be used to specify
 		// a list of ports with metadata in case a mapping is not required.
 		ports = make([]uint32, len(app.PortDefinitions))
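The switch encodes a precedence order across three Marathon API shapes, as the comments describe: container.portMappings first, then the pre-1.5 container.docker.portMappings, then portDefinitions. A compact sketch of that first-non-empty selection, with simplified, made-up field names:

package main

import "fmt"

// app mirrors the precedence the switch above encodes, with the three
// possible port sources reduced to plain slices.
type app struct {
	ContainerPortMappings []uint32
	DockerPortMappings    []uint32
	PortDefinitions       []uint32
}

// portsFor returns the first non-empty source, in precedence order.
func portsFor(a app) ([]uint32, string) {
	switch {
	case len(a.ContainerPortMappings) != 0:
		return a.ContainerPortMappings, "container.portMappings"
	case len(a.DockerPortMappings) != 0:
		return a.DockerPortMappings, "container.docker.portMappings"
	case len(a.PortDefinitions) != 0:
		return a.PortDefinitions, "portDefinitions"
	}
	return nil, "none"
}

func main() {
	ports, src := portsFor(app{DockerPortMappings: []uint32{8080}})
	fmt.Println(ports, "from", src)
}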
@@ -29,7 +29,7 @@ import (
 	"github.com/prometheus/prometheus/util/strutil"
 )
 
-// SDMock is the interface for the DigitalOcean mock
+// SDMock is the interface for the DigitalOcean mock.
 type SDMock struct {
 	t      *testing.T
 	Server *httptest.Server
@@ -47,12 +47,12 @@ func NewSDMock(t *testing.T, directory string) *SDMock {
 	}
 }
 
-// Endpoint returns the URI to the mock server
+// Endpoint returns the URI to the mock server.
 func (m *SDMock) Endpoint() string {
 	return m.Server.URL + "/"
 }
 
-// Setup creates the mock server
+// Setup creates the mock server.
 func (m *SDMock) Setup() {
 	m.Mux = http.NewServeMux()
 	m.Server = httptest.NewServer(m.Mux)
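These SDMock helpers follow the stock net/http/httptest recipe: handlers registered on a ServeMux, served by an in-process server whose URL replaces the real API endpoint in the code under test. A generic, runnable sketch of the pattern (the /v2/droplets path is illustrative, not taken from this diff):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Register canned responses on a mux, exactly as the SD mocks do in Setup().
	mux := http.NewServeMux()
	mux.HandleFunc("/v2/droplets", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"droplets": []}`)
	})

	// httptest.NewServer picks a free port and starts serving immediately.
	srv := httptest.NewServer(mux)
	defer srv.Close()

	// The code under test is pointed at srv.URL instead of the real API.
	resp, err := http.Get(srv.URL + "/v2/droplets")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}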
@@ -161,7 +161,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
 	return d, nil
 }
 
-func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *Discovery) refresh(context.Context) ([]*targetgroup.Group, error) {
 	opts := &nomad.QueryOptions{
 		AllowStale: d.allowStale,
 	}
@@ -30,7 +30,7 @@ type NomadSDTestSuite struct {
 	Mock *SDMock
 }
 
-// SDMock is the interface for the nomad mock
+// SDMock is the interface for the nomad mock.
 type SDMock struct {
 	t      *testing.T
 	Server *httptest.Server
@@ -36,6 +36,7 @@ const (
 	openstackLabelAddressPool    = openstackLabelPrefix + "address_pool"
 	openstackLabelInstanceFlavor = openstackLabelPrefix + "instance_flavor"
 	openstackLabelInstanceID     = openstackLabelPrefix + "instance_id"
+	openstackLabelInstanceImage  = openstackLabelPrefix + "instance_image"
 	openstackLabelInstanceName   = openstackLabelPrefix + "instance_name"
 	openstackLabelInstanceStatus = openstackLabelPrefix + "instance_status"
 	openstackLabelPrivateIP      = openstackLabelPrefix + "private_ip"
@@ -144,12 +145,18 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 			openstackLabelUserID: model.LabelValue(s.UserID),
 		}
 
-		id, ok := s.Flavor["id"].(string)
+		flavorID, ok := s.Flavor["id"].(string)
 		if !ok {
 			level.Warn(i.logger).Log("msg", "Invalid type for flavor id, expected string")
 			continue
 		}
-		labels[openstackLabelInstanceFlavor] = model.LabelValue(id)
+		labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID)
+
+		imageID, ok := s.Image["id"].(string)
+		if ok {
+			labels[openstackLabelInstanceImage] = model.LabelValue(imageID)
+		}
+
 		for k, v := range s.Metadata {
 			name := strutil.SanitizeLabelName(k)
 			labels[openstackLabelTagPrefix+model.LabelName(name)] = model.LabelValue(v)
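Both the flavor lookup and the new image lookup rely on Go's comma-ok type assertion, since these fields arrive as map[string]interface{} after JSON decoding: a missing or non-string id sets ok to false instead of panicking, so each label can be skipped independently. A standalone illustration with made-up data:

package main

import "fmt"

func main() {
	// Fields decoded from JSON land as map[string]interface{}.
	flavor := map[string]interface{}{"id": "m1.small"}
	image := map[string]interface{}{} // "id" absent, e.g. boot-from-volume

	// The comma-ok form returns false on a missing key or wrong type
	// instead of panicking.
	if id, ok := flavor["id"].(string); ok {
		fmt.Println("flavor label:", id)
	}
	if id, ok := image["id"].(string); ok {
		fmt.Println("image label:", id)
	} else {
		fmt.Println("no image id; label omitted")
	}
}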
@@ -73,6 +73,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
 			"__address__": model.LabelValue("10.0.0.32:0"),
 			"__meta_openstack_instance_flavor": model.LabelValue("1"),
 			"__meta_openstack_instance_id": model.LabelValue("ef079b0c-e610-4dfb-b1aa-b49f07ac48e5"),
+			"__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"),
 			"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),
 			"__meta_openstack_instance_name": model.LabelValue("herp"),
 			"__meta_openstack_private_ip": model.LabelValue("10.0.0.32"),
@@ -85,6 +86,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
 			"__address__": model.LabelValue("10.0.0.31:0"),
 			"__meta_openstack_instance_flavor": model.LabelValue("1"),
 			"__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682ba"),
+			"__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"),
 			"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),
 			"__meta_openstack_instance_name": model.LabelValue("derp"),
 			"__meta_openstack_private_ip": model.LabelValue("10.0.0.31"),
@@ -20,7 +20,7 @@ import (
 	"testing"
 )
 
-// SDMock is the interface for the OpenStack mock
+// SDMock is the interface for the OpenStack mock.
 type SDMock struct {
 	t      *testing.T
 	Server *httptest.Server
@@ -34,12 +34,12 @@ func NewSDMock(t *testing.T) *SDMock {
 	}
 }
 
-// Endpoint returns the URI to the mock server
+// Endpoint returns the URI to the mock server.
 func (m *SDMock) Endpoint() string {
 	return m.Server.URL + "/"
 }
 
-// Setup creates the mock server
+// Setup creates the mock server.
 func (m *SDMock) Setup() {
 	m.Mux = http.NewServeMux()
 	m.Server = httptest.NewServer(m.Mux)
@@ -60,7 +60,7 @@ func testHeader(t *testing.T, r *http.Request, header, expected string) {
 	}
 }
 
-// HandleVersionsSuccessfully mocks version call
+// HandleVersionsSuccessfully mocks version call.
 func (m *SDMock) HandleVersionsSuccessfully() {
 	m.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
 		fmt.Fprintf(w, `
@@ -88,7 +88,7 @@ func (m *SDMock) HandleVersionsSuccessfully() {
 	})
 }
 
-// HandleAuthSuccessfully mocks auth call
+// HandleAuthSuccessfully mocks auth call.
 func (m *SDMock) HandleAuthSuccessfully() {
 	m.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, r *http.Request) {
 		w.Header().Add("X-Subject-Token", tokenID)
@@ -236,7 +236,7 @@ const hypervisorListBody = `
 	]
 }`
 
-// HandleHypervisorListSuccessfully mocks os-hypervisors detail call
+// HandleHypervisorListSuccessfully mocks os-hypervisors detail call.
 func (m *SDMock) HandleHypervisorListSuccessfully() {
 	m.Mux.HandleFunc("/os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) {
 		testMethod(m.t, r, "GET")
@@ -533,7 +533,7 @@ const serverListBody = `
 }
 `
 
-// HandleServerListSuccessfully mocks server detail call
+// HandleServerListSuccessfully mocks server detail call.
 func (m *SDMock) HandleServerListSuccessfully() {
 	m.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) {
 		testMethod(m.t, r, "GET")
@@ -572,7 +572,7 @@ const listOutput = `
 }
 `
 
-// HandleFloatingIPListSuccessfully mocks floating ips call
+// HandleFloatingIPListSuccessfully mocks floating ips call.
 func (m *SDMock) HandleFloatingIPListSuccessfully() {
 	m.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) {
 		testMethod(m.t, r, "GET")
@@ -102,7 +102,7 @@ func (d *dedicatedServerDiscovery) getSource() string {
 	return fmt.Sprintf("%s_%s", d.config.Name(), d.getService())
 }
 
-func (d *dedicatedServerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
 	client, err := createClient(d.config)
 	if err != nil {
 		return nil, err
@@ -84,7 +84,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	w.Header().Set("Content-Type", "application/json")
-	if string(r.URL.Path) == "/dedicated/server" {
+	if r.URL.Path == "/dedicated/server" {
 		dedicatedServersList, err := os.ReadFile("testdata/dedicated_server/dedicated_servers.json")
 		if err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
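r.URL.Path is already declared as string, so the string(...) conversions removed here and in the following hunks were no-ops. A trivial demonstration of the equivalent direct comparison:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, _ := url.Parse("https://api.example.test/dedicated/server")

	// url.URL.Path has type string, so string(u.Path) would be a no-op
	// conversion; comparing the field directly is equivalent and clearer.
	if u.Path == "/dedicated/server" {
		fmt.Println("matched without the redundant conversion")
	}
}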
@@ -96,7 +96,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 	}
-	if string(r.URL.Path) == "/dedicated/server/abcde" {
+	if r.URL.Path == "/dedicated/server/abcde" {
 		dedicatedServer, err := os.ReadFile("testdata/dedicated_server/dedicated_servers_details.json")
 		if err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -108,7 +108,7 @@ func MockDedicatedAPI(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 	}
-	if string(r.URL.Path) == "/dedicated/server/abcde/ips" {
+	if r.URL.Path == "/dedicated/server/abcde/ips" {
 		dedicatedServerIPs, err := os.ReadFile("testdata/dedicated_server/dedicated_servers_abcde_ips.json")
 		if err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -117,7 +117,7 @@ func (d *vpsDiscovery) getSource() string {
 	return fmt.Sprintf("%s_%s", d.config.Name(), d.getService())
 }
 
-func (d *vpsDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
 	client, err := createClient(d.config)
 	if err != nil {
 		return nil, err
@@ -91,7 +91,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	w.Header().Set("Content-Type", "application/json")
-	if string(r.URL.Path) == "/vps" {
+	if r.URL.Path == "/vps" {
 		dedicatedServersList, err := os.ReadFile("testdata/vps/vps.json")
 		if err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -103,7 +103,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 	}
-	if string(r.URL.Path) == "/vps/abc" {
+	if r.URL.Path == "/vps/abc" {
 		dedicatedServer, err := os.ReadFile("testdata/vps/vps_details.json")
 		if err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
@@ -115,7 +115,7 @@ func MockVpsAPI(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 	}
-	if string(r.URL.Path) == "/vps/abc/ips" {
+	if r.URL.Path == "/vps/abc/ips" {
 		dedicatedServerIPs, err := os.ReadFile("testdata/vps/vps_abc_ips.json")
 		if err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
Some files were not shown because too many files have changed in this diff.