Compare commits
54 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 80ef886fb4 | |||
| 93f1c24b82 | |||
| 7c2f38641a | |||
| 6376484b13 | |||
| d1334a6dff | |||
| 505e025d18 | |||
| 571e5c2e3c | |||
| 4ceb7eec52 | |||
| 2e507d5042 | |||
| 1bd5b29963 | |||
| 98ec655f33 | |||
| 1d38cf7f0d | |||
| 891ed6c4b7 | |||
| e067b1de98 | |||
| 4cecab3185 | |||
| 867e8bb98f | |||
| 5abc0d0d91 | |||
| 3972046695 | |||
| ffa5e41bec | |||
| 84bd99f1c1 | |||
| de512fb02e | |||
| 3fca7cf952 | |||
| 8f8ed2bbec | |||
| 69e4b4667b | |||
| 0c016e210a | |||
| 769787ea40 | |||
| b4312a220f | |||
| c75a451b13 | |||
| df816d00e4 | |||
| 782c819727 | |||
| 6fc82629d4 | |||
| fc34b18122 | |||
| ac162e203b | |||
| ddeb970b4d | |||
| a06905e2d3 | |||
| 07bf7b2ae1 | |||
| 0649635639 | |||
| 126e6dbcd8 | |||
| f56fec2c10 | |||
| ca5cf5fe7c | |||
| 06fb3fef43 | |||
| 414524e799 | |||
| acf0da9b80 | |||
| 6f7c525213 | |||
| 684156fdf1 | |||
| 4cda8669a5 | |||
| 14c45b6db2 | |||
| eeddc8cd18 | |||
| 99e4583cd1 | |||
| cbd6b53182 | |||
| 259c7807cb | |||
| d6fa822e89 | |||
| a89377337b | |||
| 3b99370aac |
@@ -1 +1,6 @@
|
||||
* text=auto eol=lf
|
||||
*.gen.ts linguist-generated
|
||||
*_gen.ts linguist-generated
|
||||
*_gen.go linguist-generated
|
||||
**/openapi_snapshots/*.json linguist-generated
|
||||
apps/**/pkg/apis/*_manifest.go linguist-generated
|
||||
|
||||
+6
-15
@@ -151,7 +151,7 @@
|
||||
/pkg/promlib @grafana/oss-big-tent
|
||||
/pkg/storage/ @grafana/grafana-search-and-storage
|
||||
/pkg/storage/secret/ @grafana/grafana-operator-experience-squad
|
||||
/pkg/services/annotations/ @grafana/grafana-search-and-storage
|
||||
/pkg/services/annotations/ @grafana/grafana-backend-services-squad
|
||||
/pkg/services/apikey/ @grafana/identity-squad
|
||||
/pkg/services/cleanup/ @grafana/grafana-backend-group
|
||||
/pkg/services/contexthandler/ @grafana/grafana-backend-group @grafana/grafana-app-platform-squad
|
||||
@@ -181,7 +181,7 @@
|
||||
/pkg/services/search/ @grafana/grafana-search-and-storage
|
||||
/pkg/services/searchusers/ @grafana/grafana-search-and-storage
|
||||
/pkg/services/secrets/ @grafana/grafana-operator-experience-squad
|
||||
/pkg/services/shorturls/ @grafana/grafana-backend-group
|
||||
/pkg/services/shorturls/ @grafana/sharing-squad
|
||||
/pkg/services/sqlstore/ @grafana/grafana-search-and-storage
|
||||
/pkg/services/ssosettings/ @grafana/identity-squad
|
||||
/pkg/services/star/ @grafana/grafana-search-and-storage
|
||||
@@ -199,6 +199,7 @@
|
||||
/pkg/tests/apis/features @grafana/grafana-backend-services-squad
|
||||
/pkg/tests/apis/folder @grafana/grafana-search-and-storage
|
||||
/pkg/tests/apis/iam @grafana/identity-access-team
|
||||
/pkg/tests/apis/shorturl @grafana/sharing-squad
|
||||
/pkg/tests/api/correlations/ @grafana/datapro
|
||||
/pkg/tsdb/grafanads/ @grafana/grafana-backend-group
|
||||
/pkg/tsdb/opentsdb/ @grafana/partner-datasources
|
||||
@@ -241,6 +242,7 @@
|
||||
/devenv/dev-dashboards/panel-library @grafana/dataviz-squad
|
||||
/devenv/dev-dashboards/panel-piechart @grafana/dataviz-squad
|
||||
/devenv/dev-dashboards/panel-stat @grafana/dataviz-squad
|
||||
/devenv/dev-dashboards/panel-status-history @grafana/dataviz-squad
|
||||
/devenv/dev-dashboards/panel-table @grafana/dataviz-squad
|
||||
/devenv/dev-dashboards/panel-timeline @grafana/dataviz-squad
|
||||
/devenv/dev-dashboards/panel-timeseries @grafana/dataviz-squad
|
||||
@@ -472,24 +474,12 @@ i18next.config.ts @grafana/grafana-frontend-platform
|
||||
/e2e-playwright/fixtures/long-trace-response.json @grafana/observability-traces-and-profiling
|
||||
/e2e-playwright/fixtures/tempo-response.json @grafana/oss-big-tent
|
||||
/e2e-playwright/fixtures/prometheus-response.json @grafana/datapro
|
||||
/e2e-playwright/panels-suite/canvas-scene.spec.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/panels-suite/ @grafana/dataviz-squad
|
||||
/e2e-playwright/panels-suite/dashlist.spec.ts @grafana/grafana-search-navigate-organise
|
||||
/e2e-playwright/panels-suite/datagrid-data-change.spec.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/panels-suite/datagrid-editing-features.spec.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/panels-suite/frontend-sandbox-panel.spec.ts @grafana/plugins-platform-frontend
|
||||
/e2e-playwright/panels-suite/geomap-layer-types.spec.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/panels-suite/geomap-map-controls.spec.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/panels-suite/geomap-spatial-operations-transform.spec.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/panels-suite/heatmap.spec.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/panels-suite/panelEdit_base.spec.ts @grafana/dashboards-squad
|
||||
/e2e-playwright/panels-suite/panelEdit_queries.spec.ts @grafana/dashboards-squad
|
||||
/e2e-playwright/panels-suite/panelEdit_transforms.spec.ts @grafana/datapro
|
||||
/e2e-playwright/panels-suite/state-timeline.spec.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/panels-suite/table-footer.spec.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/panels-suite/table-kitchenSink.spec.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/panels-suite/table-markdown.spec.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/panels-suite/table-sparkline.spec.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/panels-suite/table-utils.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/plugin-e2e/ @grafana/oss-big-tent @grafana/partner-datasources
|
||||
/e2e-playwright/plugin-e2e/plugin-e2e-api-tests/ @grafana/plugins-platform-frontend
|
||||
/e2e-playwright/smoke-tests-suite/ @grafana/grafana-frontend-platform
|
||||
@@ -1176,6 +1166,7 @@ embed.go @grafana/grafana-as-code
|
||||
/pkg/registry/ @grafana/grafana-as-code
|
||||
/pkg/registry/apis/ @grafana/grafana-app-platform-squad
|
||||
/pkg/registry/apis/folders @grafana/grafana-search-and-storage
|
||||
/pkg/registry/apis/datasource @grafana/grafana-datasources-core-services
|
||||
/pkg/registry/apis/query @grafana/grafana-datasources-core-services
|
||||
/pkg/registry/apis/secret @grafana/grafana-operator-experience-squad
|
||||
/pkg/registry/apis/userstorage @grafana/grafana-app-platform-squad @grafana/plugins-platform-backend
|
||||
|
||||
@@ -31,6 +31,9 @@ outputs:
|
||||
dockerfile:
|
||||
description: Whether the dockerfile or self have changed in any way
|
||||
value: ${{ steps.changed-files.outputs.dockerfile_any_changed || 'true' }}
|
||||
devenv:
|
||||
description: Whether the devenv or self have changed in any way
|
||||
value: ${{ steps.changed-files.outputs.devenv_any_changed || 'true' }}
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
@@ -136,6 +139,9 @@ runs:
|
||||
- '.vale.ini'
|
||||
- '.github/actions/change-detection/**'
|
||||
- '${{ inputs.self }}'
|
||||
devenv:
|
||||
- 'devenv/**'
|
||||
- '${{ inputs.self }}'
|
||||
- name: Print all change groups
|
||||
shell: bash
|
||||
run: |
|
||||
@@ -157,3 +163,5 @@ runs:
|
||||
echo " --> ${{ steps.changed-files.outputs.docs_all_changed_files }}"
|
||||
echo "Dockerfile: ${{ steps.changed-files.outputs.dockerfile_any_changed || 'true' }}"
|
||||
echo " --> ${{ steps.changed-files.outputs.dockerfile_all_changed_files }}"
|
||||
echo "devenv: ${{ steps.changed-files.outputs.devenv_any_changed || 'true' }}"
|
||||
echo " --> ${{ steps.changed-files.outputs.devenv_all_changed_files }}"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
extends: ["config:recommended"],
|
||||
enabledManagers: ["npm"],
|
||||
enabledManagers: ["npm", "docker-compose"],
|
||||
ignorePresets: [
|
||||
"github>grafana/grafana-renovate-config//presets/labels",
|
||||
],
|
||||
@@ -26,7 +26,7 @@
|
||||
"@types/slate-react", // we don't want to continue using this on the long run, use Monaco editor instead of Slate
|
||||
"@types/slate", // we don't want to continue using this on the long run, use Monaco editor instead of Slate
|
||||
],
|
||||
includePaths: ["package.json", "packages/**", "public/app/plugins/**"],
|
||||
includePaths: ["package.json", "packages/**", "public/app/plugins/**", "devenv/frontend-service/docker-compose.yaml"],
|
||||
ignorePaths: ["emails/**", "**/mocks/**"],
|
||||
labels: ["area/frontend", "dependencies", "no-changelog"],
|
||||
postUpdateOptions: ["yarnDedupeHighest"],
|
||||
|
||||
@@ -40,7 +40,7 @@ jobs:
|
||||
}' "$GITHUB_EVENT_PATH" > /tmp/pr_info.json
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: pr_info
|
||||
path: /tmp/pr_info.json
|
||||
|
||||
@@ -193,7 +193,7 @@ jobs:
|
||||
exit 1
|
||||
fi
|
||||
- name: store build artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: build-artifacts
|
||||
path: ${{ steps.get_dir.outputs.dir }}/ci/packages/*.zip
|
||||
|
||||
@@ -64,7 +64,7 @@ jobs:
|
||||
run: zip -r ./pr_built_packages.zip ./packages/**/*.tgz
|
||||
|
||||
- name: Upload build output as artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: buildPr
|
||||
path: './pr/pr_built_packages.zip'
|
||||
@@ -116,7 +116,7 @@ jobs:
|
||||
run: zip -r ./base_built_packages.zip ./packages/**/*.tgz
|
||||
|
||||
- name: Upload build output as artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: buildBase
|
||||
path: './base/base_built_packages.zip'
|
||||
@@ -189,7 +189,7 @@ jobs:
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
|
||||
- name: Upload check output as artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: levitate
|
||||
path: levitate/
|
||||
|
||||
@@ -94,14 +94,14 @@ jobs:
|
||||
id: artifact
|
||||
|
||||
- name: Upload grafana.tar.gz
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
retention-days: 1
|
||||
name: grafana-tar-gz
|
||||
path: build-dir/grafana.tar.gz
|
||||
|
||||
- name: Upload grafana docker tarball
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
retention-days: 1
|
||||
name: grafana-docker-tar-gz
|
||||
@@ -133,7 +133,7 @@ jobs:
|
||||
# We want a static binary, so we need to set CGO_ENABLED=0
|
||||
CGO_ENABLED=0 go build -o ./e2e-runner ./e2e/
|
||||
echo "artifact=e2e-runner-${{github.run_number}}" >> "$GITHUB_OUTPUT"
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v5
|
||||
id: upload
|
||||
with:
|
||||
retention-days: 1
|
||||
@@ -245,7 +245,7 @@ jobs:
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "suite=$(echo "$SUITE" | sed 's/\//-/g')" >> "$GITHUB_OUTPUT"
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v5
|
||||
if: success() || failure()
|
||||
with:
|
||||
name: ${{ steps.set-suite-name.outputs.suite }}-${{ github.run_number }}
|
||||
@@ -307,7 +307,7 @@ jobs:
|
||||
version: 0.18.8
|
||||
verb: run
|
||||
args: go run ./pkg/build/e2e-playwright --package=grafana.tar.gz --shard=${{ matrix.shard }}/${{ matrix.shardTotal }} --blob-dir=./blob-report
|
||||
- uses: actions/upload-artifact@v4
|
||||
- uses: actions/upload-artifact@v5
|
||||
if: success() || failure()
|
||||
with:
|
||||
name: playwright-blob-${{ github.run_number }}-${{ matrix.shard }}
|
||||
@@ -439,7 +439,7 @@ jobs:
|
||||
|
||||
- name: Upload HTML report
|
||||
id: upload-html
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: playwright-html-${{ github.run_number }}
|
||||
path: playwright-report
|
||||
@@ -498,7 +498,7 @@ jobs:
|
||||
args: go run ./pkg/build/a11y --package=grafana.tar.gz --no-threshold-fail --results=./pa11y-ci-results.json
|
||||
- name: Upload pa11y results
|
||||
if: github.event_name != 'pull_request'
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
retention-days: 1
|
||||
name: pa11y-ci-results
|
||||
|
||||
@@ -18,6 +18,7 @@ jobs:
|
||||
contents: read
|
||||
outputs:
|
||||
changed: ${{ steps.detect-changes.outputs.frontend }}
|
||||
devenv-changed: ${{ steps.detect-changes.outputs.devenv }}
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
@@ -169,3 +170,26 @@ jobs:
|
||||
needs: ${{ toJson(needs) }}
|
||||
failure-message: "One or more unit test jobs have failed"
|
||||
success-message: "All unit tests completed successfully"
|
||||
|
||||
devenv:
|
||||
needs:
|
||||
- detect-changes
|
||||
if: needs.detect-changes.outputs.devenv-changed == 'true'
|
||||
runs-on: ubuntu-x64-large
|
||||
name: "Devenv frontend-service build"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
- name: Setup Docker
|
||||
uses: docker/setup-docker-action@efe9e3891a4f7307e689f2100b33a155b900a608 # v4
|
||||
- name: Setup Node.js
|
||||
uses: ./.github/actions/setup-node
|
||||
- name: Install Tilt
|
||||
run: curl -fsSL https://raw.githubusercontent.com/tilt-dev/tilt/master/scripts/install.sh | bash
|
||||
- name: Create empty config files # TODO: the tiltfile should conditionally mount these only if they exist, like the enterprise license
|
||||
run: |
|
||||
touch devenv/frontend-service/configs/grafana-api.local.ini
|
||||
touch devenv/frontend-service/configs/frontend-service.local.ini
|
||||
- name: Test frontend-service Tiltfile
|
||||
run: tilt ci --file devenv/frontend-service/Tiltfile
|
||||
|
||||
@@ -34,6 +34,6 @@ jobs:
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: docker/setup-docker-action@3fb92d6d9c634363128c8cce4bc3b2826526370a # v4
|
||||
- uses: docker/setup-docker-action@efe9e3891a4f7307e689f2100b33a155b900a608 # v4
|
||||
- name: Build Dockerfile
|
||||
run: make build-docker-full
|
||||
|
||||
@@ -78,6 +78,7 @@ jobs:
|
||||
# We don't need more than this since it has to wait for the other tests.
|
||||
shard: [
|
||||
1/4, 2/4, 3/4, 4/4,
|
||||
profiled,
|
||||
]
|
||||
fail-fast: false
|
||||
|
||||
@@ -96,13 +97,68 @@ jobs:
|
||||
go-version-file: go.mod
|
||||
cache: true
|
||||
- name: Run tests
|
||||
if: matrix.shard != 'profiled'
|
||||
env:
|
||||
SHARD: ${{ matrix.shard }}
|
||||
CGO_ENABLED: 0
|
||||
SKIP_PACKAGES: |-
|
||||
pkg/tests/apis/folder
|
||||
pkg/tests/apis/dashboard
|
||||
run: |
|
||||
set -euo pipefail
|
||||
readarray -t PACKAGES <<< "$(./scripts/ci/backend-tests/pkgs-with-tests-named.sh -b TestIntegration | ./scripts/ci/backend-tests/shard.sh -N"$SHARD" -d-)"
|
||||
# ionice since tests are IO intensive
|
||||
CGO_ENABLED=0 ionice -c2 -n7 go test -p=4 -tags=sqlite -timeout=8m -run '^TestIntegration' "${PACKAGES[@]}"
|
||||
# Build regex pattern like: pkg1$|pkg2$|pkg3$
|
||||
SKIP_PATTERN=$(echo "$SKIP_PACKAGES" | sed '/^$/d' | sed 's|.*|&$|' | paste -sd '|' -)
|
||||
readarray -t PACKAGES <<< "$(./scripts/ci/backend-tests/pkgs-with-tests-named.sh -b TestIntegration | ./scripts/ci/backend-tests/shard.sh -N "$SHARD" -d - | grep -Ev "($SKIP_PATTERN)")"
|
||||
go test -tags=sqlite -timeout=8m -run '^TestIntegration' "${PACKAGES[@]}"
|
||||
- name: Run profiled tests
|
||||
id: run-profiled-tests
|
||||
if: matrix.shard == 'profiled'
|
||||
env:
|
||||
CGO_ENABLED: 0
|
||||
PROFILED_PACKAGES: |-
|
||||
pkg/tests/apis/folder
|
||||
pkg/tests/apis/dashboard
|
||||
run: |
|
||||
set -euo pipefail
|
||||
# Build regex pattern line: pkg1$|pkg2$|pkg3$
|
||||
PROFILE_PATTERN=$(echo "$PROFILED_PACKAGES" | sed '/^$/d' | sed 's|.*|&$|' | paste -sd '|' -)
|
||||
readarray -t PACKAGES <<< "$(./scripts/ci/backend-tests/pkgs-with-tests-named.sh -b TestIntegration | grep -E "($PROFILE_PATTERN)")"
|
||||
if [ ${#PACKAGES[@]} -eq 0 ]; then
|
||||
echo "⚠️ No profiled packages found"
|
||||
exit 0
|
||||
fi
|
||||
mkdir -p profiles
|
||||
EXIT_CODE=0
|
||||
# Run each profiled package sequentially
|
||||
for full_pkg in "${PACKAGES[@]}"; do
|
||||
# Build valid file name
|
||||
pkg_name=$(basename "$full_pkg" | tr '/' '_' | tr '.' '_')
|
||||
echo "📦 Running $full_pkg"
|
||||
set +e
|
||||
go test -tags=sqlite -timeout=8m -run '^TestIntegration' \
|
||||
-outputdir=profiles \
|
||||
-cpuprofile="cpu_${pkg_name}.prof" \
|
||||
-memprofile="mem_${pkg_name}.prof" \
|
||||
-trace="trace_${pkg_name}.out" \
|
||||
"$full_pkg" 2>&1 | tee "profiles/test_${pkg_name}.log"
|
||||
TEST_EXIT=$?
|
||||
set -e
|
||||
if [ $TEST_EXIT -ne 0 ]; then
|
||||
echo "❌ $full_pkg failed with exit code $TEST_EXIT"
|
||||
EXIT_CODE=1
|
||||
else
|
||||
echo "✅ $full_pkg passed"
|
||||
fi
|
||||
done
|
||||
exit $EXIT_CODE
|
||||
- name: Output test profiles and traces
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v4
|
||||
if: (matrix.shard == 'profiled' && !cancelled())
|
||||
with:
|
||||
name: integration-test-profiles-sqlite-nocgo-${{ github.run_number }}
|
||||
path: profiles/
|
||||
retention-days: 7
|
||||
if-no-files-found: ignore
|
||||
mysql:
|
||||
needs: detect-changes
|
||||
if: needs.detect-changes.outputs.changed == 'true'
|
||||
|
||||
@@ -187,12 +187,12 @@ jobs:
|
||||
output: artifacts-${{ matrix.name }}.txt
|
||||
verify: ${{ matrix.verify }}
|
||||
build-id: ${{ github.run_id }}
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
|
||||
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
|
||||
with:
|
||||
name: artifacts-list-${{ matrix.name }}
|
||||
path: ${{ steps.build.outputs.file }}
|
||||
retention-days: 1
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
|
||||
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
|
||||
with:
|
||||
name: artifacts-${{ matrix.name }}
|
||||
path: ${{ steps.build.outputs.dist-dir }}
|
||||
|
||||
@@ -34,7 +34,7 @@ jobs:
|
||||
id-token: write
|
||||
needs: detect-changes
|
||||
if: needs.detect-changes.outputs.changed == 'true'
|
||||
name: "Run Storybook a11y tests"
|
||||
name: "Run Storybook a11y tests (light theme)"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
@@ -64,7 +64,7 @@ jobs:
|
||||
id-token: write
|
||||
needs: detect-changes
|
||||
if: needs.detect-changes.outputs.changed == 'true'
|
||||
name: "Run Storybook a11y tests"
|
||||
name: "Run Storybook a11y tests (dark theme)"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
include ../sdk.mk
|
||||
|
||||
.PHONY: etcd
|
||||
etcd:
|
||||
@docker run -d --name etcd --env ALLOW_NONE_AUTHENTICATION=yes -p 22379:2379 bitnamilegacy/etcd:latest
|
||||
|
||||
.PHONY: generate # Run Grafana App SDK code generation
|
||||
generate: install-app-sdk update-app-sdk
|
||||
@$(APP_SDK_BIN) generate \
|
||||
@@ -7,3 +11,24 @@ generate: install-app-sdk update-app-sdk
|
||||
--gogenpath=./pkg/apis \
|
||||
--grouping=group \
|
||||
--defencoding=none
|
||||
|
||||
.PHONY: run
|
||||
run:
|
||||
@go run ./pkg/standalone/server.go --etcd-servers=http://127.0.0.1:22379 --secure-port 7445
|
||||
|
||||
.PHONY: create-checks
|
||||
create-checks:
|
||||
@echo "Creating plugin check..."
|
||||
@curl -k -X POST https://localhost:7445/apis/advisor.grafana.app/v0alpha1/namespaces/stacks-1/checks \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"kind":"Check","apiVersion":"advisor.grafana.app/v0alpha1","spec":{"data":{}},"metadata":{"generateName":"check-","labels":{"advisor.grafana.app/type":"plugin"},"namespace":"stacks-1"},"status":{"report":{"count":0,"failures":[]}}}' \
|
||||
&& echo "Plugin check created successfully"
|
||||
@echo "Creating datasource check..."
|
||||
@curl -k -X POST https://localhost:7445/apis/advisor.grafana.app/v0alpha1/namespaces/stacks-1/checks \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"kind":"Check","apiVersion":"advisor.grafana.app/v0alpha1","spec":{"data":{}},"metadata":{"generateName":"check-","labels":{"advisor.grafana.app/type":"datasource"},"namespace":"stacks-1"},"status":{"report":{"count":0,"failures":[]}}}' \
|
||||
&& echo "Datasource check created successfully"
|
||||
|
||||
delete-checks:
|
||||
@curl -k -X DELETE https://localhost:7445/apis/advisor.grafana.app/v0alpha1/namespaces/stacks-1/checks \
|
||||
&& echo "All checks deleted successfully"
|
||||
|
||||
@@ -152,3 +152,28 @@ Check [`security_config_step.go`](./pkg/app/checks/configchecks/security_config_
|
||||
## Testing
|
||||
|
||||
Create tests for your check and its steps to ensure they work as expected. Test both successful and failure scenarios.
|
||||
|
||||
## Running the Standalone Mode
|
||||
|
||||
To run the standalone mode, you can use the `make run` command. This will start the advisor app in standalone mode, which means it will not be running in a Kubernetes cluster.
|
||||
|
||||
```bash
|
||||
make etcd # Start etcd in a docker container
|
||||
make run # Start the advisor app in standalone mode
|
||||
```
|
||||
|
||||
This will start the advisor app on port 7445. You can then access the advisor app at `http://localhost:7445`.
|
||||
|
||||
To see some sample checks, you can run the following command:
|
||||
|
||||
```bash
|
||||
make create-checks
|
||||
```
|
||||
|
||||
Then you can see list in the URL: `http://localhost:7445/apis/advisor.grafana.app/v0alpha1/namespaces/stacks-1/checks`
|
||||
|
||||
To delete all checks, you can run the following command:
|
||||
|
||||
```bash
|
||||
make delete-checks
|
||||
```
|
||||
|
||||
+20
-2
@@ -15,6 +15,8 @@ require (
|
||||
github.com/stretchr/testify v1.11.1
|
||||
k8s.io/apimachinery v0.34.1
|
||||
k8s.io/apiserver v0.34.1
|
||||
k8s.io/client-go v0.34.1
|
||||
k8s.io/component-base v0.34.1
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912
|
||||
)
|
||||
|
||||
@@ -43,6 +45,7 @@ replace github.com/grafana/grafana/apps/plugins => ../plugins
|
||||
replace github.com/prometheus/alertmanager => github.com/grafana/prometheus-alertmanager v0.25.1-0.20250911094103-5456b6e45604
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.7.0 // indirect
|
||||
dario.cat/mergo v1.0.2 // indirect
|
||||
filippo.io/edwards25519 v1.1.0 // indirect
|
||||
@@ -55,6 +58,7 @@ require (
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver v1.5.0 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.1.6 // indirect
|
||||
github.com/VividCortex/mysqlerr v0.0.0-20170204212430-6c6b55f8796f // indirect
|
||||
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
|
||||
@@ -85,6 +89,7 @@ require (
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cheekybits/genny v1.0.0 // indirect
|
||||
github.com/cloudflare/circl v1.6.1 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
@@ -101,6 +106,7 @@ require (
|
||||
github.com/emicklei/go-restful/v3 v3.13.0 // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/gchaincl/sqlhooks v1.3.0 // indirect
|
||||
github.com/getkin/kin-openapi v0.133.0 // indirect
|
||||
@@ -143,6 +149,7 @@ require (
|
||||
github.com/golang-migrate/migrate/v4 v4.7.0 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/cel-go v0.26.1 // indirect
|
||||
github.com/google/flatbuffers v25.2.10+incompatible // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
@@ -162,6 +169,7 @@ require (
|
||||
github.com/grafana/sqlds/v4 v4.2.7 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20191002090509-6af20e3a5340 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-hclog v1.6.3 // indirect
|
||||
@@ -176,6 +184,7 @@ require (
|
||||
github.com/hashicorp/memberlist v0.5.2 // indirect
|
||||
github.com/hashicorp/yamux v0.1.2 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jaegertracing/jaeger-idl v0.5.0 // indirect
|
||||
github.com/jessevdk/go-flags v1.6.1 // indirect
|
||||
github.com/jmespath-community/go-jmespath v1.1.1 // indirect
|
||||
@@ -250,7 +259,9 @@ require (
|
||||
github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/spf13/cast v1.10.0 // indirect
|
||||
github.com/spf13/cobra v1.10.1 // indirect
|
||||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
github.com/stoewer/go-strcase v1.3.1 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/tetratelabs/wazero v1.8.2 // indirect
|
||||
github.com/thomaspoignant/go-feature-flag v1.42.0 // indirect
|
||||
@@ -262,6 +273,9 @@ require (
|
||||
github.com/woodsbury/decimal128 v1.3.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/zeebo/xxh3 v1.0.2 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.6.4 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.6.4 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.6.4 // indirect
|
||||
go.mongodb.org/mongo-driver v1.17.4 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect
|
||||
@@ -280,6 +294,8 @@ require (
|
||||
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/mock v0.6.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/crypto v0.43.0 // indirect
|
||||
@@ -302,24 +318,26 @@ require (
|
||||
google.golang.org/grpc v1.76.0 // indirect
|
||||
google.golang.org/protobuf v1.36.10 // indirect
|
||||
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/mail.v2 v2.3.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/src-d/go-errors.v1 v1.0.0 // indirect
|
||||
gopkg.in/telebot.v3 v3.3.8 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/api v0.34.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.34.1 // indirect
|
||||
k8s.io/client-go v0.34.1 // indirect
|
||||
k8s.io/component-base v0.34.1 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kms v0.34.1 // indirect
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
|
||||
modernc.org/libc v1.66.10 // indirect
|
||||
modernc.org/mathutil v1.7.1 // indirect
|
||||
modernc.org/memory v1.11.0 // indirect
|
||||
modernc.org/sqlite v1.39.1 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
|
||||
@@ -335,6 +335,7 @@ github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
@@ -463,6 +464,8 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||
github.com/go-openapi/analysis v0.24.0 h1:vE/VFFkICKyYuTWYnplQ+aVr45vlG6NcZKC7BdIXhsA=
|
||||
github.com/go-openapi/analysis v0.24.0/go.mod h1:GLyoJA+bvmGGaHgpfeDh8ldpGo69fAJg7eeMDMRCIrw=
|
||||
github.com/go-openapi/errors v0.22.3 h1:k6Hxa5Jg1TUyZnOwV2Lh81j8ayNw5VVYLvKrp4zFKFs=
|
||||
@@ -668,6 +671,8 @@ github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
|
||||
github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/grafana/alerting v0.0.0-20251009192429-9427c24835ae h1:NLPwY3tIP0lg0g9wTRiMcypm6VRXW6W+MOLBsq8JSVA=
|
||||
github.com/grafana/alerting v0.0.0-20251009192429-9427c24835ae/go.mod h1:VGjS5gDwWEADPP6pF/drqLxEImgeuHlEW5u8E5EfIrM=
|
||||
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
|
||||
@@ -811,6 +816,8 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
|
||||
github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
|
||||
github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
|
||||
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
|
||||
@@ -1048,6 +1055,7 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg
|
||||
github.com/pressly/goose/v3 v3.25.0 h1:6WeYhMWGRCzpyd89SpODFnCBCKz41KrVbRT58nVjGng=
|
||||
github.com/pressly/goose/v3 v3.25.0/go.mod h1:4hC1KrritdCxtuFsqgs1R4AU5bWtTAf+cnWvfhf2DNY=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
|
||||
@@ -1065,6 +1073,7 @@ github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
|
||||
@@ -1079,6 +1088,7 @@ github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57J
|
||||
github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg=
|
||||
github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
@@ -1135,6 +1145,8 @@ github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PX
|
||||
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c h1:Ho+uVpkel/udgjbwB5Lktg9BtvJSh2DT0Hi6LPSyI2w=
|
||||
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
|
||||
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
@@ -1148,6 +1160,7 @@ github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
|
||||
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw=
|
||||
@@ -1173,6 +1186,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
@@ -1190,6 +1204,8 @@ github.com/thomaspoignant/go-feature-flag v1.42.0/go.mod h1:y0QiWH7chHWhGATb/+Xq
|
||||
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tjhop/slog-gokit v0.1.3 h1:6SdexP3UIeg93KLFeiM1Wp1caRwdTLgsD/THxBUy1+o=
|
||||
github.com/tjhop/slog-gokit v0.1.3/go.mod h1:Bbu5v2748qpAWH7k6gse/kw3076IJf6owJmh7yArmJs=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
|
||||
github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
@@ -1217,6 +1233,8 @@ github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcY
|
||||
github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
|
||||
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
@@ -1241,6 +1259,12 @@ go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+
|
||||
go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY=
|
||||
go.etcd.io/etcd/client/v3 v3.6.4 h1:YOMrCfMhRzY8NgtzUsHl8hC2EBSnuqbR3dh84Uryl7A=
|
||||
go.etcd.io/etcd/client/v3 v3.6.4/go.mod h1:jaNNHCyg2FdALyKWnd7hxZXZxZANb0+KGY+YQaEMISo=
|
||||
go.etcd.io/etcd/pkg/v3 v3.6.4 h1:fy8bmXIec1Q35/jRZ0KOes8vuFxbvdN0aAFqmEfJZWA=
|
||||
go.etcd.io/etcd/pkg/v3 v3.6.4/go.mod h1:kKcYWP8gHuBRcteyv6MXWSN0+bVMnfgqiHueIZnKMtE=
|
||||
go.etcd.io/etcd/server/v3 v3.6.4 h1:LsCA7CzjVt+8WGrdsnh6RhC0XqCsLkBly3ve5rTxMAU=
|
||||
go.etcd.io/etcd/server/v3 v3.6.4/go.mod h1:aYCL/h43yiONOv0QIR82kH/2xZ7m+IWYjzRmyQfnCAg=
|
||||
go.etcd.io/raft/v3 v3.6.0 h1:5NtvbDVYpnfZWcIHgGRk9DyzkBIXOi8j+DDp1IcnUWQ=
|
||||
go.etcd.io/raft/v3 v3.6.0/go.mod h1:nLvLevg6+xrVtHUmVaTcTz603gQPHfh7kUAwV6YpfGo=
|
||||
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
|
||||
go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
|
||||
@@ -1393,6 +1417,7 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -1805,6 +1830,7 @@ google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090/go.
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 h1:CirRxTOwnRWVLKzDNrs0CXAaVozJoR4G9xvdRecrdpk=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
|
||||
@@ -0,0 +1,59 @@
|
||||
package mockchecks
|
||||
|
||||
import (
|
||||
"github.com/grafana/grafana/apps/advisor/pkg/app/checkregistry/mockchecks/mocksvcs"
|
||||
"github.com/grafana/grafana/apps/advisor/pkg/app/checks"
|
||||
"github.com/grafana/grafana/apps/advisor/pkg/app/checks/datasourcecheck"
|
||||
"github.com/grafana/grafana/apps/advisor/pkg/app/checks/plugincheck"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/plugins/repo"
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginchecker"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore"
|
||||
)
|
||||
|
||||
// mockchecks.CheckRegistry is a mock implementation of the checkregistry.CheckService interface
|
||||
// TODO: Add mocked checks here
|
||||
type CheckRegistry struct {
|
||||
datasourceSvc datasources.DataSourceService
|
||||
pluginStore pluginstore.Store
|
||||
pluginClient plugins.Client
|
||||
pluginRepo repo.Service
|
||||
GrafanaVersion string
|
||||
pluginContextProvider datasourcecheck.PluginContextProvider
|
||||
updateChecker pluginchecker.PluginUpdateChecker
|
||||
pluginErrorResolver plugins.ErrorResolver
|
||||
}
|
||||
|
||||
func (m *CheckRegistry) Checks() []checks.Check {
|
||||
return []checks.Check{
|
||||
datasourcecheck.New(
|
||||
m.datasourceSvc,
|
||||
m.pluginStore,
|
||||
m.pluginContextProvider,
|
||||
m.pluginClient,
|
||||
m.pluginRepo,
|
||||
m.GrafanaVersion,
|
||||
),
|
||||
plugincheck.New(
|
||||
m.pluginStore,
|
||||
m.pluginRepo,
|
||||
m.updateChecker,
|
||||
m.pluginErrorResolver,
|
||||
m.GrafanaVersion,
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
func New() *CheckRegistry {
|
||||
return &CheckRegistry{
|
||||
datasourceSvc: &mocksvcs.DatasourceSvc{},
|
||||
pluginStore: &mocksvcs.PluginStore{},
|
||||
pluginClient: &mocksvcs.PluginClient{},
|
||||
pluginRepo: &mocksvcs.PluginRepo{},
|
||||
pluginContextProvider: &mocksvcs.PluginContextProvider{},
|
||||
updateChecker: &mocksvcs.UpdateChecker{},
|
||||
pluginErrorResolver: &mocksvcs.PluginErrorResolver{},
|
||||
GrafanaVersion: "1.0.0",
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,44 @@
|
||||
package mocksvcs
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
)
|
||||
|
||||
var dss = map[string]*datasources.DataSource{
|
||||
"prometheus-uid": {
|
||||
ID: 1,
|
||||
UID: "prometheus-uid",
|
||||
Name: "Prometheus",
|
||||
Type: "prometheus",
|
||||
},
|
||||
"mysql-uid": {
|
||||
ID: 2,
|
||||
UID: "mysql-uid",
|
||||
Name: "MySQL",
|
||||
Type: "mysql",
|
||||
},
|
||||
"unknown-uid": {
|
||||
ID: 3,
|
||||
UID: "unknown-uid",
|
||||
Name: "Unknown",
|
||||
Type: "unknown",
|
||||
},
|
||||
}
|
||||
|
||||
type DatasourceSvc struct {
|
||||
datasources.DataSourceService
|
||||
}
|
||||
|
||||
func (m *DatasourceSvc) GetDataSources(ctx context.Context, query *datasources.GetDataSourcesQuery) ([]*datasources.DataSource, error) {
|
||||
sources := make([]*datasources.DataSource, 0, len(dss))
|
||||
for _, ds := range dss {
|
||||
sources = append(sources, ds)
|
||||
}
|
||||
return sources, nil
|
||||
}
|
||||
|
||||
func (m *DatasourceSvc) GetDataSource(ctx context.Context, query *datasources.GetDataSourceQuery) (*datasources.DataSource, error) {
|
||||
return dss[query.UID], nil
|
||||
}
|
||||
@@ -0,0 +1,19 @@
|
||||
package mocksvcs
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
)
|
||||
|
||||
type PluginClient struct {
|
||||
plugins.Client
|
||||
}
|
||||
|
||||
func (m *PluginClient) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) {
|
||||
return &backend.CheckHealthResult{
|
||||
Status: backend.HealthStatusOk,
|
||||
Message: "Plugin is healthy",
|
||||
}, nil
|
||||
}
|
||||
@@ -0,0 +1,53 @@
|
||||
package mocksvcs
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/identity"
|
||||
"github.com/grafana/grafana/pkg/services/datasources"
|
||||
)
|
||||
|
||||
type PluginContextProvider struct {
|
||||
}
|
||||
|
||||
// ACTUALLY USED by datasourcecheck
|
||||
func (m *PluginContextProvider) GetWithDataSource(ctx context.Context, pluginID string, user identity.Requester, ds *datasources.DataSource) (backend.PluginContext, error) {
|
||||
// Create a plugin context with sample data based on the datasource
|
||||
pluginContext := backend.PluginContext{
|
||||
PluginID: pluginID,
|
||||
PluginVersion: "1.0.0",
|
||||
OrgID: 1,
|
||||
DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{
|
||||
ID: ds.ID,
|
||||
UID: ds.UID,
|
||||
Name: ds.Name,
|
||||
URL: ds.URL,
|
||||
JSONData: []byte(`{
|
||||
"httpMethod": "GET",
|
||||
"timeout": "30s",
|
||||
"keepCookies": []
|
||||
}`),
|
||||
DecryptedSecureJSONData: map[string]string{
|
||||
"password": "sample-password",
|
||||
"apiKey": "sample-api-key",
|
||||
},
|
||||
},
|
||||
GrafanaConfig: backend.NewGrafanaCfg(map[string]string{
|
||||
"app_url": "http://localhost:3000",
|
||||
"default_timezone": "UTC",
|
||||
}),
|
||||
}
|
||||
|
||||
// Add user context if provided
|
||||
if user != nil && !user.IsNil() {
|
||||
pluginContext.User = &backend.User{
|
||||
Login: user.GetLogin(),
|
||||
Name: user.GetName(),
|
||||
Email: user.GetEmail(),
|
||||
Role: string(user.GetOrgRole()),
|
||||
}
|
||||
}
|
||||
|
||||
return pluginContext, nil
|
||||
}
|
||||
@@ -0,0 +1,19 @@
|
||||
package mocksvcs
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
)
|
||||
|
||||
type PluginErrorResolver struct {
|
||||
}
|
||||
|
||||
// Assume no plugin with errors
|
||||
func (m *PluginErrorResolver) PluginErrors(ctx context.Context) []*plugins.Error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PluginErrorResolver) PluginError(ctx context.Context, pluginID string) *plugins.Error {
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,26 @@
|
||||
package mocksvcs
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana/pkg/plugins/repo"
|
||||
)
|
||||
|
||||
type PluginRepo struct {
|
||||
repo.Service
|
||||
}
|
||||
|
||||
func (m *PluginRepo) GetPluginsInfo(ctx context.Context, options repo.GetPluginsInfoOptions, compatOpts repo.CompatOpts) ([]repo.PluginInfo, error) {
|
||||
return []repo.PluginInfo{
|
||||
{
|
||||
ID: 1,
|
||||
Slug: "grafana-piechart-panel",
|
||||
Version: "1.6.0",
|
||||
},
|
||||
{
|
||||
ID: 2,
|
||||
Slug: "prometheus",
|
||||
Version: "10.0.0",
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
@@ -0,0 +1,114 @@
|
||||
package mocksvcs
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore"
|
||||
)
|
||||
|
||||
type PluginStore struct {
|
||||
}
|
||||
|
||||
var ps = map[string]pluginstore.Plugin{
|
||||
"prometheus": {
|
||||
JSONData: plugins.JSONData{
|
||||
ID: "prometheus",
|
||||
Type: plugins.TypeDataSource,
|
||||
Name: "Prometheus",
|
||||
Info: plugins.Info{
|
||||
Author: plugins.InfoLink{
|
||||
Name: "Grafana Labs",
|
||||
},
|
||||
Version: "10.0.0",
|
||||
},
|
||||
Category: "Time series databases",
|
||||
State: plugins.ReleaseStateAlpha,
|
||||
Backend: true,
|
||||
Metrics: true,
|
||||
Logs: true,
|
||||
Alerting: true,
|
||||
Explore: true,
|
||||
},
|
||||
Class: plugins.ClassCore,
|
||||
Signature: plugins.SignatureStatusInternal,
|
||||
SignatureType: plugins.SignatureTypeGrafana,
|
||||
SignatureOrg: "grafana.com",
|
||||
},
|
||||
"test-datasource": {
|
||||
JSONData: plugins.JSONData{
|
||||
ID: "grafana-piechart-panel",
|
||||
Type: plugins.TypePanel,
|
||||
Name: "Pie Chart",
|
||||
Info: plugins.Info{
|
||||
Author: plugins.InfoLink{
|
||||
Name: "Grafana Labs",
|
||||
},
|
||||
Version: "1.6.0",
|
||||
},
|
||||
Category: "Visualization",
|
||||
State: plugins.ReleaseStateAlpha,
|
||||
},
|
||||
Class: plugins.ClassCore,
|
||||
Signature: plugins.SignatureStatusInternal,
|
||||
SignatureType: plugins.SignatureTypeGrafana,
|
||||
SignatureOrg: "grafana.com",
|
||||
},
|
||||
"grafana-piechart-panel": {
|
||||
JSONData: plugins.JSONData{
|
||||
ID: "prometheus",
|
||||
Type: plugins.TypeDataSource,
|
||||
Name: "Prometheus",
|
||||
Info: plugins.Info{
|
||||
Author: plugins.InfoLink{
|
||||
Name: "Grafana Labs",
|
||||
},
|
||||
Version: "10.0.0",
|
||||
},
|
||||
Category: "Time series databases",
|
||||
State: plugins.ReleaseStateAlpha,
|
||||
Backend: true,
|
||||
Metrics: true,
|
||||
Logs: true,
|
||||
Alerting: true,
|
||||
Explore: true,
|
||||
},
|
||||
Class: plugins.ClassCore,
|
||||
Signature: plugins.SignatureStatusInternal,
|
||||
SignatureType: plugins.SignatureTypeGrafana,
|
||||
SignatureOrg: "grafana.com",
|
||||
},
|
||||
"test-app": {
|
||||
JSONData: plugins.JSONData{
|
||||
ID: "test-app",
|
||||
Type: plugins.TypeApp,
|
||||
Name: "Test App",
|
||||
Info: plugins.Info{
|
||||
Author: plugins.InfoLink{
|
||||
Name: "Test Author",
|
||||
},
|
||||
Version: "2.0.0",
|
||||
},
|
||||
Category: "Application",
|
||||
State: plugins.ReleaseStateAlpha,
|
||||
AutoEnabled: true,
|
||||
},
|
||||
Class: plugins.ClassExternal,
|
||||
Signature: plugins.SignatureStatusValid,
|
||||
SignatureType: plugins.SignatureTypeCommercial,
|
||||
SignatureOrg: "test.com",
|
||||
},
|
||||
}
|
||||
|
||||
func (s *PluginStore) Plugin(ctx context.Context, pluginID string) (pluginstore.Plugin, bool) {
|
||||
p, ok := ps[pluginID]
|
||||
return p, ok
|
||||
}
|
||||
|
||||
func (s *PluginStore) Plugins(ctx context.Context, pluginTypes ...plugins.Type) []pluginstore.Plugin {
|
||||
plugins := make([]pluginstore.Plugin, 0, len(ps))
|
||||
for _, p := range ps {
|
||||
plugins = append(plugins, p)
|
||||
}
|
||||
return plugins
|
||||
}
|
||||
@@ -0,0 +1,18 @@
|
||||
package mocksvcs
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore"
|
||||
)
|
||||
|
||||
type UpdateChecker struct {
|
||||
}
|
||||
|
||||
func (m *UpdateChecker) IsUpdatable(ctx context.Context, plugin pluginstore.Plugin) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *UpdateChecker) CanUpdate(pluginId string, currentVersion string, targetVersion string, onlyMinor bool) bool {
|
||||
return true
|
||||
}
|
||||
@@ -26,7 +26,7 @@ const (
|
||||
type check struct {
|
||||
DatasourceSvc datasources.DataSourceService
|
||||
PluginStore pluginstore.Store
|
||||
PluginContextProvider pluginContextProvider
|
||||
PluginContextProvider PluginContextProvider
|
||||
PluginClient plugins.Client
|
||||
PluginRepo repo.Service
|
||||
GrafanaVersion string
|
||||
@@ -37,7 +37,7 @@ type check struct {
|
||||
func New(
|
||||
datasourceSvc datasources.DataSourceService,
|
||||
pluginStore pluginstore.Store,
|
||||
pluginContextProvider pluginContextProvider,
|
||||
pluginContextProvider PluginContextProvider,
|
||||
pluginClient plugins.Client,
|
||||
pluginRepo repo.Service,
|
||||
grafanaVersion string,
|
||||
@@ -168,6 +168,6 @@ func (c *check) canBeInstalled(ctx context.Context, pluginType string) (bool, er
|
||||
return isAvailableInRepo, nil
|
||||
}
|
||||
|
||||
type pluginContextProvider interface {
|
||||
type PluginContextProvider interface {
|
||||
GetWithDataSource(ctx context.Context, pluginID string, user identity.Requester, ds *datasources.DataSource) (backend.PluginContext, error)
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
)
|
||||
|
||||
type healthCheckStep struct {
|
||||
PluginContextProvider pluginContextProvider
|
||||
PluginContextProvider PluginContextProvider
|
||||
PluginClient plugins.Client
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,58 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
"os"
|
||||
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
genericapiserver "k8s.io/apiserver/pkg/server"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/component-base/cli"
|
||||
|
||||
"github.com/grafana/grafana-app-sdk/app"
|
||||
"github.com/grafana/grafana-app-sdk/k8s/apiserver"
|
||||
"github.com/grafana/grafana-app-sdk/k8s/apiserver/cmd/server"
|
||||
"github.com/grafana/grafana-app-sdk/logging"
|
||||
"github.com/grafana/grafana-app-sdk/simple"
|
||||
"github.com/grafana/grafana/apps/advisor/pkg/apis"
|
||||
advisorapp "github.com/grafana/grafana/apps/advisor/pkg/app"
|
||||
"github.com/grafana/grafana/apps/advisor/pkg/app/checkregistry"
|
||||
"github.com/grafana/grafana/apps/advisor/pkg/app/checkregistry/mockchecks"
|
||||
)
|
||||
|
||||
func main() {
|
||||
logging.DefaultLogger = logging.NewSLogLogger(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
|
||||
Level: slog.LevelDebug,
|
||||
}))
|
||||
provider := simple.NewAppProvider(apis.LocalManifest(), nil, advisorapp.New)
|
||||
config := app.Config{
|
||||
KubeConfig: rest.Config{}, // this will be replaced by the apiserver loopback config
|
||||
ManifestData: *apis.LocalManifest().ManifestData,
|
||||
SpecificConfig: checkregistry.AdvisorAppConfig{
|
||||
CheckRegistry: mockchecks.New(),
|
||||
PluginConfig: map[string]string{},
|
||||
StackID: "1", // Numeric stack ID for standalone mode
|
||||
OrgService: nil, // Not needed when StackID is set
|
||||
},
|
||||
}
|
||||
installer, err := apiserver.NewDefaultAppInstaller(provider, config, &apis.GoTypeAssociator{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ctx := genericapiserver.SetupSignalContext()
|
||||
opts := apiserver.NewOptions([]apiserver.AppInstaller{installer})
|
||||
opts.RecommendedOptions.Authentication = nil
|
||||
opts.RecommendedOptions.Authorization = nil
|
||||
opts.RecommendedOptions.CoreAPI = nil
|
||||
opts.RecommendedOptions.EgressSelector = nil
|
||||
opts.RecommendedOptions.Admission.Plugins = admission.NewPlugins()
|
||||
opts.RecommendedOptions.Admission.RecommendedPluginOrder = []string{}
|
||||
opts.RecommendedOptions.Admission.EnablePlugins = []string{}
|
||||
opts.RecommendedOptions.Features.EnablePriorityAndFairness = false
|
||||
opts.RecommendedOptions.ExtraAdmissionInitializers = func(_ *genericapiserver.RecommendedConfig) ([]admission.PluginInitializer, error) {
|
||||
return nil, nil
|
||||
}
|
||||
cmd := server.NewCommandStartServer(ctx, opts)
|
||||
code := cli.Run(cmd)
|
||||
os.Exit(code)
|
||||
}
|
||||
@@ -3,8 +3,9 @@ package dashboard
|
||||
// Information about how the requesting user can use a given dashboard
|
||||
type DashboardAccess struct {
|
||||
// Metadata fields
|
||||
Slug string `json:"slug,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
Slug string `json:"slug,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
IsPublic bool `json:"isPublic"`
|
||||
|
||||
// The permissions part
|
||||
CanSave bool `json:"canSave"`
|
||||
|
||||
@@ -12,8 +12,9 @@ type DashboardWithAccessInfo struct {
|
||||
// +k8s:deepcopy-gen=true
|
||||
type DashboardAccess struct {
|
||||
// Metadata fields
|
||||
Slug string `json:"slug,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
Slug string `json:"slug,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
IsPublic bool `json:"isPublic"`
|
||||
|
||||
// The permissions part
|
||||
CanSave bool `json:"canSave"`
|
||||
|
||||
@@ -112,6 +112,7 @@ func Convert_dashboard_AnnotationPermission_To_v0alpha1_AnnotationPermission(in
|
||||
func autoConvert_v0alpha1_DashboardAccess_To_dashboard_DashboardAccess(in *DashboardAccess, out *dashboard.DashboardAccess, s conversion.Scope) error {
|
||||
out.Slug = in.Slug
|
||||
out.Url = in.Url
|
||||
out.IsPublic = in.IsPublic
|
||||
out.CanSave = in.CanSave
|
||||
out.CanEdit = in.CanEdit
|
||||
out.CanAdmin = in.CanAdmin
|
||||
@@ -129,6 +130,7 @@ func Convert_v0alpha1_DashboardAccess_To_dashboard_DashboardAccess(in *Dashboard
|
||||
func autoConvert_dashboard_DashboardAccess_To_v0alpha1_DashboardAccess(in *dashboard.DashboardAccess, out *DashboardAccess, s conversion.Scope) error {
|
||||
out.Slug = in.Slug
|
||||
out.Url = in.Url
|
||||
out.IsPublic = in.IsPublic
|
||||
out.CanSave = in.CanSave
|
||||
out.CanEdit = in.CanEdit
|
||||
out.CanAdmin = in.CanAdmin
|
||||
|
||||
@@ -170,6 +170,13 @@ func schema_pkg_apis_dashboard_v0alpha1_DashboardAccess(ref common.ReferenceCall
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"isPublic": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: false,
|
||||
Type: []string{"boolean"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"canSave": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "The permissions part",
|
||||
@@ -212,7 +219,7 @@ func schema_pkg_apis_dashboard_v0alpha1_DashboardAccess(ref common.ReferenceCall
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"canSave", "canEdit", "canAdmin", "canStar", "canDelete", "annotationsPermissions"},
|
||||
Required: []string{"isPublic", "canSave", "canEdit", "canAdmin", "canStar", "canDelete", "annotationsPermissions"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
|
||||
@@ -123,8 +123,9 @@ type DashboardWithAccessInfo struct {
|
||||
// +k8s:deepcopy-gen=true
|
||||
type DashboardAccess struct {
|
||||
// Metadata fields
|
||||
Slug string `json:"slug,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
Slug string `json:"slug,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
IsPublic bool `json:"isPublic"`
|
||||
|
||||
// The permissions part
|
||||
CanSave bool `json:"canSave"`
|
||||
|
||||
@@ -118,6 +118,7 @@ func Convert_dashboard_AnnotationPermission_To_v1beta1_AnnotationPermission(in *
|
||||
func autoConvert_v1beta1_DashboardAccess_To_dashboard_DashboardAccess(in *DashboardAccess, out *dashboard.DashboardAccess, s conversion.Scope) error {
|
||||
out.Slug = in.Slug
|
||||
out.Url = in.Url
|
||||
out.IsPublic = in.IsPublic
|
||||
out.CanSave = in.CanSave
|
||||
out.CanEdit = in.CanEdit
|
||||
out.CanAdmin = in.CanAdmin
|
||||
@@ -135,6 +136,7 @@ func Convert_v1beta1_DashboardAccess_To_dashboard_DashboardAccess(in *DashboardA
|
||||
func autoConvert_dashboard_DashboardAccess_To_v1beta1_DashboardAccess(in *dashboard.DashboardAccess, out *DashboardAccess, s conversion.Scope) error {
|
||||
out.Slug = in.Slug
|
||||
out.Url = in.Url
|
||||
out.IsPublic = in.IsPublic
|
||||
out.CanSave = in.CanSave
|
||||
out.CanEdit = in.CanEdit
|
||||
out.CanAdmin = in.CanAdmin
|
||||
|
||||
@@ -165,6 +165,13 @@ func schema_pkg_apis_dashboard_v1beta1_DashboardAccess(ref common.ReferenceCallb
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"isPublic": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: false,
|
||||
Type: []string{"boolean"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"canSave": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "The permissions part",
|
||||
@@ -207,7 +214,7 @@ func schema_pkg_apis_dashboard_v1beta1_DashboardAccess(ref common.ReferenceCallb
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"canSave", "canEdit", "canAdmin", "canStar", "canDelete", "annotationsPermissions"},
|
||||
Required: []string{"isPublic", "canSave", "canEdit", "canAdmin", "canStar", "canDelete", "annotationsPermissions"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
|
||||
@@ -123,8 +123,9 @@ type DashboardWithAccessInfo struct {
|
||||
// +k8s:deepcopy-gen=true
|
||||
type DashboardAccess struct {
|
||||
// Metadata fields
|
||||
Slug string `json:"slug,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
Slug string `json:"slug,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
IsPublic bool `json:"isPublic"`
|
||||
|
||||
// The permissions part
|
||||
CanSave bool `json:"canSave"`
|
||||
|
||||
@@ -118,6 +118,7 @@ func Convert_dashboard_AnnotationPermission_To_v2alpha1_AnnotationPermission(in
|
||||
func autoConvert_v2alpha1_DashboardAccess_To_dashboard_DashboardAccess(in *DashboardAccess, out *dashboard.DashboardAccess, s conversion.Scope) error {
|
||||
out.Slug = in.Slug
|
||||
out.Url = in.Url
|
||||
out.IsPublic = in.IsPublic
|
||||
out.CanSave = in.CanSave
|
||||
out.CanEdit = in.CanEdit
|
||||
out.CanAdmin = in.CanAdmin
|
||||
@@ -135,6 +136,7 @@ func Convert_v2alpha1_DashboardAccess_To_dashboard_DashboardAccess(in *Dashboard
|
||||
func autoConvert_dashboard_DashboardAccess_To_v2alpha1_DashboardAccess(in *dashboard.DashboardAccess, out *DashboardAccess, s conversion.Scope) error {
|
||||
out.Slug = in.Slug
|
||||
out.Url = in.Url
|
||||
out.IsPublic = in.IsPublic
|
||||
out.CanSave = in.CanSave
|
||||
out.CanEdit = in.CanEdit
|
||||
out.CanAdmin = in.CanAdmin
|
||||
|
||||
@@ -265,6 +265,13 @@ func schema_pkg_apis_dashboard_v2alpha1_DashboardAccess(ref common.ReferenceCall
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"isPublic": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: false,
|
||||
Type: []string{"boolean"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"canSave": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "The permissions part",
|
||||
@@ -307,7 +314,7 @@ func schema_pkg_apis_dashboard_v2alpha1_DashboardAccess(ref common.ReferenceCall
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"canSave", "canEdit", "canAdmin", "canStar", "canDelete", "annotationsPermissions"},
|
||||
Required: []string{"isPublic", "canSave", "canEdit", "canAdmin", "canStar", "canDelete", "annotationsPermissions"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
|
||||
@@ -123,8 +123,9 @@ type DashboardWithAccessInfo struct {
|
||||
// +k8s:deepcopy-gen=true
|
||||
type DashboardAccess struct {
|
||||
// Metadata fields
|
||||
Slug string `json:"slug,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
Slug string `json:"slug,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
IsPublic bool `json:"isPublic"`
|
||||
|
||||
// The permissions part
|
||||
CanSave bool `json:"canSave"`
|
||||
|
||||
@@ -118,6 +118,7 @@ func Convert_dashboard_AnnotationPermission_To_v2beta1_AnnotationPermission(in *
|
||||
func autoConvert_v2beta1_DashboardAccess_To_dashboard_DashboardAccess(in *DashboardAccess, out *dashboard.DashboardAccess, s conversion.Scope) error {
|
||||
out.Slug = in.Slug
|
||||
out.Url = in.Url
|
||||
out.IsPublic = in.IsPublic
|
||||
out.CanSave = in.CanSave
|
||||
out.CanEdit = in.CanEdit
|
||||
out.CanAdmin = in.CanAdmin
|
||||
@@ -135,6 +136,7 @@ func Convert_v2beta1_DashboardAccess_To_dashboard_DashboardAccess(in *DashboardA
|
||||
func autoConvert_dashboard_DashboardAccess_To_v2beta1_DashboardAccess(in *dashboard.DashboardAccess, out *DashboardAccess, s conversion.Scope) error {
|
||||
out.Slug = in.Slug
|
||||
out.Url = in.Url
|
||||
out.IsPublic = in.IsPublic
|
||||
out.CanSave = in.CanSave
|
||||
out.CanEdit = in.CanEdit
|
||||
out.CanAdmin = in.CanAdmin
|
||||
|
||||
@@ -269,6 +269,13 @@ func schema_pkg_apis_dashboard_v2beta1_DashboardAccess(ref common.ReferenceCallb
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"isPublic": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: false,
|
||||
Type: []string{"boolean"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"canSave": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "The permissions part",
|
||||
@@ -311,7 +318,7 @@ func schema_pkg_apis_dashboard_v2beta1_DashboardAccess(ref common.ReferenceCallb
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"canSave", "canEdit", "canAdmin", "canStar", "canDelete", "annotationsPermissions"},
|
||||
Required: []string{"isPublic", "canSave", "canEdit", "canAdmin", "canStar", "canDelete", "annotationsPermissions"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
|
||||
+1158
File diff suppressed because it is too large
Load Diff
@@ -122,6 +122,8 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1/go.mod h1:8cl44BDmi+
|
||||
github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk=
|
||||
github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
|
||||
@@ -427,6 +429,10 @@ github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG
|
||||
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
|
||||
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
|
||||
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
@@ -436,6 +442,8 @@ github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03V
|
||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
|
||||
github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
@@ -507,6 +515,8 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
|
||||
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
|
||||
github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
|
||||
@@ -595,6 +605,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-openapi/analysis v0.24.0 h1:vE/VFFkICKyYuTWYnplQ+aVr45vlG6NcZKC7BdIXhsA=
|
||||
github.com/go-openapi/analysis v0.24.0/go.mod h1:GLyoJA+bvmGGaHgpfeDh8ldpGo69fAJg7eeMDMRCIrw=
|
||||
github.com/go-openapi/errors v0.22.3 h1:k6Hxa5Jg1TUyZnOwV2Lh81j8ayNw5VVYLvKrp4zFKFs=
|
||||
@@ -1106,6 +1118,8 @@ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0=
|
||||
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
|
||||
github.com/m3db/prometheus_remote_client_golang v0.4.4 h1:DsAIjVKoCp7Ym35tAOFL1OuMLIdIikAEHeNPHY+yyM8=
|
||||
github.com/m3db/prometheus_remote_client_golang v0.4.4/go.mod h1:wHfVbA3eAK6dQvKjCkHhusWYegCk3bDGkA15zymSHdc=
|
||||
github.com/madflojo/testcerts v1.4.0 h1:I09gN0C1ly9IgeVNcAqKk8RAKIJTe3QnFrrPBDyvzN4=
|
||||
@@ -1114,6 +1128,8 @@ github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
|
||||
github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
||||
github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE=
|
||||
github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
|
||||
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
||||
github.com/mattbaird/jsonpatch v0.0.0-20240118010651-0ba75a80ca38 h1:hQWBtNqRYrI7CWIaUSXXtNKR90KzcUA5uiuxFVWw7sU=
|
||||
@@ -1197,8 +1213,20 @@ github.com/mithrandie/ternary v1.1.1 h1:k/joD6UGVYxHixYmSR8EGgDFNONBMqyD373xT4QR
|
||||
github.com/mithrandie/ternary v1.1.1/go.mod h1:0D9Ba3+09K2TdSZO7/bFCC0GjSXetCvYuYq0u8FY/1g=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
|
||||
github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo=
|
||||
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
|
||||
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
|
||||
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
|
||||
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
|
||||
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
|
||||
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
|
||||
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
|
||||
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/mocktools/go-smtp-mock/v2 v2.5.1 h1:QcMJMChSgG1olVj4o6xxQFdrWzRjYNrcq660HAjd0wA=
|
||||
github.com/mocktools/go-smtp-mock/v2 v2.5.1/go.mod h1:Rr8M2njlxx//l5INl2+uESnsL2lDsL24teEykCrGfmE=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@@ -1212,6 +1240,8 @@ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWu
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
|
||||
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
@@ -1319,6 +1349,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/pressly/goose/v3 v3.25.0 h1:6WeYhMWGRCzpyd89SpODFnCBCKz41KrVbRT58nVjGng=
|
||||
github.com/pressly/goose/v3 v3.25.0/go.mod h1:4hC1KrritdCxtuFsqgs1R4AU5bWtTAf+cnWvfhf2DNY=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
@@ -1419,6 +1451,8 @@ github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah
|
||||
github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas=
|
||||
github.com/shadowspore/fossil-delta v0.0.0-20241213113458-1d797d70cbe3 h1:/4/IJi5iyTdh6mqOUaASW148HQpujYiHl0Wl78dSOSc=
|
||||
github.com/shadowspore/fossil-delta v0.0.0-20241213113458-1d797d70cbe3/go.mod h1:aJIMhRsunltJR926EB2MUg8qHemFQDreSB33pyto2Ps=
|
||||
github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs=
|
||||
github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
|
||||
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
|
||||
@@ -1498,6 +1532,8 @@ github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD
|
||||
github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/testcontainers/testcontainers-go v0.36.0 h1:YpffyLuHtdp5EUsI5mT4sRw8GZhO/5ozyDT1xWGXt00=
|
||||
github.com/testcontainers/testcontainers-go v0.36.0/go.mod h1:yk73GVJ0KUZIHUtFna6MO7QS144qYpoY8lEEtU9Hed0=
|
||||
github.com/tetratelabs/wazero v1.8.2 h1:yIgLR/b2bN31bjxwXHD8a3d+BogigR952csSDdLYEv4=
|
||||
github.com/tetratelabs/wazero v1.8.2/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs=
|
||||
github.com/thejerf/slogassert v0.3.4 h1:VoTsXixRbXMrRSSxDjYTiEDCM4VWbsYPW5rB/hX24kM=
|
||||
@@ -1507,6 +1543,10 @@ github.com/thomaspoignant/go-feature-flag v1.42.0/go.mod h1:y0QiWH7chHWhGATb/+Xq
|
||||
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tjhop/slog-gokit v0.1.3 h1:6SdexP3UIeg93KLFeiM1Wp1caRwdTLgsD/THxBUy1+o=
|
||||
github.com/tjhop/slog-gokit v0.1.3/go.mod h1:Bbu5v2748qpAWH7k6gse/kw3076IJf6owJmh7yArmJs=
|
||||
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
|
||||
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
|
||||
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
|
||||
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
@@ -1562,6 +1602,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
github.com/zclconf/go-cty v1.16.3 h1:osr++gw2T61A8KVYHoQiFbFd1Lh3JOCXc/jFLJXKTxk=
|
||||
github.com/zclconf/go-cty v1.16.3/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
|
||||
github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
|
||||
|
||||
@@ -6,6 +6,7 @@ require (
|
||||
github.com/grafana/grafana-app-sdk v0.48.1
|
||||
github.com/grafana/grafana-app-sdk/logging v0.48.1
|
||||
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250428110029-a8ea72012bde
|
||||
github.com/stretchr/testify v1.11.1
|
||||
k8s.io/apimachinery v0.34.1
|
||||
k8s.io/apiserver v0.34.1
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
|
||||
@@ -93,6 +93,7 @@ func equalStringPointers(a, b *string) bool {
|
||||
type InstallRegistrar struct {
|
||||
clientGenerator resource.ClientGenerator
|
||||
client *pluginsv0alpha1.PluginClient
|
||||
clientErr error
|
||||
clientOnce sync.Once
|
||||
}
|
||||
|
||||
@@ -107,20 +108,21 @@ func (r *InstallRegistrar) GetClient() (*pluginsv0alpha1.PluginClient, error) {
|
||||
r.clientOnce.Do(func() {
|
||||
client, err := pluginsv0alpha1.NewPluginClientFromGenerator(r.clientGenerator)
|
||||
if err != nil {
|
||||
r.clientErr = err
|
||||
r.client = nil
|
||||
return
|
||||
}
|
||||
r.client = client
|
||||
})
|
||||
|
||||
return r.client, nil
|
||||
return r.client, r.clientErr
|
||||
}
|
||||
|
||||
// Register creates or updates a plugin install in the registry.
|
||||
func (r *InstallRegistrar) Register(ctx context.Context, namespace string, install *PluginInstall) error {
|
||||
client, err := r.GetClient()
|
||||
if err != nil {
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
identifier := resource.Identifier{
|
||||
Namespace: namespace,
|
||||
@@ -132,9 +134,12 @@ func (r *InstallRegistrar) Register(ctx context.Context, namespace string, insta
|
||||
return err
|
||||
}
|
||||
|
||||
if existing != nil && install.ShouldUpdate(existing) {
|
||||
_, err = client.Update(ctx, install.ToPluginInstallV0Alpha1(namespace), resource.UpdateOptions{ResourceVersion: existing.ResourceVersion})
|
||||
return err
|
||||
if existing != nil {
|
||||
if install.ShouldUpdate(existing) {
|
||||
_, err = client.Update(ctx, install.ToPluginInstallV0Alpha1(namespace), resource.UpdateOptions{ResourceVersion: existing.ResourceVersion})
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err = client.Create(ctx, install.ToPluginInstallV0Alpha1(namespace), resource.CreateOptions{})
|
||||
@@ -155,6 +160,10 @@ func (r *InstallRegistrar) Unregister(ctx context.Context, namespace string, nam
|
||||
if err != nil && !errorsK8s.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
// if the plugin doesn't exist, nothing to unregister
|
||||
if existing == nil {
|
||||
return nil
|
||||
}
|
||||
// if the source is different, do not unregister
|
||||
if existingSource, ok := existing.Annotations[PluginInstallSourceAnnotation]; ok && existingSource != source {
|
||||
return nil
|
||||
|
||||
@@ -0,0 +1,908 @@
|
||||
package install
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/grafana/grafana-app-sdk/resource"
|
||||
"github.com/stretchr/testify/require"
|
||||
errorsK8s "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
pluginsv0alpha1 "github.com/grafana/grafana/apps/plugins/pkg/apis/plugins/v0alpha1"
|
||||
)
|
||||
|
||||
func TestPluginInstall_ShouldUpdate(t *testing.T) {
|
||||
baseExisting := &pluginsv0alpha1.Plugin{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "org-1",
|
||||
Name: "plugin-1",
|
||||
Annotations: map[string]string{
|
||||
PluginInstallSourceAnnotation: SourcePluginStore,
|
||||
},
|
||||
},
|
||||
Spec: pluginsv0alpha1.PluginSpec{
|
||||
Id: "plugin-1",
|
||||
Version: "1.0.0",
|
||||
Class: pluginsv0alpha1.PluginSpecClass(ClassExternal),
|
||||
},
|
||||
}
|
||||
|
||||
baseInstall := PluginInstall{
|
||||
ID: "plugin-1",
|
||||
Version: "1.0.0",
|
||||
Class: ClassExternal,
|
||||
Source: SourcePluginStore,
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
modifyInstall func(*PluginInstall)
|
||||
modifyExisting func(*pluginsv0alpha1.Plugin)
|
||||
expectUpdate bool
|
||||
}{
|
||||
{
|
||||
name: "no changes",
|
||||
expectUpdate: false,
|
||||
},
|
||||
{
|
||||
name: "version differs",
|
||||
modifyInstall: func(pi *PluginInstall) {
|
||||
pi.Version = "2.0.0"
|
||||
},
|
||||
expectUpdate: true,
|
||||
},
|
||||
{
|
||||
name: "class differs",
|
||||
modifyInstall: func(pi *PluginInstall) {
|
||||
pi.Class = ClassCore
|
||||
},
|
||||
expectUpdate: true,
|
||||
},
|
||||
{
|
||||
name: "url differs",
|
||||
modifyInstall: func(pi *PluginInstall) {
|
||||
pi.URL = "https://example.com/plugin.zip"
|
||||
},
|
||||
expectUpdate: true,
|
||||
},
|
||||
{
|
||||
name: "source differs",
|
||||
modifyExisting: func(existing *pluginsv0alpha1.Plugin) {
|
||||
existing.Annotations[PluginInstallSourceAnnotation] = SourceUnknown
|
||||
},
|
||||
expectUpdate: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
existing := baseExisting.DeepCopy()
|
||||
install := baseInstall
|
||||
|
||||
if tt.modifyExisting != nil {
|
||||
tt.modifyExisting(existing)
|
||||
}
|
||||
if tt.modifyInstall != nil {
|
||||
tt.modifyInstall(&install)
|
||||
}
|
||||
|
||||
require.Equal(t, tt.expectUpdate, install.ShouldUpdate(existing))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInstallRegistrar_Register(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
install *PluginInstall
|
||||
existing *pluginsv0alpha1.Plugin
|
||||
existingErr error
|
||||
expectedCreates int
|
||||
expectedUpdates int
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "creates plugin when not found",
|
||||
install: &PluginInstall{
|
||||
ID: "plugin-1",
|
||||
Version: "1.0.0",
|
||||
Class: ClassExternal,
|
||||
Source: SourcePluginStore,
|
||||
},
|
||||
existingErr: errorsK8s.NewNotFound(pluginGroupResource(), "plugin-1"),
|
||||
expectedCreates: 1,
|
||||
},
|
||||
{
|
||||
name: "updates plugin when fields change",
|
||||
install: &PluginInstall{
|
||||
ID: "plugin-1",
|
||||
Version: "2.0.0",
|
||||
Class: ClassExternal,
|
||||
Source: SourcePluginStore,
|
||||
},
|
||||
existing: &pluginsv0alpha1.Plugin{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "org-1",
|
||||
Name: "plugin-1",
|
||||
ResourceVersion: "7",
|
||||
Annotations: map[string]string{
|
||||
PluginInstallSourceAnnotation: SourcePluginStore,
|
||||
},
|
||||
},
|
||||
Spec: pluginsv0alpha1.PluginSpec{
|
||||
Id: "plugin-1",
|
||||
Version: "1.0.0",
|
||||
Class: pluginsv0alpha1.PluginSpecClass(ClassExternal),
|
||||
},
|
||||
},
|
||||
expectedUpdates: 1,
|
||||
},
|
||||
{
|
||||
name: "skips create when plugin matches",
|
||||
install: &PluginInstall{
|
||||
ID: "plugin-1",
|
||||
Version: "1.0.0",
|
||||
Class: ClassExternal,
|
||||
Source: SourcePluginStore,
|
||||
},
|
||||
existing: &pluginsv0alpha1.Plugin{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "org-1",
|
||||
Name: "plugin-1",
|
||||
ResourceVersion: "9",
|
||||
Annotations: map[string]string{
|
||||
PluginInstallSourceAnnotation: SourcePluginStore,
|
||||
},
|
||||
},
|
||||
Spec: pluginsv0alpha1.PluginSpec{
|
||||
Id: "plugin-1",
|
||||
Version: "1.0.0",
|
||||
Class: pluginsv0alpha1.PluginSpecClass(ClassExternal),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "returns error on unexpected get failure",
|
||||
install: &PluginInstall{
|
||||
ID: "plugin-err",
|
||||
Version: "1.0.0",
|
||||
Class: ClassExternal,
|
||||
Source: SourcePluginStore,
|
||||
},
|
||||
existingErr: errorsK8s.NewInternalError(errors.New("boom")),
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
createCalls := 0
|
||||
updateCalls := 0
|
||||
var receivedResourceVersions []string
|
||||
var updatedPlugins []*pluginsv0alpha1.Plugin
|
||||
|
||||
fakeClient := &fakePluginInstallClient{
|
||||
getFunc: func(context.Context, resource.Identifier) (*pluginsv0alpha1.Plugin, error) {
|
||||
if tt.existingErr != nil {
|
||||
return nil, tt.existingErr
|
||||
}
|
||||
if tt.existing == nil {
|
||||
return nil, errorsK8s.NewNotFound(pluginGroupResource(), "plugin-1")
|
||||
}
|
||||
return tt.existing.DeepCopy(), nil
|
||||
},
|
||||
createFunc: func(context.Context, *pluginsv0alpha1.Plugin, resource.CreateOptions) (*pluginsv0alpha1.Plugin, error) {
|
||||
createCalls++
|
||||
return tt.install.ToPluginInstallV0Alpha1("org-1"), nil
|
||||
},
|
||||
updateFunc: func(_ context.Context, obj *pluginsv0alpha1.Plugin, opts resource.UpdateOptions) (*pluginsv0alpha1.Plugin, error) {
|
||||
updateCalls++
|
||||
receivedResourceVersions = append(receivedResourceVersions, opts.ResourceVersion)
|
||||
updatedPlugins = append(updatedPlugins, obj)
|
||||
return obj, nil
|
||||
},
|
||||
}
|
||||
|
||||
registrar := NewInstallRegistrar(&fakeClientGenerator{client: fakeClient})
|
||||
|
||||
err := registrar.Register(ctx, "org-1", tt.install)
|
||||
if tt.expectError {
|
||||
require.Error(t, err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedCreates, createCalls)
|
||||
require.Equal(t, tt.expectedUpdates, updateCalls)
|
||||
|
||||
if tt.expectedUpdates > 0 {
|
||||
require.Equal(t, []string{tt.existing.ResourceVersion}, receivedResourceVersions)
|
||||
require.Len(t, updatedPlugins, 1)
|
||||
require.Equal(t, tt.install.Version, updatedPlugins[0].Spec.Version)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func pluginGroupResource() schema.GroupResource {
|
||||
return schema.GroupResource{Group: pluginsv0alpha1.APIGroup, Resource: "plugininstalls"}
|
||||
}
|
||||
|
||||
type fakePluginInstallClient struct {
|
||||
listAllFunc func(ctx context.Context, namespace string, opts resource.ListOptions) (*pluginsv0alpha1.PluginList, error)
|
||||
getFunc func(ctx context.Context, identifier resource.Identifier) (*pluginsv0alpha1.Plugin, error)
|
||||
createFunc func(ctx context.Context, obj *pluginsv0alpha1.Plugin, opts resource.CreateOptions) (*pluginsv0alpha1.Plugin, error)
|
||||
updateFunc func(ctx context.Context, obj *pluginsv0alpha1.Plugin, opts resource.UpdateOptions) (*pluginsv0alpha1.Plugin, error)
|
||||
deleteFunc func(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error
|
||||
}
|
||||
|
||||
func (f *fakePluginInstallClient) Get(ctx context.Context, identifier resource.Identifier) (*pluginsv0alpha1.Plugin, error) {
|
||||
if f.getFunc != nil {
|
||||
return f.getFunc(ctx, identifier)
|
||||
}
|
||||
return nil, errorsK8s.NewNotFound(pluginGroupResource(), identifier.Name)
|
||||
}
|
||||
|
||||
func (f *fakePluginInstallClient) ListAll(ctx context.Context, namespace string, opts resource.ListOptions) (*pluginsv0alpha1.PluginList, error) {
|
||||
if f.listAllFunc != nil {
|
||||
return f.listAllFunc(ctx, namespace, opts)
|
||||
}
|
||||
return &pluginsv0alpha1.PluginList{}, nil
|
||||
}
|
||||
|
||||
func (f *fakePluginInstallClient) List(ctx context.Context, namespace string, opts resource.ListOptions) (*pluginsv0alpha1.PluginList, error) {
|
||||
return f.ListAll(ctx, namespace, opts)
|
||||
}
|
||||
|
||||
func (f *fakePluginInstallClient) Create(ctx context.Context, obj *pluginsv0alpha1.Plugin, opts resource.CreateOptions) (*pluginsv0alpha1.Plugin, error) {
|
||||
if f.createFunc != nil {
|
||||
return f.createFunc(ctx, obj, opts)
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func (f *fakePluginInstallClient) Update(ctx context.Context, obj *pluginsv0alpha1.Plugin, opts resource.UpdateOptions) (*pluginsv0alpha1.Plugin, error) {
|
||||
if f.updateFunc != nil {
|
||||
return f.updateFunc(ctx, obj, opts)
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func (f *fakePluginInstallClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus pluginsv0alpha1.PluginStatus, opts resource.UpdateOptions) (*pluginsv0alpha1.Plugin, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (f *fakePluginInstallClient) Patch(ctx context.Context, identifier resource.Identifier, req resource.PatchRequest, opts resource.PatchOptions) (*pluginsv0alpha1.Plugin, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (f *fakePluginInstallClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error {
|
||||
if f.deleteFunc != nil {
|
||||
return f.deleteFunc(ctx, identifier, opts)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type fakeClientGenerator struct {
|
||||
client *fakePluginInstallClient
|
||||
shouldError bool
|
||||
}
|
||||
|
||||
func (f *fakeClientGenerator) ClientFor(resource.Kind) (resource.Client, error) {
|
||||
if f.shouldError {
|
||||
return nil, errors.New("client generation failed")
|
||||
}
|
||||
return &fakeResourceClient{client: f.client}, nil
|
||||
}
|
||||
|
||||
type fakeResourceClient struct {
|
||||
client *fakePluginInstallClient
|
||||
}
|
||||
|
||||
func (f *fakeResourceClient) Get(ctx context.Context, identifier resource.Identifier) (resource.Object, error) {
|
||||
return f.client.Get(ctx, identifier)
|
||||
}
|
||||
|
||||
func (f *fakeResourceClient) GetInto(ctx context.Context, identifier resource.Identifier, into resource.Object) error {
|
||||
obj, err := f.client.Get(ctx, identifier)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if target, ok := into.(*pluginsv0alpha1.Plugin); ok {
|
||||
*target = *obj
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeResourceClient) List(ctx context.Context, namespace string, options resource.ListOptions) (resource.ListObject, error) {
|
||||
return f.client.ListAll(ctx, namespace, options)
|
||||
}
|
||||
|
||||
func (f *fakeResourceClient) ListInto(ctx context.Context, namespace string, options resource.ListOptions, into resource.ListObject) error {
|
||||
list, err := f.client.ListAll(ctx, namespace, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if target, ok := into.(*pluginsv0alpha1.PluginList); ok {
|
||||
*target = *list
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeResourceClient) Create(ctx context.Context, identifier resource.Identifier, obj resource.Object, options resource.CreateOptions) (resource.Object, error) {
|
||||
plugin := obj.(*pluginsv0alpha1.Plugin)
|
||||
return f.client.Create(ctx, plugin, options)
|
||||
}
|
||||
|
||||
func (f *fakeResourceClient) CreateInto(ctx context.Context, identifier resource.Identifier, obj resource.Object, options resource.CreateOptions, into resource.Object) error {
|
||||
created, err := f.Create(ctx, identifier, obj, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if plugin, ok := created.(*pluginsv0alpha1.Plugin); ok {
|
||||
if target, ok := into.(*pluginsv0alpha1.Plugin); ok {
|
||||
*target = *plugin
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeResourceClient) Update(ctx context.Context, identifier resource.Identifier, obj resource.Object, options resource.UpdateOptions) (resource.Object, error) {
|
||||
plugin := obj.(*pluginsv0alpha1.Plugin)
|
||||
return f.client.Update(ctx, plugin, options)
|
||||
}
|
||||
|
||||
func (f *fakeResourceClient) UpdateInto(ctx context.Context, identifier resource.Identifier, obj resource.Object, options resource.UpdateOptions, into resource.Object) error {
|
||||
updated, err := f.Update(ctx, identifier, obj, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if plugin, ok := updated.(*pluginsv0alpha1.Plugin); ok {
|
||||
if target, ok := into.(*pluginsv0alpha1.Plugin); ok {
|
||||
*target = *plugin
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeResourceClient) Patch(ctx context.Context, identifier resource.Identifier, patch resource.PatchRequest, options resource.PatchOptions) (resource.Object, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (f *fakeResourceClient) PatchInto(ctx context.Context, identifier resource.Identifier, patch resource.PatchRequest, options resource.PatchOptions, into resource.Object) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete forwards the deletion to the underlying fake plugin client.
func (f *fakeResourceClient) Delete(ctx context.Context, identifier resource.Identifier, options resource.DeleteOptions) error {
	return f.client.Delete(ctx, identifier, options)
}
|
||||
|
||||
// SubresourceRequest is a stub that always succeeds with an empty payload;
// subresource routes are not exercised by these tests.
func (f *fakeResourceClient) SubresourceRequest(ctx context.Context, identifier resource.Identifier, req resource.CustomRouteRequestOptions) ([]byte, error) {
	return []byte{}, nil
}
|
||||
|
||||
// Watch returns a fakeWatchResponse whose event channel is already closed,
// so callers observe an immediately-terminated watch.
func (f *fakeResourceClient) Watch(ctx context.Context, namespace string, options resource.WatchOptions) (resource.WatchResponse, error) {
	return &fakeWatchResponse{}, nil
}
|
||||
|
||||
// fakeWatchResponse is a stub watch response that delivers no events: its
// event channel is created pre-closed and Stop has nothing to tear down.
type fakeWatchResponse struct{}
|
||||
|
||||
// Stop is a no-op; there is no running watch to terminate.
func (f *fakeWatchResponse) Stop() {}
|
||||
|
||||
func (f *fakeWatchResponse) WatchEvents() <-chan resource.WatchEvent {
|
||||
ch := make(chan resource.WatchEvent)
|
||||
close(ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
// TestPluginInstall_ToPluginInstallV0Alpha1 verifies the conversion of a
// PluginInstall into its pluginsv0alpha1.Plugin API representation: URL
// pointer handling, class mapping, the install-source annotation, and
// object metadata (namespace, name, and spec id).
func TestPluginInstall_ToPluginInstallV0Alpha1(t *testing.T) {
	tests := []struct {
		name      string
		install   PluginInstall
		namespace string
		// validate holds per-case assertions run after the shared checks.
		validate func(*testing.T, *pluginsv0alpha1.Plugin)
	}{
		{
			// An empty URL string must map to a nil *string in the spec,
			// not a pointer to "".
			name: "empty URL creates nil pointer",
			install: PluginInstall{
				ID:      "plugin-1",
				Version: "1.0.0",
				Class:   ClassExternal,
				Source:  SourcePluginStore,
			},
			namespace: "org-1",
			validate: func(t *testing.T, p *pluginsv0alpha1.Plugin) {
				require.Nil(t, p.Spec.Url)
			},
		},
		{
			name: "non-empty URL creates pointer",
			install: PluginInstall{
				ID:      "plugin-1",
				Version: "1.0.0",
				URL:     "https://example.com/plugin.zip",
				Class:   ClassExternal,
				Source:  SourcePluginStore,
			},
			namespace: "org-1",
			validate: func(t *testing.T, p *pluginsv0alpha1.Plugin) {
				require.NotNil(t, p.Spec.Url)
				require.Equal(t, "https://example.com/plugin.zip", *p.Spec.Url)
			},
		},
		{
			name: "core class is mapped correctly",
			install: PluginInstall{
				ID:      "plugin-core",
				Version: "2.0.0",
				Class:   ClassCore,
				Source:  SourcePluginStore,
			},
			namespace: "org-2",
			validate: func(t *testing.T, p *pluginsv0alpha1.Plugin) {
				require.Equal(t, pluginsv0alpha1.PluginSpecClass(ClassCore), p.Spec.Class)
			},
		},
		{
			name: "cdn class is mapped correctly",
			install: PluginInstall{
				ID:      "plugin-cdn",
				Version: "3.0.0",
				Class:   ClassCDN,
				Source:  SourcePluginStore,
			},
			namespace: "org-3",
			validate: func(t *testing.T, p *pluginsv0alpha1.Plugin) {
				require.Equal(t, pluginsv0alpha1.PluginSpecClass(ClassCDN), p.Spec.Class)
			},
		},
		{
			// The install source is carried over as an object annotation.
			name: "source annotation is set correctly",
			install: PluginInstall{
				ID:      "plugin-1",
				Version: "1.0.0",
				Class:   ClassExternal,
				Source:  SourceUnknown,
			},
			namespace: "org-1",
			validate: func(t *testing.T, p *pluginsv0alpha1.Plugin) {
				require.Equal(t, SourceUnknown, p.Annotations[PluginInstallSourceAnnotation])
			},
		},
		{
			name: "namespace and name are set correctly",
			install: PluginInstall{
				ID:      "my-plugin",
				Version: "1.0.0",
				Class:   ClassExternal,
				Source:  SourcePluginStore,
			},
			namespace: "my-namespace",
			validate: func(t *testing.T, p *pluginsv0alpha1.Plugin) {
				require.Equal(t, "my-namespace", p.Namespace)
				require.Equal(t, "my-plugin", p.Name)
				require.Equal(t, "my-plugin", p.Spec.Id)
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := tt.install.ToPluginInstallV0Alpha1(tt.namespace)
			// Shared invariants for every case: identity fields round-trip.
			require.NotNil(t, result)
			require.Equal(t, tt.namespace, result.Namespace)
			require.Equal(t, tt.install.ID, result.Name)
			require.Equal(t, tt.install.ID, result.Spec.Id)
			require.Equal(t, tt.install.Version, result.Spec.Version)
			tt.validate(t, result)
		})
	}
}
|
||||
|
||||
func TestEqualStringPointers(t *testing.T) {
|
||||
str1 := "value1"
|
||||
str2 := "value2"
|
||||
str3 := "value1"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
a *string
|
||||
b *string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "both nil",
|
||||
a: nil,
|
||||
b: nil,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "first nil, second non-nil",
|
||||
a: nil,
|
||||
b: &str1,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "first non-nil, second nil",
|
||||
a: &str1,
|
||||
b: nil,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "both non-nil with same value",
|
||||
a: &str1,
|
||||
b: &str3,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "both non-nil with different values",
|
||||
a: &str1,
|
||||
b: &str2,
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := equalStringPointers(tt.a, tt.b)
|
||||
require.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestPluginInstall_ShouldUpdate_URLTransitions verifies that ShouldUpdate
// reports a change exactly when the desired URL differs from the stored spec
// URL, covering nil<->non-nil transitions in both directions as well as the
// two no-change cases.
func TestPluginInstall_ShouldUpdate_URLTransitions(t *testing.T) {
	existingURL := "https://old.example.com/plugin.zip"
	newURL := "https://new.example.com/plugin.zip"

	tests := []struct {
		name         string
		install      PluginInstall
		existingURL  *string
		expectUpdate bool
	}{
		{
			name: "URL transition from nil to non-nil",
			install: PluginInstall{
				ID:      "plugin-1",
				Version: "1.0.0",
				URL:     newURL,
				Class:   ClassExternal,
				Source:  SourcePluginStore,
			},
			existingURL:  nil,
			expectUpdate: true,
		},
		{
			name: "URL transition from non-nil to nil",
			install: PluginInstall{
				ID:      "plugin-1",
				Version: "1.0.0",
				URL:     "",
				Class:   ClassExternal,
				Source:  SourcePluginStore,
			},
			existingURL:  &existingURL,
			expectUpdate: true,
		},
		{
			name: "URL stays nil",
			install: PluginInstall{
				ID:      "plugin-1",
				Version: "1.0.0",
				URL:     "",
				Class:   ClassExternal,
				Source:  SourcePluginStore,
			},
			existingURL:  nil,
			expectUpdate: false,
		},
		{
			name: "URL stays same non-nil value",
			install: PluginInstall{
				ID:      "plugin-1",
				Version: "1.0.0",
				URL:     existingURL,
				Class:   ClassExternal,
				Source:  SourcePluginStore,
			},
			existingURL:  &existingURL,
			expectUpdate: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// The existing object matches the install in every field except
			// the URL, so only the URL transition drives the expected result.
			existing := &pluginsv0alpha1.Plugin{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "org-1",
					Name:      "plugin-1",
					Annotations: map[string]string{
						PluginInstallSourceAnnotation: SourcePluginStore,
					},
				},
				Spec: pluginsv0alpha1.PluginSpec{
					Id:      "plugin-1",
					Version: "1.0.0",
					Url:     tt.existingURL,
					Class:   pluginsv0alpha1.PluginSpecClass(ClassExternal),
				},
			}

			require.Equal(t, tt.expectUpdate, tt.install.ShouldUpdate(existing))
		})
	}
}
|
||||
|
||||
func TestInstallRegistrar_GetClient(t *testing.T) {
|
||||
t.Run("successfully creates client on first call", func(t *testing.T) {
|
||||
fakeClient := &fakePluginInstallClient{}
|
||||
generator := &fakeClientGenerator{client: fakeClient}
|
||||
registrar := NewInstallRegistrar(generator)
|
||||
|
||||
client, err := registrar.GetClient()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, client)
|
||||
})
|
||||
|
||||
t.Run("returns same client on subsequent calls", func(t *testing.T) {
|
||||
fakeClient := &fakePluginInstallClient{}
|
||||
generator := &fakeClientGenerator{client: fakeClient}
|
||||
registrar := NewInstallRegistrar(generator)
|
||||
|
||||
client1, err1 := registrar.GetClient()
|
||||
require.NoError(t, err1)
|
||||
|
||||
client2, err2 := registrar.GetClient()
|
||||
require.NoError(t, err2)
|
||||
|
||||
require.Equal(t, client1, client2)
|
||||
})
|
||||
|
||||
t.Run("returns error when client generation fails", func(t *testing.T) {
|
||||
generator := &fakeClientGenerator{client: nil, shouldError: true}
|
||||
registrar := NewInstallRegistrar(generator)
|
||||
|
||||
client, err := registrar.GetClient()
|
||||
require.Error(t, err)
|
||||
require.Nil(t, client)
|
||||
})
|
||||
}
|
||||
|
||||
// TestInstallRegistrar_Register_ErrorCases verifies that Register propagates
// failures from the underlying client on both the create path (plugin does
// not exist yet) and the update path (plugin exists with an older version).
func TestInstallRegistrar_Register_ErrorCases(t *testing.T) {
	tests := []struct {
		name        string
		install     *PluginInstall
		setupClient func(*fakePluginInstallClient)
		expectError bool
	}{
		{
			// Get reports NotFound, so Register attempts a create, which fails.
			name: "create fails",
			install: &PluginInstall{
				ID:      "plugin-1",
				Version: "1.0.0",
				Class:   ClassExternal,
				Source:  SourcePluginStore,
			},
			setupClient: func(fc *fakePluginInstallClient) {
				fc.getFunc = func(context.Context, resource.Identifier) (*pluginsv0alpha1.Plugin, error) {
					return nil, errorsK8s.NewNotFound(pluginGroupResource(), "plugin-1")
				}
				fc.createFunc = func(context.Context, *pluginsv0alpha1.Plugin, resource.CreateOptions) (*pluginsv0alpha1.Plugin, error) {
					return nil, errors.New("create failed")
				}
			},
			expectError: true,
		},
		{
			// Get returns an existing plugin at 1.0.0 while 2.0.0 is
			// requested, so Register attempts an update, which fails.
			name: "update fails",
			install: &PluginInstall{
				ID:      "plugin-1",
				Version: "2.0.0",
				Class:   ClassExternal,
				Source:  SourcePluginStore,
			},
			setupClient: func(fc *fakePluginInstallClient) {
				fc.getFunc = func(context.Context, resource.Identifier) (*pluginsv0alpha1.Plugin, error) {
					return &pluginsv0alpha1.Plugin{
						ObjectMeta: metav1.ObjectMeta{
							Namespace:       "org-1",
							Name:            "plugin-1",
							ResourceVersion: "5",
							Annotations: map[string]string{
								PluginInstallSourceAnnotation: SourcePluginStore,
							},
						},
						Spec: pluginsv0alpha1.PluginSpec{
							Id:      "plugin-1",
							Version: "1.0.0",
							Class:   pluginsv0alpha1.PluginSpecClass(ClassExternal),
						},
					}, nil
				}
				fc.updateFunc = func(context.Context, *pluginsv0alpha1.Plugin, resource.UpdateOptions) (*pluginsv0alpha1.Plugin, error) {
					return nil, errors.New("update failed")
				}
			},
			expectError: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx := context.Background()
			fakeClient := &fakePluginInstallClient{}
			tt.setupClient(fakeClient)

			registrar := NewInstallRegistrar(&fakeClientGenerator{client: fakeClient})

			err := registrar.Register(ctx, "org-1", tt.install)
			if tt.expectError {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}
|
||||
|
||||
func TestInstallRegistrar_Unregister(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
namespace string
|
||||
pluginName string
|
||||
source Source
|
||||
existing *pluginsv0alpha1.Plugin
|
||||
existingErr error
|
||||
expectedCalls int
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "successfully deletes plugin with matching source",
|
||||
namespace: "org-1",
|
||||
pluginName: "plugin-1",
|
||||
source: SourcePluginStore,
|
||||
existing: &pluginsv0alpha1.Plugin{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "org-1",
|
||||
Name: "plugin-1",
|
||||
Annotations: map[string]string{
|
||||
PluginInstallSourceAnnotation: SourcePluginStore,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedCalls: 1,
|
||||
},
|
||||
{
|
||||
name: "plugin not found should not error",
|
||||
namespace: "org-1",
|
||||
pluginName: "plugin-nonexistent",
|
||||
source: SourcePluginStore,
|
||||
existingErr: errorsK8s.NewNotFound(pluginGroupResource(), "plugin-nonexistent"),
|
||||
expectedCalls: 0,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "skips delete when source doesn't match",
|
||||
namespace: "org-1",
|
||||
pluginName: "plugin-1",
|
||||
source: SourcePluginStore,
|
||||
existing: &pluginsv0alpha1.Plugin{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "org-1",
|
||||
Name: "plugin-1",
|
||||
Annotations: map[string]string{
|
||||
PluginInstallSourceAnnotation: SourceUnknown,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedCalls: 0,
|
||||
},
|
||||
{
|
||||
name: "returns error on unexpected get failure",
|
||||
namespace: "org-1",
|
||||
pluginName: "plugin-err",
|
||||
source: SourcePluginStore,
|
||||
existingErr: errorsK8s.NewInternalError(errors.New("get failed")),
|
||||
expectedCalls: 0,
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "delete failure returns error",
|
||||
namespace: "org-1",
|
||||
pluginName: "plugin-1",
|
||||
source: SourcePluginStore,
|
||||
existing: &pluginsv0alpha1.Plugin{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "org-1",
|
||||
Name: "plugin-1",
|
||||
Annotations: map[string]string{
|
||||
PluginInstallSourceAnnotation: SourcePluginStore,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedCalls: 1,
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "handles missing source annotation",
|
||||
namespace: "org-1",
|
||||
pluginName: "plugin-1",
|
||||
source: SourcePluginStore,
|
||||
existing: &pluginsv0alpha1.Plugin{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "org-1",
|
||||
Name: "plugin-1",
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
},
|
||||
expectedCalls: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
deleteCalls := 0
|
||||
|
||||
fakeClient := &fakePluginInstallClient{
|
||||
getFunc: func(context.Context, resource.Identifier) (*pluginsv0alpha1.Plugin, error) {
|
||||
if tt.existingErr != nil {
|
||||
return nil, tt.existingErr
|
||||
}
|
||||
if tt.existing == nil {
|
||||
return nil, errorsK8s.NewNotFound(pluginGroupResource(), tt.pluginName)
|
||||
}
|
||||
return tt.existing.DeepCopy(), nil
|
||||
},
|
||||
deleteFunc: func(context.Context, resource.Identifier, resource.DeleteOptions) error {
|
||||
deleteCalls++
|
||||
if tt.name == "delete failure returns error" {
|
||||
return errors.New("delete failed")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
registrar := NewInstallRegistrar(&fakeClientGenerator{client: fakeClient})
|
||||
|
||||
err := registrar.Unregister(ctx, tt.namespace, tt.pluginName, tt.source)
|
||||
|
||||
require.Equal(t, tt.expectedCalls, deleteCalls)
|
||||
if tt.expectError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInstallRegistrar_GetClientError(t *testing.T) {
|
||||
t.Run("Register returns error with nil client", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
generator := &fakeClientGenerator{client: nil, shouldError: true}
|
||||
registrar := NewInstallRegistrar(generator)
|
||||
|
||||
install := &PluginInstall{
|
||||
ID: "plugin-1",
|
||||
Version: "1.0.0",
|
||||
Class: ClassExternal,
|
||||
Source: SourcePluginStore,
|
||||
}
|
||||
|
||||
err := registrar.Register(ctx, "org-1", install)
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("Unregister returns error with nil client", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
generator := &fakeClientGenerator{client: nil, shouldError: true}
|
||||
registrar := NewInstallRegistrar(generator)
|
||||
|
||||
err := registrar.Unregister(ctx, "org-1", "plugin-1", SourcePluginStore)
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -86,7 +86,7 @@ services:
|
||||
- 'alloy.logs=true'
|
||||
|
||||
alloy:
|
||||
image: grafana/alloy:latest
|
||||
image: grafana/alloy:v1.11.2
|
||||
volumes:
|
||||
- ./configs/alloy:/alloy-config
|
||||
- /var/run/docker.sock:/var/run/docker.sock # To scrape Docker container logs
|
||||
@@ -104,7 +104,7 @@ services:
|
||||
- 'alloy.logs=true'
|
||||
|
||||
prometheus:
|
||||
image: prom/prometheus
|
||||
image: prom/prometheus:v3.7.2
|
||||
volumes:
|
||||
- prometheus-data:/prometheus
|
||||
command:
|
||||
@@ -116,7 +116,7 @@ services:
|
||||
- 'alloy.logs=true'
|
||||
|
||||
loki:
|
||||
image: grafana/loki
|
||||
image: grafana/loki:3.5.7
|
||||
volumes:
|
||||
- loki-data:/loki
|
||||
command: -config.file=/etc/loki/local-config.yaml
|
||||
@@ -124,7 +124,7 @@ services:
|
||||
- 'alloy.logs=true'
|
||||
|
||||
tempo-init:
|
||||
image: busybox
|
||||
image: busybox:1.37.0
|
||||
user: root
|
||||
entrypoint:
|
||||
- 'chown'
|
||||
@@ -134,7 +134,7 @@ services:
|
||||
- tempo-data:/var/tempo
|
||||
|
||||
tempo:
|
||||
image: grafana/tempo
|
||||
image: grafana/tempo:2.9.0
|
||||
volumes:
|
||||
- tempo-data:/var/lib/tempo
|
||||
- ./configs/tempo.yaml:/etc/tempo/tempo.yaml
|
||||
|
||||
@@ -96,6 +96,7 @@
|
||||
"rows-to-fields": (import '../dev-dashboards/transforms/rows-to-fields.json'),
|
||||
"shared_queries": (import '../dev-dashboards/panel-common/shared_queries.json'),
|
||||
"slow_queries_and_annotations": (import '../dev-dashboards/scenarios/slow_queries_and_annotations.json'),
|
||||
"status-history-thresholds-mappings": (import '../dev-dashboards/panel-status-history/status-history-thresholds-mappings.json'),
|
||||
"table_footer": (import '../dev-dashboards/panel-table/table_footer.json'),
|
||||
"table_kitchen_sink": (import '../dev-dashboards/panel-table/table_kitchen_sink.json'),
|
||||
"table_markdown": (import '../dev-dashboards/panel-table/table_markdown.json'),
|
||||
|
||||
@@ -141,6 +141,20 @@ Alternatively, you can use the `index()` function to retrieve the query value:
|
||||
{{ index $values "B" }} CPU usage for {{ index $labels "instance" }} over the last 5 minutes.
|
||||
```
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
|
||||
Variable names that start with a number (for example, `1B`) are not [valid identifiers in Go templates](https://go.dev/ref/spec#Identifiers).
|
||||
|
||||
To access a value or label whose key starts with a number, use the `index` function:
|
||||
|
||||
```
|
||||
{{ index $values "1B" }} CPU usage for {{ index $labels "1instance" }} over the last 5 minutes.
|
||||
```
|
||||
|
||||
Using `{{ $values.1B.Value }}` is invalid and causes the template code to render as plain text.
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
#### $value
|
||||
|
||||
The `$value` variable is a string containing the labels and values of all instant queries; threshold, reduce and math expressions, and classic conditions in the alert rule.
|
||||
|
||||
@@ -40,8 +40,7 @@ To set up file sync with local files, you need to:
|
||||
Local file provisioning using **Administration** > **Provisioning** will eventually replace the traditional methods Grafana has used for referencing local file systems for dashboard files.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
For production system, we recommend using the `folderFromFilesStructure` capability instead of **Administration** > **Provisioning** to include dashboards from a local file system in your Grafana instance.
|
||||
Refer to [Provision Grafana](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#provision-folders-structure-from-filesystem-to-grafana) for more information.
|
||||
For production systems, use the `folderFromFilesStructure` capability instead of **Administration** > **Provisioning** to include dashboards from a local file system in your Grafana instance. Refer to [Provision Grafana](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#provision-folders-structure-from-filesystem-to-grafana) for more information.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Limitations
|
||||
@@ -125,6 +124,18 @@ The set up process verifies the path and provides an error message if a problem
|
||||
|
||||
### Choose what to synchronize
|
||||
|
||||
#### Synchronization limitations
|
||||
|
||||
Full instance sync is not available in Grafana Cloud.
|
||||
|
||||
In Grafana OSS/Enterprise:
|
||||
|
||||
- If you try to perform a full instance sync with resources that contain alerts or panels, the connection will be blocked.
|
||||
- You won't be able to create new alerts or library panels after setup is completed.
|
||||
- If you opted for full instance sync and want to use alerts and library panels, you'll have to delete the provisioned repository and connect again with folder sync.
|
||||
|
||||
#### Set up synchronization
|
||||
|
||||
Choose to either sync your entire organization resources with external storage, or to sync certain resources to a new Grafana folder (with up to 10 connections).
|
||||
|
||||
- Choose **Sync all resources with external storage** if you want to sync and manage your entire Grafana instance through external storage. With this option, all of your dashboards are synced to that one repository. You can only have one provisioned connection with this selection, and you won't have the option of setting up additional repositories to connect to.
|
||||
|
||||
@@ -61,6 +61,8 @@ Refer to [Known limitations](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
### Requirements
|
||||
|
||||
To set up Git Sync, you need:
|
||||
|
||||
- Administration rights in your Grafana organization.
|
||||
@@ -128,34 +130,37 @@ To connect your GitHub repository, follow these steps:
|
||||
|
||||
### Choose what to synchronize
|
||||
|
||||
In this step you can decide which elements to synchronize. Keep in mind the available options depend on the status of your Grafana instance.
|
||||
|
||||
- If the instance contains resources in an incompatible data format, you'll have to migrate all the data using instance sync. Folder sync won't be supported.
|
||||
- If there is already another connection using folder sync, instance sync won't be offered.
|
||||
|
||||
#### Synchronization limitations
|
||||
|
||||
Git Sync only supports dashboards and folders. Alerts, panels, and other resources are not supported yet.
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
|
||||
Git Sync only works with specific folders for the moment. Full-instance sync is not currently supported. Refer to [Supported resources](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync#supported-resources) for more details about which resources you can sync.
|
||||
Refer to [Known limitations](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync#known-limitations/) before using Git Sync. Refer to [Supported resources](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/intro-git-sync#supported-resources) for details about which resources you can sync.
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
In this step you can decide which elements to synchronize. Keep in mind the available options depend on the status of your GitHub repository. The first time you connect Grafana with a GitHub repository, you need to synchronize with external storage. If you are syncing with a new or empty repository, you won't have an option to migrate dashboards.
|
||||
Full instance sync is not available in Grafana Cloud.
|
||||
|
||||
1. Choose to either sync your entire organization resources with external storage, or to sync certain resources to a new Grafana folder (with up to 10 connections).
|
||||
In Grafana OSS/Enterprise:
|
||||
|
||||
- If you try to perform a full instance sync with resources that contain alerts or panels, Git Sync will block the connection.
|
||||
- You won't be able to create new alerts or library panels after the setup is completed.
|
||||
- If you opted for full instance sync and want to use alerts and library panels, you'll have to delete the synced repository and connect again with folder sync.
|
||||
|
||||
#### Set up synchronization
|
||||
|
||||
To set up synchronization, choose to either sync your entire organization resources with external storage, or to sync certain resources to a new Grafana folder (with up to 10 connections).
|
||||
|
||||
- Choose **Sync all resources with external storage** if you want to sync and manage your entire Grafana instance through external storage. With this option, all of your dashboards are synced to that one repository. You can only have one provisioned connection with this selection, and you won't have the option of setting up additional repositories to connect to.
|
||||
|
||||
- Choose **Sync external storage to new Grafana folder** to sync external resources into a new folder without affecting the rest of your instance. You can repeat this process for up to 10 connections.
|
||||
|
||||
1. Enter a **Display name** for the repository connection. Resources stored in this connection appear under the chosen display name in the Grafana UI.
|
||||
1. Click **Synchronize** to continue.
|
||||
|
||||
<!-- ### Synchronize with external storage
|
||||
|
||||
{{< admonition type="note">}}
|
||||
During the synchronization process, your dashboards will be temporarily unavailable.
|
||||
No data or configuration will be lost.
|
||||
However, no one will be able to create, edit, or delete resources during this process.
|
||||
In the last step, the resources will disappear and will reappear and be managed through external storage.
|
||||
{{< /admonition >}}
|
||||
|
||||
1. Select **History** to include commits for each historical value in the synchronized data.
|
||||
1. Select **Begin synchronization** to continue. -->
|
||||
Next, enter a **Display name** for the repository connection. Resources stored in this connection appear under the chosen display name in the Grafana UI. Click **Synchronize** to continue.
|
||||
|
||||
### Choose additional settings
|
||||
|
||||
@@ -163,7 +168,6 @@ Finally, you can set up how often your configured storage is polled for updates.
|
||||
|
||||
1. For **Update instance interval (seconds)**, enter how often you want the instance to pull updates from GitHub. The default value is 60 seconds.
|
||||
1. Optional: Select **Read only** to ensure resources can't be modified in Grafana.
|
||||
<!-- No workflow option listed in the UI. 1. For **Workflows**, select the GitHub workflows that you want to allow to run in the repository. Both **Branch** and **Write** are selected by default. -->
|
||||
1. Optional: If you have the Grafana Image Renderer plugin configured, you can **Enable dashboards previews in pull requests**. If image rendering is not available, then you can't select this option. For more information, refer to the [Image Renderer service](https://github.com/grafana/grafana-image-renderer).
|
||||
1. Select **Finish** to proceed.
|
||||
|
||||
@@ -179,23 +183,11 @@ You can extend Git Sync by getting instant updates and pull requests using webho
|
||||
|
||||
### Set up webhooks for realtime notification and pull request integration
|
||||
|
||||
When connecting to a GitHub repository, Git Sync use webhooks to enable real-time updates from GitHub public repositories or enable the pull request integration.
|
||||
Without webhooks, the polling interval is set in the final configuration screen (default is 60 seconds).
|
||||
Your Grafana instance must be exposed to the public internet.
|
||||
You can do this via port forwarding and DNS, a tool such as `ngrok`, or any other method you prefer.
|
||||
When connecting to a GitHub repository, Git Sync uses webhooks to enable real-time updates from GitHub public repositories or enable pull request integrations. Without webhooks, the polling interval is set in the final configuration screen, and the default is 60 seconds. If you use local storage, then Git Sync only provides periodic pulling.
|
||||
|
||||
The permissions set in your GitHub access token provide the authorization for this communication.
|
||||
You can set up webhooks with whichever service or tooling you prefer. You can use Cloudflare Tunnels with a Cloudflare-managed domain, port-forwarding and DNS options, or a tool such as `ngrok`.
|
||||
|
||||
If you use local storage, then Git Sync only provides periodic pulling.
|
||||
|
||||
<!-- Grafana Cloud support not available yet
|
||||
{{< admonition type="note" >}}
|
||||
Webhooks are automatically available for Grafana Cloud users.
|
||||
{{< /admonition >}}
|
||||
-->
|
||||
|
||||
Set up webhooks with whichever service or tooling you prefer.
|
||||
For example, you can use Cloudflare Tunnels with a Cloudflare-managed domain, port-forwarding and DNS options, or a tool such as `ngrok`.
|
||||
To set up webhooks you need to expose your Grafana instance to the public Internet. You can do this via port forwarding and DNS, a tool such as `ngrok`, or any other method you prefer. The permissions set in your GitHub access token provide the authorization for this communication.
|
||||
|
||||
After you have the public URL, you can add it to your Grafana configuration file:
|
||||
|
||||
@@ -204,23 +196,22 @@ After you have the public URL, you can add it to your Grafana configuration file
|
||||
root_url = https://PUBLIC_DOMAIN.HERE
|
||||
```
|
||||
|
||||
You can check the configured webhooks in the **View** link for your GitHub repository from **Administration** > **Provisioning**.
|
||||
To check the configured webhooks, go to **Administration** > **Provisioning** and click the **View** link for your GitHub repository.
|
||||
|
||||
#### Necessary paths
|
||||
#### Expose necessary paths only
|
||||
|
||||
If your security setup does not permit publicly exposing the Grafana instance, you can either choose to allowlist the GitHub IP addresses, or expose only the necessary paths.
|
||||
If your security setup does not permit publicly exposing the Grafana instance, you can either choose to `allowlist` the GitHub IP addresses, or expose only the necessary paths.
|
||||
|
||||
The necessary paths required to be exposed are (RegExp):
|
||||
The necessary paths required to be exposed are, in RegExp:
|
||||
|
||||
- `/apis/provisioning\.grafana\.app/v0(alpha1)?/namespaces/[^/]+/repositories/[^/]+/(webhook|render/.*)$`
|
||||
<!-- TODO: Path for the blob storage for image rendering? @ryantxu would know this best. -->
|
||||
|
||||
### Set up image rendering for dashboard previews
|
||||
|
||||
By setting up image rendering, you can add visual previews of dashboard updates directly in pull requests.
|
||||
Image rendering also requires webhooks.
|
||||
Set up image rendering to add visual previews of dashboard updates directly in pull requests. Image rendering also requires webhooks.
|
||||
|
||||
You can enable this capability by installing the Grafana Image Renderer in your Grafana instance. For more information and installation instructions, refer to the [Image Renderer service](https://github.com/grafana/grafana-image-renderer).
|
||||
To enable this capability, install the Grafana Image Renderer in your Grafana instance. For more information and installation instructions, refer to the [Image Renderer service](https://github.com/grafana/grafana-image-renderer).
|
||||
|
||||
## Modify configurations after set up is complete
|
||||
|
||||
|
||||
@@ -10,6 +10,12 @@ labels:
|
||||
- enterprise
|
||||
- oss
|
||||
- cloud
|
||||
refs:
|
||||
roles-and-permissions:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/account-management/authentication-and-permissions/cloud-roles/
|
||||
title: Git Sync
|
||||
weight: 100
|
||||
---
|
||||
@@ -63,14 +69,36 @@ With Git Sync, you can make changes to the files in the provisioned folder in Gi
|
||||
|
||||
## Known limitations
|
||||
|
||||
Git Sync is under development and the following limitations apply:
|
||||
{{< admonition type="caution" >}}
|
||||
|
||||
Refer to [Requirements](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/observability-as-code/provision-resources/git-sync-setup#requirements/) to learn what you need to use Git Sync.
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
**Git Sync is under development and the following limitations apply.**
|
||||
|
||||
**Synced resources**
|
||||
|
||||
- You can only sync dashboards and folders. Refer to [Supported resources](#supported-resources) for more information.
|
||||
- If you're using Git Sync in Grafana OSS and Grafana Enterprise, some resources might be in an incompatible data format and can't be synced.
|
||||
- You can only authenticate in GitHub using your Personal Access Token token.
|
||||
- Support for native Git, Git app, and other providers, such as GitLab or Bitbucket, is on the roadmap.
|
||||
- If you're using Git Sync in Grafana OSS and Grafana Enterprise, some resources might be in an incompatible data format and won't be synced.
|
||||
- Full-instance sync is not available in Grafana Cloud and has limitations in Grafana OSS and Grafana Enterprise. Refer to [Choose what to synchronize](../git-sync-setup.md#choose-what-to-synchronize) for more details.
|
||||
- When migrating to full instance sync, during the synchronization process your resources will be temporarily unavailable. No one will be able to create, edit, or delete resources during this process.
|
||||
- If you want to manage existing resources with Git Sync, you need to save them as JSON files and commit them to the synced repository. Open a PR to import, copy, move, or save a dashboard.
|
||||
- Restoring resources from the UI is currently not possible. As an alternative, you can restore dashboards directly in your GitHub repository by raising a PR, and they will be updated in Grafana.
|
||||
|
||||
**Authentication**
|
||||
|
||||
- You can only authenticate in GitHub using your Personal Access Token token.
|
||||
|
||||
**Permission management**
|
||||
|
||||
- You cannot modify the permissions of a provisioned folder after you've synced it.
|
||||
- Default permissions are: Admin = Admin, Editor = Editor, and Viewer = Viewer. Refer to [Roles and permissions](ref:roles-and-permissions) for more information.
|
||||
|
||||
**Compatibility**
|
||||
|
||||
- Support for native Git, Git app, and other providers, such as GitLab or Bitbucket, is on the roadmap.
|
||||
|
||||
## Supported resources
|
||||
|
||||
Git Sync only supports dashboards and folders. Alerts, panels, and other resources are not supported yet.
|
||||
@@ -86,9 +114,9 @@ A resource can be:
|
||||
| **Supported** | The resource can be managed with Git Sync. | The resource is supported but has compatibility issues. It **cannot** be managed with Git Sync. |
|
||||
| **Unsupported** | The resource is **not** supported and **cannot** be managed with Git Sync. | Not applicable. |
|
||||
|
||||
### Instance states
|
||||
### Git Sync instance states
|
||||
|
||||
An instance can be in one of the following states:
|
||||
An instance can be in one of the following Git Sync states:
|
||||
|
||||
- **Unprovisioned**: None of the instance's resources are being managed by Git Sync.
|
||||
- **Partially provisioned**: Some of the resources are controlled by Git Sync.
|
||||
|
||||
@@ -74,6 +74,7 @@ Most [generally available](https://grafana.com/docs/release-life-cycle/#general-
|
||||
| `elasticsearchCrossClusterSearch` | Enables cross cluster search in the Elasticsearch data source | |
|
||||
| `lokiLabelNamesQueryApi` | Defaults to using the Loki `/labels` API instead of `/series` | Yes |
|
||||
| `improvedExternalSessionHandlingSAML` | Enables improved support for SAML external sessions. Ensure the NameID format is correctly configured in Grafana for SAML Single Logout to function properly. | Yes |
|
||||
| `newLogsPanel` | Enables the new logs panel | Yes |
|
||||
| `alertingMigrationUI` | Enables the alerting migration UI, to migrate data source-managed rules to Grafana-managed rules | Yes |
|
||||
| `alertingImportYAMLUI` | Enables a UI feature for importing rules from a Prometheus file to Grafana-managed rules | Yes |
|
||||
| `unifiedNavbars` | Enables unified navbars | |
|
||||
|
||||
@@ -0,0 +1,87 @@
|
||||
import { test, expect } from '@grafana/plugin-e2e';
|
||||
|
||||
const DASHBOARD_UID = 'a2f4ad9e-3b44-4624-8067-35f31be5d309';
|
||||
|
||||
test.use({
|
||||
viewport: { width: 1280, height: 2000 },
|
||||
});
|
||||
|
||||
test.describe('Panels test: StatusHistory', { tag: ['@panels', '@status-history'] }, () => {
|
||||
test('renders successfully', async ({ gotoDashboardPage, selectors, page }) => {
|
||||
const dashboardPage = await gotoDashboardPage({
|
||||
uid: DASHBOARD_UID,
|
||||
});
|
||||
|
||||
// check that gauges are rendered
|
||||
const statusHistoryUplot = page.locator('.uplot');
|
||||
await expect(statusHistoryUplot, 'panels are rendered').toHaveCount(11);
|
||||
|
||||
// check that no panel errors exist
|
||||
const errorInfo = dashboardPage.getByGrafanaSelector(selectors.components.Panels.Panel.headerCornerInfo('error'));
|
||||
await expect(errorInfo, 'no errors in the panels').toBeHidden();
|
||||
});
|
||||
|
||||
test('"no data"', async ({ gotoDashboardPage, selectors, page }) => {
|
||||
const dashboardPage = await gotoDashboardPage({
|
||||
uid: DASHBOARD_UID,
|
||||
queryParams: new URLSearchParams({ editPanel: '15' }),
|
||||
});
|
||||
|
||||
const statusHistoryUplot = page.locator('.uplot');
|
||||
await expect(statusHistoryUplot, "that uplot doesn't appear").toBeHidden();
|
||||
|
||||
const emptyMessage = dashboardPage.getByGrafanaSelector(selectors.components.Panels.Panel.PanelDataErrorMessage);
|
||||
await expect(emptyMessage, 'that the empty text appears').toHaveText('No data');
|
||||
|
||||
// update the "No value" option and see if the panel updates
|
||||
const noValueOption = dashboardPage
|
||||
.getByGrafanaSelector(selectors.components.PanelEditor.OptionsPane.fieldLabel('Standard options No value'))
|
||||
.locator('input');
|
||||
|
||||
await noValueOption.fill('My empty value');
|
||||
await noValueOption.blur();
|
||||
await expect(emptyMessage, 'that the empty text has changed').toHaveText('My empty value');
|
||||
});
|
||||
|
||||
test('tooltip interactions', async ({ gotoDashboardPage, page, selectors }) => {
|
||||
const dashboardPage = await gotoDashboardPage({
|
||||
uid: DASHBOARD_UID,
|
||||
queryParams: new URLSearchParams({ editPanel: '13' }),
|
||||
});
|
||||
|
||||
const statusHistoryUplot = page.locator('.uplot');
|
||||
await expect(statusHistoryUplot, 'uplot is rendered').toBeVisible();
|
||||
|
||||
const tooltip = dashboardPage.getByGrafanaSelector(selectors.components.Panels.Visualization.Tooltip.Wrapper);
|
||||
|
||||
// hover over a spot to trigger the tooltip
|
||||
await statusHistoryUplot.hover({ position: { x: 100, y: 50 } });
|
||||
await expect(tooltip, 'tooltip appears on hover').toBeVisible();
|
||||
await expect(tooltip, 'tooltip displays the value').toContainText('value5');
|
||||
|
||||
// click to pin the tooltip, hover away to be sure it's pinned
|
||||
await statusHistoryUplot.click({ position: { x: 100, y: 50 } });
|
||||
await statusHistoryUplot.hover({ position: { x: 300, y: 50 } });
|
||||
await expect(tooltip, 'tooltip pinned on click').toBeVisible();
|
||||
await expect(tooltip, 'tooltip displays the first value').toContainText('value5');
|
||||
|
||||
// unpin the tooltip, ensure it closes on hover away
|
||||
await statusHistoryUplot.click({ position: { x: 300, y: 50 } });
|
||||
await statusHistoryUplot.blur();
|
||||
await expect(tooltip, 'tooltip closed after unpinning and hovering away').toBeHidden();
|
||||
|
||||
// test clicking the "x" as well
|
||||
await statusHistoryUplot.click({ position: { x: 100, y: 50 } });
|
||||
await expect(tooltip, 'tooltip appears on click').toBeVisible();
|
||||
await dashboardPage.getByGrafanaSelector(selectors.components.Portal.container).getByLabel('Close').click();
|
||||
await expect(tooltip, 'tooltip closed on "x" click').toBeHidden();
|
||||
|
||||
// disable tooltips
|
||||
await dashboardPage
|
||||
.getByGrafanaSelector(selectors.components.PanelEditor.OptionsPane.fieldLabel('Tooltip Tooltip mode'))
|
||||
.getByLabel('Hidden')
|
||||
.click();
|
||||
await statusHistoryUplot.hover({ position: { x: 100, y: 50 } });
|
||||
await expect(tooltip, 'tooltip is not shown when disabled').toBeHidden();
|
||||
});
|
||||
});
|
||||
@@ -110,7 +110,6 @@ require (
|
||||
github.com/grafana/otel-profiling-go v0.5.1 // @grafana/grafana-backend-group
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // @grafana/observability-traces-and-profiling
|
||||
github.com/grafana/pyroscope/api v1.2.1-0.20250415190842-3ff7247547ae // @grafana/observability-traces-and-profiling
|
||||
github.com/grafana/tempo v1.5.1-0.20250529124718-87c2dc380cec // @grafana/observability-traces-and-profiling
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // @grafana/grafana-search-and-storage
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 // @grafana/plugins-platform-backend
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // @grafana/grafana-backend-group
|
||||
@@ -171,6 +170,7 @@ require (
|
||||
github.com/spf13/pflag v1.0.10 // @grafana-app-platform-squad
|
||||
github.com/spyzhov/ajson v0.9.6 // @grafana/grafana-sharing-squad
|
||||
github.com/stretchr/testify v1.11.1 // @grafana/grafana-backend-group
|
||||
github.com/testcontainers/testcontainers-go v0.36.0 //@grafana/grafana-app-platform-squad
|
||||
github.com/thomaspoignant/go-feature-flag v1.42.0 // @grafana/grafana-backend-group
|
||||
github.com/tjhop/slog-gokit v0.1.3 // @grafana/grafana-app-platform-squad
|
||||
github.com/ua-parser/uap-go v0.0.0-20250213224047-9c035f085b90 // @grafana/grafana-backend-group
|
||||
@@ -401,7 +401,7 @@ require (
|
||||
github.com/diegoholiveira/jsonlogic/v3 v3.7.4 // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/docker v28.4.0+incompatible // indirect
|
||||
github.com/docker/go-connections v0.6.0 // indirect
|
||||
github.com/docker/go-connections v0.6.0 // indirect; @grafana/grafana-app-platform-squad
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 // indirect
|
||||
github.com/dolthub/jsonpath v0.0.2-0.20240227200619-19675ab05c71 // indirect
|
||||
@@ -653,7 +653,15 @@ require (
|
||||
sigs.k8s.io/yaml v1.6.0 // indirect
|
||||
)
|
||||
|
||||
require github.com/grafana/tempo v1.5.1-0.20250529124718-87c2dc380cec // @grafana/observability-traces-and-profiling
|
||||
|
||||
require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/cpuguy83/dockercfg v0.3.2 // indirect
|
||||
github.com/ebitengine/purego v0.8.4 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-openapi/swag/conv v0.25.1 // indirect
|
||||
github.com/go-openapi/swag/fileutils v0.25.1 // indirect
|
||||
github.com/go-openapi/swag/jsonname v0.25.1 // indirect
|
||||
@@ -663,6 +671,20 @@ require (
|
||||
github.com/go-openapi/swag/stringutils v0.25.1 // indirect
|
||||
github.com/go-openapi/swag/typeutils v0.25.1 // indirect
|
||||
github.com/go-openapi/swag/yamlutils v0.25.1 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
|
||||
github.com/magiconair/properties v1.8.10 // indirect
|
||||
github.com/moby/go-archive v0.1.0 // indirect
|
||||
github.com/moby/patternmatcher v0.6.0 // indirect
|
||||
github.com/moby/sys/sequential v0.6.0 // indirect
|
||||
github.com/moby/sys/user v0.4.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
|
||||
github.com/shirou/gopsutil/v4 v4.25.6 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.14 // indirect
|
||||
github.com/tklauser/numcpus v0.8.0 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
)
|
||||
|
||||
// Use fork of crewjam/saml with fixes for some issues until changes get merged into upstream
|
||||
|
||||
@@ -645,6 +645,8 @@ gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGq
|
||||
gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU=
|
||||
github.com/1NCE-GmbH/grpc-go-pool v0.0.0-20231117122434-2a5bb974daa2 h1:qFYgLH2zZe3WHpQgUrzeazC+ebDebwAQqS9yE1cP5Bs=
|
||||
github.com/1NCE-GmbH/grpc-go-pool v0.0.0-20231117122434-2a5bb974daa2/go.mod h1:09/ALd1AXCTCOfcJYD8+jIYKmFmi6PVCkTsipC18F7E=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||
github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
|
||||
github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
|
||||
github.com/Azure/azure-sdk-for-go v23.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
@@ -1051,6 +1053,8 @@ github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151X
|
||||
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
|
||||
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
@@ -1060,12 +1064,16 @@ github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03V
|
||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
|
||||
github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
|
||||
github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg=
|
||||
github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
|
||||
@@ -1132,6 +1140,8 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
|
||||
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
|
||||
@@ -1251,6 +1261,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
|
||||
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
|
||||
github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
|
||||
@@ -1942,6 +1954,8 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/linode/linodego v1.47.0 h1:6MFNCyzWbr8Rhl4r7d5DwZLwxvFIsM4ARH6W0KS/R0U=
|
||||
github.com/linode/linodego v1.47.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk=
|
||||
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0=
|
||||
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
|
||||
github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
|
||||
github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
|
||||
github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
|
||||
@@ -1953,6 +1967,8 @@ github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
|
||||
github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
||||
github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE=
|
||||
github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
@@ -2066,12 +2082,20 @@ github.com/mithrandie/ternary v1.1.1 h1:k/joD6UGVYxHixYmSR8EGgDFNONBMqyD373xT4QR
|
||||
github.com/mithrandie/ternary v1.1.1/go.mod h1:0D9Ba3+09K2TdSZO7/bFCC0GjSXetCvYuYq0u8FY/1g=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
|
||||
github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo=
|
||||
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
|
||||
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
|
||||
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
|
||||
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
|
||||
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
|
||||
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
|
||||
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
|
||||
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
|
||||
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
|
||||
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
|
||||
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
|
||||
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/mocktools/go-smtp-mock/v2 v2.5.1 h1:QcMJMChSgG1olVj4o6xxQFdrWzRjYNrcq660HAjd0wA=
|
||||
@@ -2218,6 +2242,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/pressly/goose/v3 v3.25.0 h1:6WeYhMWGRCzpyd89SpODFnCBCKz41KrVbRT58nVjGng=
|
||||
github.com/pressly/goose/v3 v3.25.0/go.mod h1:4hC1KrritdCxtuFsqgs1R4AU5bWtTAf+cnWvfhf2DNY=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
@@ -2365,6 +2391,8 @@ github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah
|
||||
github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas=
|
||||
github.com/shadowspore/fossil-delta v0.0.0-20241213113458-1d797d70cbe3 h1:/4/IJi5iyTdh6mqOUaASW148HQpujYiHl0Wl78dSOSc=
|
||||
github.com/shadowspore/fossil-delta v0.0.0-20241213113458-1d797d70cbe3/go.mod h1:aJIMhRsunltJR926EB2MUg8qHemFQDreSB33pyto2Ps=
|
||||
github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs=
|
||||
github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
|
||||
@@ -2461,6 +2489,8 @@ github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD
|
||||
github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/testcontainers/testcontainers-go v0.36.0 h1:YpffyLuHtdp5EUsI5mT4sRw8GZhO/5ozyDT1xWGXt00=
|
||||
github.com/testcontainers/testcontainers-go v0.36.0/go.mod h1:yk73GVJ0KUZIHUtFna6MO7QS144qYpoY8lEEtU9Hed0=
|
||||
github.com/thanos-io/objstore v0.0.0-20240818203309-0363dadfdfb1 h1:z0v9BB/p7s4J6R//+0a5M3wCld8KzNjrGRLIwXfrAZk=
|
||||
github.com/thanos-io/objstore v0.0.0-20240818203309-0363dadfdfb1/go.mod h1:3ukSkG4rIRUGkKM4oIz+BSuUx2e3RlQVVv3Cc3W+Tv4=
|
||||
github.com/thejerf/slogassert v0.3.4 h1:VoTsXixRbXMrRSSxDjYTiEDCM4VWbsYPW5rB/hX24kM=
|
||||
@@ -2471,6 +2501,10 @@ github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1C
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tjhop/slog-gokit v0.1.3 h1:6SdexP3UIeg93KLFeiM1Wp1caRwdTLgsD/THxBUy1+o=
|
||||
github.com/tjhop/slog-gokit v0.1.3/go.mod h1:Bbu5v2748qpAWH7k6gse/kw3076IJf6owJmh7yArmJs=
|
||||
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
|
||||
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
|
||||
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
|
||||
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
|
||||
github.com/trivago/tgo v1.0.7/go.mod h1:w4dpD+3tzNIIiIfkWWa85w5/B77tlvdZckQ+6PkFnhc=
|
||||
@@ -2547,6 +2581,8 @@ github.com/yuin/gopher-lua v0.0.0-20190206043414-8bfc7677f583/go.mod h1:gqRgreBU
|
||||
github.com/yuin/gopher-lua v0.0.0-20191213034115-f46add6fdb5c/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
|
||||
github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
|
||||
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
github.com/zclconf/go-cty v1.16.3 h1:osr++gw2T61A8KVYHoQiFbFd1Lh3JOCXc/jFLJXKTxk=
|
||||
github.com/zclconf/go-cty v1.16.3/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
|
||||
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo=
|
||||
@@ -2995,6 +3031,7 @@ golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -3027,6 +3064,7 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -3609,8 +3647,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
|
||||
gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
|
||||
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
||||
+299
-39
File diff suppressed because it is too large
Load Diff
@@ -35,17 +35,20 @@ const sourceFiles = teamFiles.filter((file) => {
|
||||
const ext = path.extname(file);
|
||||
return (
|
||||
['.ts', '.tsx', '.js', '.jsx'].includes(ext) &&
|
||||
// exclude all tests
|
||||
// exclude all tests and mocks
|
||||
!path.matchesGlob(file, '**/test/**/*') &&
|
||||
!file.includes('.test.') &&
|
||||
!file.includes('.spec.') &&
|
||||
!path.matchesGlob(file, '**/__mocks__/**/*') &&
|
||||
// and storybook stories
|
||||
!file.includes('.story.') &&
|
||||
// and generated files
|
||||
!file.includes('.gen.ts') &&
|
||||
// and type definitions
|
||||
!file.includes('.d.ts') &&
|
||||
!file.endsWith('/types.ts')
|
||||
!file.endsWith('/types.ts') &&
|
||||
// and anything in graveyard
|
||||
!path.matchesGlob(file, '**/graveyard/**/*')
|
||||
);
|
||||
});
|
||||
|
||||
|
||||
+1
@@ -845,6 +845,7 @@ export type DashboardAccess = {
|
||||
/** The permissions part */
|
||||
canSave: boolean;
|
||||
canStar: boolean;
|
||||
isPublic: boolean;
|
||||
/** Metadata fields */
|
||||
slug?: string;
|
||||
url?: string;
|
||||
|
||||
+6
-1
@@ -729,6 +729,10 @@ export interface FeatureToggles {
|
||||
*/
|
||||
timeRangeProvider?: boolean;
|
||||
/**
|
||||
* Enables time range panning functionality
|
||||
*/
|
||||
timeRangePan?: boolean;
|
||||
/**
|
||||
* Disables the log limit restriction for Azure Monitor when true. The limit is enabled by default.
|
||||
* @default false
|
||||
*/
|
||||
@@ -883,7 +887,8 @@ export interface FeatureToggles {
|
||||
*/
|
||||
fetchRulesUsingPost?: boolean;
|
||||
/**
|
||||
* Enables the new logs panel in Explore
|
||||
* Enables the new logs panel
|
||||
* @default true
|
||||
*/
|
||||
newLogsPanel?: boolean;
|
||||
/**
|
||||
|
||||
+1
@@ -27,6 +27,7 @@ const dashboardToAppPlatform = (dashboard: (typeof mockTree)[number]['item']) =>
|
||||
},
|
||||
status: {},
|
||||
// TODO: Eventually add access properties, as required by tests
|
||||
access: {},
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -93,7 +93,7 @@ export const getCardContainerStyles = (
|
||||
display: 'grid',
|
||||
position: 'relative',
|
||||
gridTemplateColumns: 'auto 1fr auto',
|
||||
gridTemplateRows: '1fr auto auto auto',
|
||||
gridTemplateRows: 'auto auto 1fr auto',
|
||||
gridAutoColumns: '1fr',
|
||||
gridAutoFlow: 'row',
|
||||
gridTemplateAreas: `
|
||||
|
||||
@@ -212,7 +212,7 @@ func (sk8s *shortURLK8sHandler) createKubernetesShortURLsHandler(c *contextmodel
|
||||
|
||||
c.Logger.Debug("Creating short URL", "path", cmd.Path)
|
||||
obj := shorturl.LegacyCreateCommandToUnstructured(cmd)
|
||||
obj.SetGenerateName("u") // becomes a prefix
|
||||
obj.SetGenerateName("s") // becomes a prefix
|
||||
|
||||
out, err := client.Create(c.Req.Context(), &obj, v1.CreateOptions{})
|
||||
if err != nil {
|
||||
|
||||
+1
-1
@@ -5,7 +5,7 @@ go 1.25.3
|
||||
// Override docker/docker to avoid:
|
||||
// go: github.com/drone-runners/drone-runner-docker@v1.8.2 requires
|
||||
// github.com/docker/docker@v0.0.0-00010101000000-000000000000: invalid version: unknown revision 000000000000
|
||||
replace github.com/docker/docker => github.com/moby/moby v27.5.1+incompatible
|
||||
replace github.com/docker/docker => github.com/moby/moby v28.0.1+incompatible
|
||||
|
||||
require (
|
||||
github.com/google/uuid v1.6.0 // indirect; @grafana/grafana-backend-group
|
||||
|
||||
@@ -58,4 +58,5 @@ import (
|
||||
|
||||
_ "github.com/grafana/grafana/apps/alerting/alertenrichment/pkg/apis/alertenrichment/v1beta1"
|
||||
_ "github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1"
|
||||
_ "github.com/testcontainers/testcontainers-go"
|
||||
)
|
||||
|
||||
@@ -3,11 +3,22 @@ package middleware
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
|
||||
contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/web"
|
||||
)
|
||||
|
||||
var (
|
||||
hostRedirectCounter = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "host_redirect_total",
|
||||
Help: "Number of requests redirected due to host header mismatch",
|
||||
Namespace: "grafana",
|
||||
})
|
||||
)
|
||||
|
||||
func ValidateHostHeader(cfg *setting.Cfg) web.Handler {
|
||||
return func(c *contextmodel.ReqContext) {
|
||||
// ignore local render calls
|
||||
@@ -21,6 +32,8 @@ func ValidateHostHeader(cfg *setting.Cfg) web.Handler {
|
||||
}
|
||||
|
||||
if !strings.EqualFold(h, cfg.Domain) {
|
||||
hostRedirectCounter.Inc()
|
||||
c.Logger.Info("Enforcing Host header", "hosted", c.Req.Host, "expected", cfg.Domain)
|
||||
c.Redirect(strings.TrimSuffix(cfg.AppURL, "/")+c.Req.RequestURI, 301)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -53,6 +53,7 @@ import (
|
||||
"github.com/grafana/grafana/pkg/services/libraryelements"
|
||||
"github.com/grafana/grafana/pkg/services/librarypanels"
|
||||
"github.com/grafana/grafana/pkg/services/provisioning"
|
||||
"github.com/grafana/grafana/pkg/services/publicdashboards"
|
||||
"github.com/grafana/grafana/pkg/services/quota"
|
||||
"github.com/grafana/grafana/pkg/services/search/sort"
|
||||
"github.com/grafana/grafana/pkg/services/user"
|
||||
@@ -112,6 +113,7 @@ type DashboardsAPIBuilder struct {
|
||||
dualWriter dualwrite.Service
|
||||
folderClientProvider client.K8sHandlerProvider
|
||||
libraryPanels libraryelements.Service // for legacy library panels
|
||||
publicDashboardService publicdashboards.Service
|
||||
|
||||
isStandalone bool // skips any handling including anything to do with legacy storage
|
||||
}
|
||||
@@ -140,6 +142,7 @@ func RegisterAPIService(
|
||||
restConfigProvider apiserver.RestConfigProvider,
|
||||
userService user.Service,
|
||||
libraryPanels libraryelements.Service,
|
||||
publicDashboardService publicdashboards.Service,
|
||||
) *DashboardsAPIBuilder {
|
||||
dbp := legacysql.NewDatabaseProvider(sql)
|
||||
namespacer := request.GetNamespaceMapper(cfg)
|
||||
@@ -163,6 +166,7 @@ func RegisterAPIService(
|
||||
dualWriter: dual,
|
||||
folderClientProvider: newSimpleFolderClientProvider(folderClient),
|
||||
libraryPanels: libraryPanels,
|
||||
publicDashboardService: publicDashboardService,
|
||||
|
||||
legacy: &DashboardStorage{
|
||||
Access: legacy.NewDashboardAccess(dbp, namespacer, dashStore, provisioning, libraryPanelSvc, sorter, dashboardPermissionsSvc, accessControl, features),
|
||||
@@ -652,6 +656,7 @@ func (b *DashboardsAPIBuilder) storageForVersion(
|
||||
b.accessControl,
|
||||
opts.Scheme,
|
||||
newDTOFunc,
|
||||
b.publicDashboardService,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/grafana/grafana/pkg/services/accesscontrol"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
|
||||
"github.com/grafana/grafana/pkg/services/dashboards"
|
||||
"github.com/grafana/grafana/pkg/services/publicdashboards"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/apistore"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resource"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resourcepb"
|
||||
@@ -28,13 +29,14 @@ type dtoBuilder = func(dashboard runtime.Object, access *dashboard.DashboardAcce
|
||||
|
||||
// The DTO returns everything the UI needs in a single request
|
||||
type DTOConnector struct {
|
||||
getter rest.Getter
|
||||
legacy legacy.DashboardAccess
|
||||
unified resource.ResourceClient
|
||||
largeObjects apistore.LargeObjectSupport
|
||||
accessControl accesscontrol.AccessControl
|
||||
scheme *runtime.Scheme
|
||||
builder dtoBuilder
|
||||
getter rest.Getter
|
||||
legacy legacy.DashboardAccess
|
||||
unified resource.ResourceClient
|
||||
largeObjects apistore.LargeObjectSupport
|
||||
accessControl accesscontrol.AccessControl
|
||||
scheme *runtime.Scheme
|
||||
builder dtoBuilder
|
||||
publicDashboardService publicdashboards.Service
|
||||
}
|
||||
|
||||
func NewDTOConnector(
|
||||
@@ -45,15 +47,17 @@ func NewDTOConnector(
|
||||
accessControl accesscontrol.AccessControl,
|
||||
scheme *runtime.Scheme,
|
||||
builder dtoBuilder,
|
||||
publicDashboardService publicdashboards.Service,
|
||||
) (rest.Storage, error) {
|
||||
return &DTOConnector{
|
||||
getter: getter,
|
||||
legacy: legacyAccess,
|
||||
accessControl: accessControl,
|
||||
unified: resourceClient,
|
||||
largeObjects: largeObjects,
|
||||
builder: builder,
|
||||
scheme: scheme,
|
||||
getter: getter,
|
||||
legacy: legacyAccess,
|
||||
accessControl: accessControl,
|
||||
unified: resourceClient,
|
||||
largeObjects: largeObjects,
|
||||
builder: builder,
|
||||
scheme: scheme,
|
||||
publicDashboardService: publicDashboardService,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -154,6 +158,11 @@ func (r *DTOConnector) Connect(ctx context.Context, name string, opts runtime.Ob
|
||||
access.Slug = slugify.Slugify(title)
|
||||
access.Url = dashboards.GetDashboardFolderURL(false, name, access.Slug)
|
||||
|
||||
pubDash, err := r.publicDashboardService.FindByDashboardUid(ctx, user.GetOrgID(), name)
|
||||
if err == nil && pubDash != nil {
|
||||
access.IsPublic = true
|
||||
}
|
||||
|
||||
dash, err := r.builder(rawobj, access)
|
||||
if err != nil {
|
||||
responder.Error(err)
|
||||
|
||||
@@ -146,7 +146,7 @@ func validateOnDelete(ctx context.Context,
|
||||
|
||||
for _, v := range resp.Stats {
|
||||
if v.Count > 0 {
|
||||
return folder.ErrFolderNotEmpty.Errorf("folder is not empty, contains %d resources", v.Count)
|
||||
return folder.ErrFolderNotEmpty.Errorf("folder is not empty, contains %d %s.%s", v.Count, v.Group, v.Resource)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -224,6 +224,14 @@ func (b *IdentityAccessManagementAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *ge
|
||||
return err
|
||||
}
|
||||
|
||||
// Only teamBindingStore exposes the AfterCreate, AfterDelete, and BeginUpdate hooks
|
||||
if enableZanzanaSync {
|
||||
b.logger.Info("Enabling hooks for TeamBinding to sync to Zanzana")
|
||||
teamBindingStore.AfterCreate = b.AfterTeamBindingCreate
|
||||
teamBindingStore.AfterDelete = b.AfterTeamBindingDelete
|
||||
teamBindingStore.BeginUpdate = b.BeginTeamBindingUpdate
|
||||
}
|
||||
|
||||
storage[teamBindingResource.StoragePath()] = teamBindingDW
|
||||
}
|
||||
|
||||
@@ -238,6 +246,13 @@ func (b *IdentityAccessManagementAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *ge
|
||||
return err
|
||||
}
|
||||
|
||||
if enableZanzanaSync {
|
||||
b.logger.Info("Enabling hooks for User to sync basic role assignments to Zanzana")
|
||||
store.AfterCreate = b.AfterUserCreate
|
||||
store.BeginUpdate = b.BeginUserUpdate
|
||||
store.AfterDelete = b.AfterUserDelete
|
||||
}
|
||||
|
||||
dw, err := opts.DualWriteBuilder(userResource.GroupResource(), legacyStore, store)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -0,0 +1,363 @@
|
||||
package iam
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/registry/generic/registry"
|
||||
|
||||
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
|
||||
v1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana"
|
||||
)
|
||||
|
||||
// convertTeamBindingToTuple converts a TeamBinding to a v1 TupleKey format
|
||||
// TeamBinding represents a user's membership in a team with a specific permission level
|
||||
func convertTeamBindingToTuple(tb *iamv0.TeamBinding) (*v1.TupleKey, error) {
|
||||
if tb.Spec.Subject.Name == "" {
|
||||
return nil, errEmptyName
|
||||
}
|
||||
|
||||
if tb.Spec.TeamRef.Name == "" {
|
||||
return nil, errEmptyName
|
||||
}
|
||||
|
||||
// Map permission to relation
|
||||
var relation string
|
||||
switch tb.Spec.Permission {
|
||||
case iamv0.TeamBindingTeamPermissionAdmin:
|
||||
relation = zanzana.RelationTeamAdmin
|
||||
case iamv0.TeamBindingTeamPermissionMember:
|
||||
relation = zanzana.RelationTeamMember
|
||||
default:
|
||||
// Default to member if unknown permission
|
||||
relation = zanzana.RelationTeamMember
|
||||
}
|
||||
|
||||
// Create tuple: user:{subjectUID} has {relation} relation to team:{teamUID}
|
||||
tuple := &v1.TupleKey{
|
||||
User: zanzana.NewTupleEntry(zanzana.TypeUser, tb.Spec.Subject.Name, ""),
|
||||
Relation: relation,
|
||||
Object: zanzana.NewTupleEntry(zanzana.TypeTeam, tb.Spec.TeamRef.Name, ""),
|
||||
}
|
||||
|
||||
return tuple, nil
|
||||
}
|
||||
|
||||
// AfterTeamBindingCreate is a post-create hook that writes the team binding to Zanzana (openFGA)
|
||||
func (b *IdentityAccessManagementAPIBuilder) AfterTeamBindingCreate(obj runtime.Object, _ *metav1.CreateOptions) {
|
||||
if b.zClient == nil {
|
||||
return
|
||||
}
|
||||
|
||||
tb, ok := obj.(*iamv0.TeamBinding)
|
||||
if !ok {
|
||||
b.logger.Error("failed to convert object to TeamBinding type", "object", obj)
|
||||
return
|
||||
}
|
||||
|
||||
resourceType := "teambinding"
|
||||
operation := "create"
|
||||
|
||||
// Grab a ticket to write to Zanzana
|
||||
// This limits the amount of concurrent connections to Zanzana
|
||||
wait := time.Now()
|
||||
b.zTickets <- true
|
||||
hooksWaitHistogram.WithLabelValues(resourceType, operation).Observe(time.Since(wait).Seconds())
|
||||
|
||||
go func(tb *iamv0.TeamBinding) {
|
||||
start := time.Now()
|
||||
status := "success"
|
||||
|
||||
defer func() {
|
||||
// Release the ticket after write is done
|
||||
<-b.zTickets
|
||||
// Record operation duration and count
|
||||
hooksDurationHistogram.WithLabelValues(resourceType, operation, status).Observe(time.Since(start).Seconds())
|
||||
hooksOperationCounter.WithLabelValues(resourceType, operation, status).Inc()
|
||||
}()
|
||||
|
||||
tuple, err := convertTeamBindingToTuple(tb)
|
||||
if err != nil {
|
||||
b.logger.Error("failed to convert team binding to tuple",
|
||||
"namespace", tb.Namespace,
|
||||
"name", tb.Name,
|
||||
"subject", tb.Spec.Subject.Name,
|
||||
"teamRef", tb.Spec.TeamRef.Name,
|
||||
"permission", tb.Spec.Permission,
|
||||
"err", err,
|
||||
)
|
||||
status = "failure"
|
||||
return
|
||||
}
|
||||
|
||||
b.logger.Debug("writing team binding to zanzana",
|
||||
"namespace", tb.Namespace,
|
||||
"name", tb.Name,
|
||||
"subject", tb.Spec.Subject.Name,
|
||||
"teamRef", tb.Spec.TeamRef.Name,
|
||||
"permission", tb.Spec.Permission,
|
||||
)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout)
|
||||
defer cancel()
|
||||
|
||||
err = b.zClient.Write(ctx, &v1.WriteRequest{
|
||||
Namespace: tb.Namespace,
|
||||
Writes: &v1.WriteRequestWrites{
|
||||
TupleKeys: []*v1.TupleKey{tuple},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
status = "failure"
|
||||
b.logger.Error("failed to write team binding to zanzana",
|
||||
"err", err,
|
||||
"namespace", tb.Namespace,
|
||||
"name", tb.Name,
|
||||
"subject", tb.Spec.Subject.Name,
|
||||
"teamRef", tb.Spec.TeamRef.Name,
|
||||
"permission", tb.Spec.Permission,
|
||||
)
|
||||
} else {
|
||||
// Record successful tuple write
|
||||
hooksTuplesCounter.WithLabelValues(resourceType, operation, "write").Inc()
|
||||
}
|
||||
}(tb.DeepCopy()) // Pass a copy of the object
|
||||
}
|
||||
|
||||
// BeginTeamBindingUpdate is a pre-update hook that prepares zanzana updates
|
||||
// It converts old and new team bindings to tuples and performs the zanzana write after K8s update succeeds
|
||||
func (b *IdentityAccessManagementAPIBuilder) BeginTeamBindingUpdate(ctx context.Context, obj, oldObj runtime.Object, options *metav1.UpdateOptions) (registry.FinishFunc, error) {
|
||||
if b.zClient == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Extract team bindings from both old and new objects
|
||||
oldTB, ok := oldObj.(*iamv0.TeamBinding)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
newTB, ok := obj.(*iamv0.TeamBinding)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if oldTB.Spec.Subject.Name == newTB.Spec.Subject.Name && oldTB.Spec.TeamRef.Name == newTB.Spec.TeamRef.Name && oldTB.Spec.Permission == newTB.Spec.Permission {
|
||||
return nil, nil // No changes to the team binding
|
||||
}
|
||||
|
||||
if newTB.Spec.Subject.Name == "" || newTB.Spec.TeamRef.Name == "" {
|
||||
b.logger.Error("invalid team binding",
|
||||
"namespace", newTB.Namespace,
|
||||
"name", newTB.Name,
|
||||
"subject", newTB.Spec.Subject.Name,
|
||||
"teamRef", newTB.Spec.TeamRef.Name,
|
||||
)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Convert old team binding to tuple for deletion
|
||||
var oldTuple *v1.TupleKey
|
||||
var oldErr error
|
||||
if oldTB.Spec.Subject.Name != "" && oldTB.Spec.TeamRef.Name != "" {
|
||||
oldTuple, oldErr = convertTeamBindingToTuple(oldTB)
|
||||
if oldErr != nil {
|
||||
b.logger.Error("failed to convert old team binding to tuple",
|
||||
"namespace", oldTB.Namespace,
|
||||
"name", oldTB.Name,
|
||||
"err", oldErr,
|
||||
)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Convert new team binding to tuple for writing
|
||||
var newTuple *v1.TupleKey
|
||||
var newErr error
|
||||
if newTB.Spec.Subject.Name != "" && newTB.Spec.TeamRef.Name != "" {
|
||||
newTuple, newErr = convertTeamBindingToTuple(newTB)
|
||||
if newErr != nil {
|
||||
b.logger.Error("failed to convert new team binding to tuple",
|
||||
"namespace", newTB.Namespace,
|
||||
"name", newTB.Name,
|
||||
"err", newErr,
|
||||
)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Return a finish function that performs the zanzana write only on success
|
||||
return func(ctx context.Context, success bool) {
|
||||
if !success {
|
||||
return
|
||||
}
|
||||
|
||||
wait := time.Now()
|
||||
b.zTickets <- true
|
||||
hooksWaitHistogram.WithLabelValues("teambinding", "update").Observe(time.Since(wait).Seconds())
|
||||
|
||||
go func() {
|
||||
start := time.Now()
|
||||
status := "success"
|
||||
|
||||
defer func() {
|
||||
<-b.zTickets
|
||||
// Record operation duration and count
|
||||
hooksDurationHistogram.WithLabelValues("teambinding", "update", status).Observe(time.Since(start).Seconds())
|
||||
hooksOperationCounter.WithLabelValues("teambinding", "update", status).Inc()
|
||||
}()
|
||||
|
||||
b.logger.Debug("updating team binding in zanzana",
|
||||
"namespace", newTB.Namespace,
|
||||
"name", newTB.Name,
|
||||
"oldSubject", oldTB.Spec.Subject.Name,
|
||||
"newSubject", newTB.Spec.Subject.Name,
|
||||
"oldTeamRef", oldTB.Spec.TeamRef.Name,
|
||||
"newTeamRef", newTB.Spec.TeamRef.Name,
|
||||
"oldPermission", oldTB.Spec.Permission,
|
||||
"newPermission", newTB.Spec.Permission,
|
||||
)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Prepare write request
|
||||
req := &v1.WriteRequest{
|
||||
Namespace: newTB.Namespace,
|
||||
}
|
||||
|
||||
// Add delete for old tuple
|
||||
if oldTuple != nil && oldErr == nil {
|
||||
deleteTuple := toTupleKeysWithoutCondition([]*v1.TupleKey{oldTuple})
|
||||
req.Deletes = &v1.WriteRequestDeletes{
|
||||
TupleKeys: deleteTuple,
|
||||
}
|
||||
b.logger.Debug("deleting existing team binding from zanzana",
|
||||
"namespace", newTB.Namespace,
|
||||
"subject", oldTB.Spec.Subject.Name,
|
||||
"teamRef", oldTB.Spec.TeamRef.Name,
|
||||
)
|
||||
}
|
||||
|
||||
// Add write for new tuple
|
||||
if newTuple != nil && newErr == nil {
|
||||
req.Writes = &v1.WriteRequestWrites{
|
||||
TupleKeys: []*v1.TupleKey{newTuple},
|
||||
}
|
||||
b.logger.Debug("writing new team binding to zanzana",
|
||||
"namespace", newTB.Namespace,
|
||||
"subject", newTB.Spec.Subject.Name,
|
||||
"teamRef", newTB.Spec.TeamRef.Name,
|
||||
)
|
||||
}
|
||||
|
||||
// Only make the request if there are deletes or writes
|
||||
if (req.Deletes != nil && len(req.Deletes.TupleKeys) > 0) || (req.Writes != nil && len(req.Writes.TupleKeys) > 0) {
|
||||
err := b.zClient.Write(ctx, req)
|
||||
if err != nil {
|
||||
status = "failure"
|
||||
b.logger.Error("failed to update team binding in zanzana",
|
||||
"err", err,
|
||||
"namespace", newTB.Namespace,
|
||||
"name", newTB.Name,
|
||||
)
|
||||
} else {
|
||||
// Record successful tuple operations
|
||||
if oldTuple != nil && oldErr == nil {
|
||||
hooksTuplesCounter.WithLabelValues("teambinding", "update", "delete").Inc()
|
||||
}
|
||||
if newTuple != nil && newErr == nil {
|
||||
hooksTuplesCounter.WithLabelValues("teambinding", "update", "write").Inc()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
b.logger.Debug("no tuples to update in zanzana", "namespace", newTB.Namespace, "name", newTB.Name)
|
||||
}
|
||||
}()
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AfterTeamBindingDelete is a post-delete hook that removes the team binding from Zanzana (openFGA)
|
||||
func (b *IdentityAccessManagementAPIBuilder) AfterTeamBindingDelete(obj runtime.Object, _ *metav1.DeleteOptions) {
|
||||
if b.zClient == nil {
|
||||
return
|
||||
}
|
||||
|
||||
tb, ok := obj.(*iamv0.TeamBinding)
|
||||
if !ok {
|
||||
b.logger.Error("failed to convert object to TeamBinding type", "object", obj)
|
||||
return
|
||||
}
|
||||
|
||||
resourceType := "teambinding"
|
||||
operation := "delete"
|
||||
|
||||
// Grab a ticket to write to Zanzana
|
||||
// This limits the amount of concurrent connections to Zanzana
|
||||
wait := time.Now()
|
||||
b.zTickets <- true
|
||||
hooksWaitHistogram.WithLabelValues(resourceType, operation).Observe(time.Since(wait).Seconds())
|
||||
|
||||
go func(tb *iamv0.TeamBinding) {
|
||||
start := time.Now()
|
||||
status := "success"
|
||||
|
||||
defer func() {
|
||||
// Release the ticket after write is done
|
||||
<-b.zTickets
|
||||
// Record operation duration and count
|
||||
hooksDurationHistogram.WithLabelValues(resourceType, operation, status).Observe(time.Since(start).Seconds())
|
||||
hooksOperationCounter.WithLabelValues(resourceType, operation, status).Inc()
|
||||
}()
|
||||
|
||||
tuple, err := convertTeamBindingToTuple(tb)
|
||||
if err != nil {
|
||||
b.logger.Error("failed to convert team binding to tuple for deletion",
|
||||
"namespace", tb.Namespace,
|
||||
"name", tb.Name,
|
||||
"subject", tb.Spec.Subject.Name,
|
||||
"teamRef", tb.Spec.TeamRef.Name,
|
||||
"err", err,
|
||||
)
|
||||
status = "failure"
|
||||
return
|
||||
}
|
||||
|
||||
// Convert tuple to TupleKeyWithoutCondition for deletion
|
||||
deleteTuple := toTupleKeysWithoutCondition([]*v1.TupleKey{tuple})
|
||||
|
||||
b.logger.Debug("deleting team binding from zanzana",
|
||||
"namespace", tb.Namespace,
|
||||
"name", tb.Name,
|
||||
"subject", tb.Spec.Subject.Name,
|
||||
"teamRef", tb.Spec.TeamRef.Name,
|
||||
"permission", tb.Spec.Permission,
|
||||
)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout)
|
||||
defer cancel()
|
||||
|
||||
err = b.zClient.Write(ctx, &v1.WriteRequest{
|
||||
Namespace: tb.Namespace,
|
||||
Deletes: &v1.WriteRequestDeletes{
|
||||
TupleKeys: deleteTuple,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
status = "failure"
|
||||
b.logger.Error("failed to delete team binding from zanzana",
|
||||
"err", err,
|
||||
"namespace", tb.Namespace,
|
||||
"name", tb.Name,
|
||||
"subject", tb.Spec.Subject.Name,
|
||||
"teamRef", tb.Spec.TeamRef.Name,
|
||||
)
|
||||
} else {
|
||||
// Record successful tuple deletion
|
||||
hooksTuplesCounter.WithLabelValues(resourceType, operation, "delete").Inc()
|
||||
}
|
||||
}(tb.DeepCopy()) // Pass a copy of the object
|
||||
}
|
||||
@@ -0,0 +1,966 @@
|
||||
package iam
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
v1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAfterTeamBindingCreate(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
b := &IdentityAccessManagementAPIBuilder{
|
||||
logger: log.NewNopLogger(),
|
||||
zTickets: make(chan bool, 1),
|
||||
}
|
||||
|
||||
t.Run("should create zanzana entry for team binding with member permission", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
teamBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-1",
|
||||
Namespace: "org-1",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
External: false,
|
||||
},
|
||||
}
|
||||
|
||||
testMemberBinding := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.NotNil(t, req.Writes)
|
||||
require.Len(t, req.Writes.TupleKeys, 1)
|
||||
require.Equal(t, "org-1", req.Namespace)
|
||||
require.Nil(t, req.Deletes)
|
||||
|
||||
expectedTuple := &v1.TupleKey{
|
||||
User: "user:user-1",
|
||||
Relation: "member",
|
||||
Object: "team:team-1",
|
||||
}
|
||||
|
||||
actualTuple := req.Writes.TupleKeys[0]
|
||||
require.Equal(t, expectedTuple.User, actualTuple.User)
|
||||
require.Equal(t, expectedTuple.Relation, actualTuple.Relation)
|
||||
require.Equal(t, expectedTuple.Object, actualTuple.Object)
|
||||
require.Nil(t, actualTuple.Condition)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testMemberBinding}
|
||||
b.AfterTeamBindingCreate(&teamBinding, nil)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should create zanzana entry for team binding with admin permission", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
teamBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-2",
|
||||
Namespace: "org-2",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-2",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-2",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionAdmin,
|
||||
External: true,
|
||||
},
|
||||
}
|
||||
|
||||
testAdminBinding := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.NotNil(t, req.Writes)
|
||||
require.Len(t, req.Writes.TupleKeys, 1)
|
||||
require.Equal(t, "org-2", req.Namespace)
|
||||
require.Nil(t, req.Deletes)
|
||||
|
||||
expectedTuple := &v1.TupleKey{
|
||||
User: "user:user-2",
|
||||
Relation: "admin",
|
||||
Object: "team:team-2",
|
||||
}
|
||||
|
||||
actualTuple := req.Writes.TupleKeys[0]
|
||||
require.Equal(t, expectedTuple.User, actualTuple.User)
|
||||
require.Equal(t, expectedTuple.Relation, actualTuple.Relation)
|
||||
require.Equal(t, expectedTuple.Object, actualTuple.Object)
|
||||
require.Nil(t, actualTuple.Condition)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testAdminBinding}
|
||||
b.AfterTeamBindingCreate(&teamBinding, nil)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should not write to zanzana when zClient is nil", func(t *testing.T) {
|
||||
builder := &IdentityAccessManagementAPIBuilder{
|
||||
logger: log.NewNopLogger(),
|
||||
zTickets: make(chan bool, 1),
|
||||
zClient: nil,
|
||||
}
|
||||
|
||||
teamBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-3",
|
||||
Namespace: "org-3",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-3",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-3",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
// Should not panic or error when zClient is nil
|
||||
builder.AfterTeamBindingCreate(&teamBinding, nil)
|
||||
})
|
||||
|
||||
t.Run("should handle conversion error gracefully", func(t *testing.T) {
|
||||
// TeamBinding with empty subject name should fail conversion
|
||||
teamBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-4",
|
||||
Namespace: "org-4",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "", // Empty name should cause error
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-4",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
writeCalled := false
|
||||
testErrorHandling := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
writeCalled = true
|
||||
// Should not be called due to conversion error
|
||||
require.Fail(t, "Write should not be called when conversion fails")
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testErrorHandling}
|
||||
b.AfterTeamBindingCreate(&teamBinding, nil)
|
||||
// Wait a bit to ensure the goroutine has time to process
|
||||
// The goroutine will complete but won't call the write callback
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
require.False(t, writeCalled, "Write callback should not be called when conversion fails")
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeginTeamBindingUpdate(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
b := &IdentityAccessManagementAPIBuilder{
|
||||
logger: log.NewNopLogger(),
|
||||
zTickets: make(chan bool, 1),
|
||||
}
|
||||
|
||||
t.Run("should update zanzana entry when permission changes from member to admin", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
oldBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-1",
|
||||
Namespace: "org-1",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
newBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-1",
|
||||
Namespace: "org-1",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionAdmin,
|
||||
},
|
||||
}
|
||||
|
||||
testPermissionUpdate := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.Equal(t, "org-1", req.Namespace)
|
||||
|
||||
// Should delete old member permission
|
||||
require.NotNil(t, req.Deletes)
|
||||
require.Len(t, req.Deletes.TupleKeys, 1)
|
||||
require.Equal(
|
||||
t,
|
||||
req.Deletes.TupleKeys[0],
|
||||
&v1.TupleKeyWithoutCondition{User: "user:user-1", Relation: "member", Object: "team:team-1"},
|
||||
)
|
||||
|
||||
// Should write new admin permission
|
||||
require.NotNil(t, req.Writes)
|
||||
require.Len(t, req.Writes.TupleKeys, 1)
|
||||
require.Equal(
|
||||
t,
|
||||
req.Writes.TupleKeys[0],
|
||||
&v1.TupleKey{User: "user:user-1", Relation: "admin", Object: "team:team-1"},
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testPermissionUpdate}
|
||||
|
||||
finishFunc, err := b.BeginTeamBindingUpdate(context.Background(), &newBinding, &oldBinding, nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, finishFunc)
|
||||
|
||||
finishFunc(context.Background(), true)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should update zanzana entry when user changes", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
oldBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-2",
|
||||
Namespace: "org-2",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
newBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-2",
|
||||
Namespace: "org-2",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-2",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
testUserUpdate := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.Equal(t, "org-2", req.Namespace)
|
||||
|
||||
// Should delete old user binding
|
||||
require.NotNil(t, req.Deletes)
|
||||
require.Len(t, req.Deletes.TupleKeys, 1)
|
||||
require.Equal(
|
||||
t,
|
||||
req.Deletes.TupleKeys[0],
|
||||
&v1.TupleKeyWithoutCondition{User: "user:user-1", Relation: "member", Object: "team:team-1"},
|
||||
)
|
||||
|
||||
// Should write new user binding
|
||||
require.NotNil(t, req.Writes)
|
||||
require.Len(t, req.Writes.TupleKeys, 1)
|
||||
require.Equal(
|
||||
t,
|
||||
req.Writes.TupleKeys[0],
|
||||
&v1.TupleKey{User: "user:user-2", Relation: "member", Object: "team:team-1"},
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testUserUpdate}
|
||||
|
||||
finishFunc, err := b.BeginTeamBindingUpdate(context.Background(), &newBinding, &oldBinding, nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, finishFunc)
|
||||
|
||||
finishFunc(context.Background(), true)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should update zanzana entry when team changes", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
oldBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-3",
|
||||
Namespace: "org-3",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionAdmin,
|
||||
},
|
||||
}
|
||||
|
||||
newBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-3",
|
||||
Namespace: "org-3",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-2",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionAdmin,
|
||||
},
|
||||
}
|
||||
|
||||
testTeamUpdate := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.Equal(t, "org-3", req.Namespace)
|
||||
|
||||
// Should delete old team binding
|
||||
require.NotNil(t, req.Deletes)
|
||||
require.Len(t, req.Deletes.TupleKeys, 1)
|
||||
require.Equal(
|
||||
t,
|
||||
req.Deletes.TupleKeys[0],
|
||||
&v1.TupleKeyWithoutCondition{User: "user:user-1", Relation: "admin", Object: "team:team-1"},
|
||||
)
|
||||
|
||||
// Should write new team binding
|
||||
require.NotNil(t, req.Writes)
|
||||
require.Len(t, req.Writes.TupleKeys, 1)
|
||||
require.Equal(
|
||||
t,
|
||||
req.Writes.TupleKeys[0],
|
||||
&v1.TupleKey{User: "user:user-1", Relation: "admin", Object: "team:team-2"},
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testTeamUpdate}
|
||||
|
||||
finishFunc, err := b.BeginTeamBindingUpdate(context.Background(), &newBinding, &oldBinding, nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, finishFunc)
|
||||
|
||||
finishFunc(context.Background(), true)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should not write to zanzana when update fails", func(t *testing.T) {
|
||||
oldBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-4",
|
||||
Namespace: "org-4",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
newBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-4",
|
||||
Namespace: "org-4",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-2",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
testNoWriteOnFailure := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
// Should not be called when success=false
|
||||
require.Fail(t, "Write should not be called when update fails")
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testNoWriteOnFailure}
|
||||
|
||||
finishFunc, err := b.BeginTeamBindingUpdate(context.Background(), &newBinding, &oldBinding, nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, finishFunc)
|
||||
|
||||
// Call finish function with success=false
|
||||
finishFunc(context.Background(), false)
|
||||
// No wait needed since write should not be called
|
||||
})
|
||||
|
||||
t.Run("should not write to zanzana when zClient is nil", func(t *testing.T) {
|
||||
builder := &IdentityAccessManagementAPIBuilder{
|
||||
logger: log.NewNopLogger(),
|
||||
zTickets: make(chan bool, 1),
|
||||
zClient: nil,
|
||||
}
|
||||
|
||||
oldBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-5",
|
||||
Namespace: "org-5",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
newBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-5",
|
||||
Namespace: "org-5",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-2",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
finishFunc, err := builder.BeginTeamBindingUpdate(context.Background(), &newBinding, &oldBinding, nil)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, finishFunc) // Should return nil when zClient is nil
|
||||
})
|
||||
|
||||
t.Run("should handle empty old binding subject name gracefully", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
oldBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-6",
|
||||
Namespace: "org-6",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "", // Empty name - conversion will be skipped
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
newBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-6",
|
||||
Namespace: "org-6",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-2",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
testEmptyOldBinding := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.Equal(t, "org-6", req.Namespace)
|
||||
|
||||
// Should not delete old binding (it was skipped due to empty name)
|
||||
require.Nil(t, req.Deletes)
|
||||
|
||||
// Should write new binding
|
||||
require.NotNil(t, req.Writes)
|
||||
require.Len(t, req.Writes.TupleKeys, 1)
|
||||
require.Equal(
|
||||
t,
|
||||
req.Writes.TupleKeys[0],
|
||||
&v1.TupleKey{User: "user:user-2", Relation: "member", Object: "team:team-1"},
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testEmptyOldBinding}
|
||||
|
||||
finishFunc, err := b.BeginTeamBindingUpdate(context.Background(), &newBinding, &oldBinding, nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, finishFunc) // Should still return finish function
|
||||
|
||||
finishFunc(context.Background(), true)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should return nil finish func when bindings are identical", func(t *testing.T) {
|
||||
oldBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-7",
|
||||
Namespace: "org-7",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
newBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-7",
|
||||
Namespace: "org-7",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
writeCalled := false
|
||||
testNoWriteOnNoChange := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
writeCalled = true
|
||||
require.Fail(t, "Write should not be called when bindings are identical")
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testNoWriteOnNoChange}
|
||||
|
||||
finishFunc, err := b.BeginTeamBindingUpdate(context.Background(), &newBinding, &oldBinding, nil)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, finishFunc) // Should return nil when bindings are identical
|
||||
|
||||
// Verify write was never called
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
require.False(t, writeCalled, "Write callback should not be called when bindings are identical")
|
||||
})
|
||||
|
||||
t.Run("should return nil finish func when new binding has empty subject name", func(t *testing.T) {
|
||||
oldBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-8",
|
||||
Namespace: "org-8",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
newBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-8",
|
||||
Namespace: "org-8",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "", // Empty name - should cause early return
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
writeCalled := false
|
||||
testNoWriteOnInvalidBinding := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
writeCalled = true
|
||||
require.Fail(t, "Write should not be called when new binding has empty subject name")
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testNoWriteOnInvalidBinding}
|
||||
|
||||
finishFunc, err := b.BeginTeamBindingUpdate(context.Background(), &newBinding, &oldBinding, nil)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, finishFunc) // Should return nil when new binding has empty subject name
|
||||
|
||||
// Verify write was never called
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
require.False(t, writeCalled, "Write callback should not be called when new binding has empty subject name")
|
||||
})
|
||||
|
||||
t.Run("should return nil finish func when new binding has empty team ref name", func(t *testing.T) {
|
||||
oldBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-9",
|
||||
Namespace: "org-9",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
newBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-9",
|
||||
Namespace: "org-9",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-2",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "", // Empty name - should cause early return
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
writeCalled := false
|
||||
testNoWriteOnInvalidBinding := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
writeCalled = true
|
||||
require.Fail(t, "Write should not be called when new binding has empty team ref name")
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testNoWriteOnInvalidBinding}
|
||||
|
||||
finishFunc, err := b.BeginTeamBindingUpdate(context.Background(), &newBinding, &oldBinding, nil)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, finishFunc) // Should return nil when new binding has empty team ref name
|
||||
|
||||
// Verify write was never called
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
require.False(t, writeCalled, "Write callback should not be called when new binding has empty team ref name")
|
||||
})
|
||||
}
|
||||
|
||||
func TestAfterTeamBindingDelete(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
b := &IdentityAccessManagementAPIBuilder{
|
||||
logger: log.NewNopLogger(),
|
||||
zTickets: make(chan bool, 1),
|
||||
}
|
||||
|
||||
t.Run("should delete zanzana entry for team binding with member permission", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
teamBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-1",
|
||||
Namespace: "org-1",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
External: false,
|
||||
},
|
||||
}
|
||||
|
||||
testMemberDelete := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.Equal(t, "org-1", req.Namespace)
|
||||
|
||||
// Should have deletes but no writes
|
||||
require.NotNil(t, req.Deletes)
|
||||
require.Len(t, req.Deletes.TupleKeys, 1)
|
||||
require.Nil(t, req.Writes)
|
||||
|
||||
require.Equal(
|
||||
t,
|
||||
req.Deletes.TupleKeys[0],
|
||||
&v1.TupleKeyWithoutCondition{User: "user:user-1", Relation: "member", Object: "team:team-1"},
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testMemberDelete}
|
||||
b.AfterTeamBindingDelete(&teamBinding, nil)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should delete zanzana entry for team binding with admin permission", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
teamBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-2",
|
||||
Namespace: "org-2",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-2",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-2",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionAdmin,
|
||||
External: true,
|
||||
},
|
||||
}
|
||||
|
||||
testAdminDelete := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.Equal(t, "org-2", req.Namespace)
|
||||
|
||||
// Should have deletes but no writes
|
||||
require.NotNil(t, req.Deletes)
|
||||
require.Len(t, req.Deletes.TupleKeys, 1)
|
||||
require.Nil(t, req.Writes)
|
||||
|
||||
require.Equal(
|
||||
t,
|
||||
req.Deletes.TupleKeys[0],
|
||||
&v1.TupleKeyWithoutCondition{User: "user:user-2", Relation: "admin", Object: "team:team-2"},
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testAdminDelete}
|
||||
b.AfterTeamBindingDelete(&teamBinding, nil)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should not delete from zanzana when zClient is nil", func(t *testing.T) {
|
||||
builder := &IdentityAccessManagementAPIBuilder{
|
||||
logger: log.NewNopLogger(),
|
||||
zTickets: make(chan bool, 1),
|
||||
zClient: nil,
|
||||
}
|
||||
|
||||
teamBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-3",
|
||||
Namespace: "org-3",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-3",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-3",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
// Should not panic or error when zClient is nil
|
||||
builder.AfterTeamBindingDelete(&teamBinding, nil)
|
||||
})
|
||||
|
||||
t.Run("should handle conversion error gracefully", func(t *testing.T) {
|
||||
// TeamBinding with empty team ref name should fail conversion
|
||||
teamBinding := iamv0.TeamBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "binding-4",
|
||||
Namespace: "org-4",
|
||||
},
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-4",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "", // Empty name should cause error
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
writeCalled := false
|
||||
testErrorHandling := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
writeCalled = true
|
||||
// Should not be called due to conversion error
|
||||
require.Fail(t, "Write should not be called when conversion fails")
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testErrorHandling}
|
||||
b.AfterTeamBindingDelete(&teamBinding, nil)
|
||||
// Wait a bit to ensure the goroutine has time to process
|
||||
// The goroutine will complete but won't call the write callback
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
require.False(t, writeCalled, "Write callback should not be called when conversion fails")
|
||||
})
|
||||
}
|
||||
|
||||
func TestConvertTeamBindingToTuple(t *testing.T) {
|
||||
t.Run("should convert member permission correctly", func(t *testing.T) {
|
||||
tb := &iamv0.TeamBinding{
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
tuple, err := convertTeamBindingToTuple(tb)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, tuple)
|
||||
require.Equal(t, "user:user-1", tuple.User)
|
||||
require.Equal(t, "member", tuple.Relation)
|
||||
require.Equal(t, "team:team-1", tuple.Object)
|
||||
require.Nil(t, tuple.Condition)
|
||||
})
|
||||
|
||||
t.Run("should convert admin permission correctly", func(t *testing.T) {
|
||||
tb := &iamv0.TeamBinding{
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-2",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-2",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionAdmin,
|
||||
},
|
||||
}
|
||||
|
||||
tuple, err := convertTeamBindingToTuple(tb)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, tuple)
|
||||
require.Equal(t, "user:user-2", tuple.User)
|
||||
require.Equal(t, "admin", tuple.Relation)
|
||||
require.Equal(t, "team:team-2", tuple.Object)
|
||||
require.Nil(t, tuple.Condition)
|
||||
})
|
||||
|
||||
t.Run("should return error for empty subject name", func(t *testing.T) {
|
||||
tb := &iamv0.TeamBinding{
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
tuple, err := convertTeamBindingToTuple(tb)
|
||||
require.Error(t, err)
|
||||
require.Nil(t, tuple)
|
||||
require.Equal(t, errEmptyName, err)
|
||||
})
|
||||
|
||||
t.Run("should return error for empty team ref name", func(t *testing.T) {
|
||||
tb := &iamv0.TeamBinding{
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "",
|
||||
},
|
||||
Permission: iamv0.TeamBindingTeamPermissionMember,
|
||||
},
|
||||
}
|
||||
|
||||
tuple, err := convertTeamBindingToTuple(tb)
|
||||
require.Error(t, err)
|
||||
require.Nil(t, tuple)
|
||||
require.Equal(t, errEmptyName, err)
|
||||
})
|
||||
|
||||
t.Run("should default to member for unknown permission", func(t *testing.T) {
|
||||
tb := &iamv0.TeamBinding{
|
||||
Spec: iamv0.TeamBindingSpec{
|
||||
Subject: iamv0.TeamBindingspecSubject{
|
||||
Name: "user-1",
|
||||
},
|
||||
TeamRef: iamv0.TeamBindingTeamRef{
|
||||
Name: "team-1",
|
||||
},
|
||||
Permission: "unknown", // Invalid permission
|
||||
},
|
||||
}
|
||||
|
||||
tuple, err := convertTeamBindingToTuple(tb)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, tuple)
|
||||
// Should default to member relation
|
||||
require.Equal(t, "member", tuple.Relation)
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,303 @@
|
||||
package iam
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/registry/generic/registry"
|
||||
|
||||
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
|
||||
v1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana"
|
||||
)
|
||||
|
||||
// createUserBasicRoleTuple creates a tuple for a user's basic role assignment
|
||||
func createUserBasicRoleTuple(userUID, orgRole string) *v1.TupleKey {
|
||||
if orgRole == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
basicRole := zanzana.TranslateBasicRole(orgRole)
|
||||
if basicRole == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &v1.TupleKey{
|
||||
User: zanzana.NewTupleEntry(zanzana.TypeUser, userUID, ""),
|
||||
Relation: zanzana.RelationAssignee,
|
||||
Object: zanzana.NewTupleEntry(zanzana.TypeRole, basicRole, ""),
|
||||
}
|
||||
}
|
||||
|
||||
// AfterUserCreate is a post-create hook that writes the user's basic role assignment to Zanzana (openFGA)
|
||||
func (b *IdentityAccessManagementAPIBuilder) AfterUserCreate(obj runtime.Object, _ *metav1.CreateOptions) {
|
||||
if b.zClient == nil {
|
||||
return
|
||||
}
|
||||
|
||||
user, ok := obj.(*iamv0.User)
|
||||
if !ok {
|
||||
b.logger.Error("failed to convert object to User type", "object", obj)
|
||||
return
|
||||
}
|
||||
|
||||
resourceType := "user"
|
||||
operation := "create"
|
||||
|
||||
// Skip if user has no role assigned
|
||||
if user.Spec.Role == "" {
|
||||
b.logger.Debug("user has no role assigned, skipping basic role sync",
|
||||
"namespace", user.Namespace,
|
||||
"userUID", user.Name,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
// Grab a ticket to write to Zanzana
|
||||
wait := time.Now()
|
||||
b.zTickets <- true
|
||||
hooksWaitHistogram.WithLabelValues(resourceType, operation).Observe(time.Since(wait).Seconds())
|
||||
|
||||
go func(u *iamv0.User) {
|
||||
start := time.Now()
|
||||
status := "success"
|
||||
|
||||
defer func() {
|
||||
<-b.zTickets
|
||||
hooksDurationHistogram.WithLabelValues(resourceType, operation, status).Observe(time.Since(start).Seconds())
|
||||
hooksOperationCounter.WithLabelValues(resourceType, operation, status).Inc()
|
||||
}()
|
||||
|
||||
tuple := createUserBasicRoleTuple(u.Name, u.Spec.Role)
|
||||
if tuple == nil {
|
||||
b.logger.Warn("failed to create user basic role tuple",
|
||||
"namespace", u.Namespace,
|
||||
"userUID", u.Name,
|
||||
"role", u.Spec.Role,
|
||||
)
|
||||
status = "failure"
|
||||
return
|
||||
}
|
||||
|
||||
b.logger.Debug("writing user basic role to zanzana",
|
||||
"namespace", u.Namespace,
|
||||
"userUID", u.Name,
|
||||
"role", u.Spec.Role,
|
||||
)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout)
|
||||
defer cancel()
|
||||
|
||||
err := b.zClient.Write(ctx, &v1.WriteRequest{
|
||||
Namespace: u.Namespace,
|
||||
Writes: &v1.WriteRequestWrites{
|
||||
TupleKeys: []*v1.TupleKey{tuple},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
status = "failure"
|
||||
b.logger.Error("failed to write user basic role to zanzana",
|
||||
"err", err,
|
||||
"namespace", u.Namespace,
|
||||
"userUID", u.Name,
|
||||
"role", u.Spec.Role,
|
||||
)
|
||||
} else {
|
||||
hooksTuplesCounter.WithLabelValues(resourceType, operation, "write").Inc()
|
||||
}
|
||||
}(user.DeepCopy())
|
||||
}
|
||||
|
||||
// BeginUserUpdate is a pre-update hook that gets called on user updates
|
||||
// It compares old and new roles and performs the zanzana write after K8s update succeeds
|
||||
func (b *IdentityAccessManagementAPIBuilder) BeginUserUpdate(ctx context.Context, obj, oldObj runtime.Object, options *metav1.UpdateOptions) (registry.FinishFunc, error) {
|
||||
if b.zClient == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
oldUser, ok := oldObj.(*iamv0.User)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
newUser, ok := obj.(*iamv0.User)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// If role hasn't changed, no need to update
|
||||
if oldUser.Spec.Role == newUser.Spec.Role {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Return a finish function that performs the zanzana write only on success
|
||||
return func(ctx context.Context, success bool) {
|
||||
if !success {
|
||||
return
|
||||
}
|
||||
|
||||
wait := time.Now()
|
||||
b.zTickets <- true
|
||||
hooksWaitHistogram.WithLabelValues("user", "update").Observe(time.Since(wait).Seconds())
|
||||
|
||||
go func(old, new *iamv0.User) {
|
||||
start := time.Now()
|
||||
status := "success"
|
||||
|
||||
defer func() {
|
||||
<-b.zTickets
|
||||
hooksDurationHistogram.WithLabelValues("user", "update", status).Observe(time.Since(start).Seconds())
|
||||
hooksOperationCounter.WithLabelValues("user", "update", status).Inc()
|
||||
}()
|
||||
|
||||
b.logger.Debug("updating user basic role in zanzana",
|
||||
"namespace", new.Namespace,
|
||||
"userUID", new.Name,
|
||||
"oldRole", old.Spec.Role,
|
||||
"newRole", new.Spec.Role,
|
||||
)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout)
|
||||
defer cancel()
|
||||
|
||||
req := &v1.WriteRequest{
|
||||
Namespace: new.Namespace,
|
||||
}
|
||||
|
||||
// Delete old role tuple if it existed
|
||||
if old.Spec.Role != "" {
|
||||
oldTuple := createUserBasicRoleTuple(old.Name, old.Spec.Role)
|
||||
if oldTuple != nil {
|
||||
deleteTuple := tupleToTupleKeyWithoutCondition(oldTuple)
|
||||
req.Deletes = &v1.WriteRequestDeletes{
|
||||
TupleKeys: []*v1.TupleKeyWithoutCondition{deleteTuple},
|
||||
}
|
||||
b.logger.Debug("deleting old user basic role from zanzana",
|
||||
"namespace", new.Namespace,
|
||||
"userUID", new.Name,
|
||||
"role", old.Spec.Role,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Write new role tuple if it exists
|
||||
if new.Spec.Role != "" {
|
||||
newTuple := createUserBasicRoleTuple(new.Name, new.Spec.Role)
|
||||
if newTuple != nil {
|
||||
req.Writes = &v1.WriteRequestWrites{
|
||||
TupleKeys: []*v1.TupleKey{newTuple},
|
||||
}
|
||||
b.logger.Debug("writing new user basic role to zanzana",
|
||||
"namespace", new.Namespace,
|
||||
"userUID", new.Name,
|
||||
"role", new.Spec.Role,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Only make the request if there are deletes or writes
|
||||
if (req.Deletes != nil && len(req.Deletes.TupleKeys) > 0) || (req.Writes != nil && len(req.Writes.TupleKeys) > 0) {
|
||||
err := b.zClient.Write(ctx, req)
|
||||
if err != nil {
|
||||
status = "failure"
|
||||
b.logger.Error("failed to update user basic role in zanzana",
|
||||
"err", err,
|
||||
"namespace", new.Namespace,
|
||||
"userUID", new.Name,
|
||||
)
|
||||
} else {
|
||||
if req.Deletes != nil && len(req.Deletes.TupleKeys) > 0 {
|
||||
hooksTuplesCounter.WithLabelValues("user", "update", "delete").Inc()
|
||||
}
|
||||
if req.Writes != nil && len(req.Writes.TupleKeys) > 0 {
|
||||
hooksTuplesCounter.WithLabelValues("user", "update", "write").Inc()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
b.logger.Debug("no tuples to update in zanzana", "namespace", new.Namespace)
|
||||
}
|
||||
}(oldUser.DeepCopy(), newUser.DeepCopy())
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AfterUserDelete is a post-delete hook that removes the user's basic role assignment from Zanzana (openFGA)
|
||||
func (b *IdentityAccessManagementAPIBuilder) AfterUserDelete(obj runtime.Object, _ *metav1.DeleteOptions) {
|
||||
if b.zClient == nil {
|
||||
return
|
||||
}
|
||||
|
||||
user, ok := obj.(*iamv0.User)
|
||||
if !ok {
|
||||
b.logger.Error("failed to convert object to User type", "object", obj)
|
||||
return
|
||||
}
|
||||
|
||||
resourceType := "user"
|
||||
operation := "delete"
|
||||
|
||||
// Skip if user had no role assigned
|
||||
if user.Spec.Role == "" {
|
||||
b.logger.Debug("user had no role assigned, skipping basic role sync",
|
||||
"namespace", user.Namespace,
|
||||
"userUID", user.Name,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
wait := time.Now()
|
||||
b.zTickets <- true
|
||||
hooksWaitHistogram.WithLabelValues(resourceType, operation).Observe(time.Since(wait).Seconds())
|
||||
|
||||
go func(u *iamv0.User) {
|
||||
start := time.Now()
|
||||
status := "success"
|
||||
|
||||
defer func() {
|
||||
<-b.zTickets
|
||||
hooksDurationHistogram.WithLabelValues(resourceType, operation, status).Observe(time.Since(start).Seconds())
|
||||
hooksOperationCounter.WithLabelValues(resourceType, operation, status).Inc()
|
||||
}()
|
||||
|
||||
tuple := createUserBasicRoleTuple(u.Name, u.Spec.Role)
|
||||
if tuple == nil {
|
||||
b.logger.Warn("failed to create user basic role tuple for deletion",
|
||||
"namespace", u.Namespace,
|
||||
"userUID", u.Name,
|
||||
"role", u.Spec.Role,
|
||||
)
|
||||
status = "failure"
|
||||
return
|
||||
}
|
||||
|
||||
deleteTuple := tupleToTupleKeyWithoutCondition(tuple)
|
||||
|
||||
b.logger.Debug("deleting user basic role from zanzana",
|
||||
"namespace", u.Namespace,
|
||||
"userUID", u.Name,
|
||||
"role", u.Spec.Role,
|
||||
)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout)
|
||||
defer cancel()
|
||||
|
||||
err := b.zClient.Write(ctx, &v1.WriteRequest{
|
||||
Namespace: u.Namespace,
|
||||
Deletes: &v1.WriteRequestDeletes{
|
||||
TupleKeys: []*v1.TupleKeyWithoutCondition{deleteTuple},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
status = "failure"
|
||||
b.logger.Error("failed to delete user basic role from zanzana",
|
||||
"err", err,
|
||||
"namespace", u.Namespace,
|
||||
"userUID", u.Name,
|
||||
"role", u.Spec.Role,
|
||||
)
|
||||
} else {
|
||||
hooksTuplesCounter.WithLabelValues(resourceType, operation, "delete").Inc()
|
||||
}
|
||||
}(user.DeepCopy())
|
||||
}
|
||||
@@ -0,0 +1,609 @@
|
||||
package iam
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
v1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAfterUserCreate(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
b := &IdentityAccessManagementAPIBuilder{
|
||||
logger: log.NewNopLogger(),
|
||||
zTickets: make(chan bool, 1),
|
||||
}
|
||||
|
||||
t.Run("should create zanzana entry for user with Admin role", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
user := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "df2p421det1q8c",
|
||||
Namespace: "org-1",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Admin",
|
||||
},
|
||||
}
|
||||
|
||||
testAdminRole := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.NotNil(t, req.Writes)
|
||||
require.Len(t, req.Writes.TupleKeys, 1)
|
||||
require.Equal(t, "org-1", req.Namespace)
|
||||
|
||||
tuple := req.Writes.TupleKeys[0]
|
||||
require.Equal(t, "user:df2p421det1q8c", tuple.User)
|
||||
require.Equal(t, "assignee", tuple.Relation)
|
||||
require.Equal(t, "role:basic_admin", tuple.Object)
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testAdminRole}
|
||||
b.AfterUserCreate(&user, nil)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should create zanzana entry for user with Editor role", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
user := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "user123",
|
||||
Namespace: "org-2",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Editor",
|
||||
},
|
||||
}
|
||||
|
||||
testEditorRole := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.NotNil(t, req.Writes)
|
||||
require.Len(t, req.Writes.TupleKeys, 1)
|
||||
require.Equal(t, "org-2", req.Namespace)
|
||||
|
||||
tuple := req.Writes.TupleKeys[0]
|
||||
require.Equal(t, "user:user123", tuple.User)
|
||||
require.Equal(t, "assignee", tuple.Relation)
|
||||
require.Equal(t, "role:basic_editor", tuple.Object)
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testEditorRole}
|
||||
b.AfterUserCreate(&user, nil)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should create zanzana entry for user with Viewer role", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
user := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "viewer456",
|
||||
Namespace: "org-3",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Viewer",
|
||||
},
|
||||
}
|
||||
|
||||
testViewerRole := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.NotNil(t, req.Writes)
|
||||
require.Len(t, req.Writes.TupleKeys, 1)
|
||||
require.Equal(t, "org-3", req.Namespace)
|
||||
|
||||
tuple := req.Writes.TupleKeys[0]
|
||||
require.Equal(t, "user:viewer456", tuple.User)
|
||||
require.Equal(t, "assignee", tuple.Relation)
|
||||
require.Equal(t, "role:basic_viewer", tuple.Object)
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testViewerRole}
|
||||
b.AfterUserCreate(&user, nil)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should skip when user has no role", func(t *testing.T) {
|
||||
user := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "norole789",
|
||||
Namespace: "org-4",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "",
|
||||
},
|
||||
}
|
||||
|
||||
// Should not call zanzana client
|
||||
b.zClient = nil
|
||||
b.AfterUserCreate(&user, nil)
|
||||
// If we get here without panic, the test passes
|
||||
})
|
||||
|
||||
t.Run("should skip when zClient is nil", func(t *testing.T) {
|
||||
builder := &IdentityAccessManagementAPIBuilder{
|
||||
logger: log.NewNopLogger(),
|
||||
zTickets: make(chan bool, 1),
|
||||
zClient: nil,
|
||||
}
|
||||
|
||||
user := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testuser",
|
||||
Namespace: "org-1",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Admin",
|
||||
},
|
||||
}
|
||||
|
||||
// Should return early without calling zanzana
|
||||
builder.AfterUserCreate(&user, nil)
|
||||
// If we get here without panic, the test passes
|
||||
})
|
||||
}
|
||||
|
||||
func TestBeginUserUpdate(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
b := &IdentityAccessManagementAPIBuilder{
|
||||
logger: log.NewNopLogger(),
|
||||
zTickets: make(chan bool, 1),
|
||||
}
|
||||
|
||||
t.Run("should update zanzana entry when role changes from Viewer to Admin", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
oldUser := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testuser",
|
||||
Namespace: "org-1",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Viewer",
|
||||
},
|
||||
}
|
||||
|
||||
newUser := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testuser",
|
||||
Namespace: "org-1",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Admin",
|
||||
},
|
||||
}
|
||||
|
||||
testRoleChange := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.Equal(t, "org-1", req.Namespace)
|
||||
|
||||
// Should delete old role
|
||||
require.NotNil(t, req.Deletes)
|
||||
require.Len(t, req.Deletes.TupleKeys, 1)
|
||||
deleteTuple := req.Deletes.TupleKeys[0]
|
||||
require.Equal(t, "user:testuser", deleteTuple.User)
|
||||
require.Equal(t, "assignee", deleteTuple.Relation)
|
||||
require.Equal(t, "role:basic_viewer", deleteTuple.Object)
|
||||
|
||||
// Should write new role
|
||||
require.NotNil(t, req.Writes)
|
||||
require.Len(t, req.Writes.TupleKeys, 1)
|
||||
writeTuple := req.Writes.TupleKeys[0]
|
||||
require.Equal(t, "user:testuser", writeTuple.User)
|
||||
require.Equal(t, "assignee", writeTuple.Relation)
|
||||
require.Equal(t, "role:basic_admin", writeTuple.Object)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testRoleChange}
|
||||
|
||||
finishFunc, err := b.BeginUserUpdate(context.Background(), &newUser, &oldUser, nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, finishFunc)
|
||||
|
||||
finishFunc(context.Background(), true)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should delete old role when new role is empty", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
oldUser := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testuser2",
|
||||
Namespace: "org-2",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Editor",
|
||||
},
|
||||
}
|
||||
|
||||
newUser := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testuser2",
|
||||
Namespace: "org-2",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "",
|
||||
},
|
||||
}
|
||||
|
||||
testRemoveRole := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.Equal(t, "org-2", req.Namespace)
|
||||
|
||||
// Should delete old role
|
||||
require.NotNil(t, req.Deletes)
|
||||
require.Len(t, req.Deletes.TupleKeys, 1)
|
||||
deleteTuple := req.Deletes.TupleKeys[0]
|
||||
require.Equal(t, "user:testuser2", deleteTuple.User)
|
||||
require.Equal(t, "assignee", deleteTuple.Relation)
|
||||
require.Equal(t, "role:basic_editor", deleteTuple.Object)
|
||||
|
||||
// Should not write new role
|
||||
require.Nil(t, req.Writes)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testRemoveRole}
|
||||
|
||||
finishFunc, err := b.BeginUserUpdate(context.Background(), &newUser, &oldUser, nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, finishFunc)
|
||||
|
||||
finishFunc(context.Background(), true)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should be able to add a new role when old role was empty", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
oldUser := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testuser3",
|
||||
Namespace: "org-3",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "",
|
||||
},
|
||||
}
|
||||
|
||||
newUser := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testuser3",
|
||||
Namespace: "org-3",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Admin",
|
||||
},
|
||||
}
|
||||
|
||||
testAddRole := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.Equal(t, "org-3", req.Namespace)
|
||||
|
||||
// Should not delete old role (was empty)
|
||||
require.Nil(t, req.Deletes)
|
||||
|
||||
// Should write new role
|
||||
require.NotNil(t, req.Writes)
|
||||
require.Len(t, req.Writes.TupleKeys, 1)
|
||||
writeTuple := req.Writes.TupleKeys[0]
|
||||
require.Equal(t, "user:testuser3", writeTuple.User)
|
||||
require.Equal(t, "assignee", writeTuple.Relation)
|
||||
require.Equal(t, "role:basic_admin", writeTuple.Object)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testAddRole}
|
||||
|
||||
finishFunc, err := b.BeginUserUpdate(context.Background(), &newUser, &oldUser, nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, finishFunc)
|
||||
|
||||
finishFunc(context.Background(), true)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should skip update when role hasn't changed", func(t *testing.T) {
|
||||
oldUser := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testuser4",
|
||||
Namespace: "org-4",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Editor",
|
||||
},
|
||||
}
|
||||
|
||||
newUser := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testuser4",
|
||||
Namespace: "org-4",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Editor",
|
||||
},
|
||||
}
|
||||
|
||||
finishFunc, err := b.BeginUserUpdate(context.Background(), &newUser, &oldUser, nil)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, finishFunc) // Should return nil when no update needed
|
||||
})
|
||||
|
||||
t.Run("should not call zanzana when update fails", func(t *testing.T) {
|
||||
oldUser := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testuser5",
|
||||
Namespace: "org-5",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Viewer",
|
||||
},
|
||||
}
|
||||
|
||||
newUser := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testuser5",
|
||||
Namespace: "org-5",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Admin",
|
||||
},
|
||||
}
|
||||
|
||||
callCount := 0
|
||||
testNoCall := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
callCount++
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testNoCall}
|
||||
|
||||
finishFunc, err := b.BeginUserUpdate(context.Background(), &newUser, &oldUser, nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, finishFunc)
|
||||
|
||||
// Call with success=false - should not trigger zanzana write
|
||||
finishFunc(context.Background(), false)
|
||||
require.Equal(t, 0, callCount, "zanzana should not be called when update fails")
|
||||
})
|
||||
|
||||
t.Run("should skip when zClient is nil", func(t *testing.T) {
|
||||
builder := &IdentityAccessManagementAPIBuilder{
|
||||
logger: log.NewNopLogger(),
|
||||
zTickets: make(chan bool, 1),
|
||||
zClient: nil,
|
||||
}
|
||||
|
||||
oldUser := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testuser",
|
||||
Namespace: "org-1",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Viewer",
|
||||
},
|
||||
}
|
||||
|
||||
newUser := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testuser",
|
||||
Namespace: "org-1",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Admin",
|
||||
},
|
||||
}
|
||||
|
||||
finishFunc, err := builder.BeginUserUpdate(context.Background(), &newUser, &oldUser, nil)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, finishFunc) // Should return nil when zClient is nil
|
||||
})
|
||||
}
|
||||
|
||||
func TestAfterUserDelete(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
b := &IdentityAccessManagementAPIBuilder{
|
||||
logger: log.NewNopLogger(),
|
||||
zTickets: make(chan bool, 1),
|
||||
}
|
||||
|
||||
t.Run("should delete zanzana entry for user with Admin role", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
user := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "df2p421det1q8c",
|
||||
Namespace: "org-1",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Admin",
|
||||
},
|
||||
}
|
||||
|
||||
testDeleteAdmin := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.Equal(t, "org-1", req.Namespace)
|
||||
|
||||
// Should have deletes but no writes
|
||||
require.NotNil(t, req.Deletes)
|
||||
require.Len(t, req.Deletes.TupleKeys, 1)
|
||||
require.Nil(t, req.Writes)
|
||||
|
||||
deleteTuple := req.Deletes.TupleKeys[0]
|
||||
require.Equal(t, "user:df2p421det1q8c", deleteTuple.User)
|
||||
require.Equal(t, "assignee", deleteTuple.Relation)
|
||||
require.Equal(t, "role:basic_admin", deleteTuple.Object)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testDeleteAdmin}
|
||||
b.AfterUserDelete(&user, nil)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should delete zanzana entry for user with Editor role", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
user := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "editor123",
|
||||
Namespace: "org-2",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Editor",
|
||||
},
|
||||
}
|
||||
|
||||
testDeleteEditor := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.Equal(t, "org-2", req.Namespace)
|
||||
|
||||
require.NotNil(t, req.Deletes)
|
||||
require.Len(t, req.Deletes.TupleKeys, 1)
|
||||
deleteTuple := req.Deletes.TupleKeys[0]
|
||||
require.Equal(t, "user:editor123", deleteTuple.User)
|
||||
require.Equal(t, "assignee", deleteTuple.Relation)
|
||||
require.Equal(t, "role:basic_editor", deleteTuple.Object)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testDeleteEditor}
|
||||
b.AfterUserDelete(&user, nil)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should delete zanzana entry for user with Viewer role", func(t *testing.T) {
|
||||
wg.Add(1)
|
||||
user := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "viewer456",
|
||||
Namespace: "org-3",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Viewer",
|
||||
},
|
||||
}
|
||||
|
||||
testDeleteViewer := func(ctx context.Context, req *v1.WriteRequest) error {
|
||||
defer wg.Done()
|
||||
require.NotNil(t, req)
|
||||
require.Equal(t, "org-3", req.Namespace)
|
||||
|
||||
require.NotNil(t, req.Deletes)
|
||||
require.Len(t, req.Deletes.TupleKeys, 1)
|
||||
deleteTuple := req.Deletes.TupleKeys[0]
|
||||
require.Equal(t, "user:viewer456", deleteTuple.User)
|
||||
require.Equal(t, "assignee", deleteTuple.Relation)
|
||||
require.Equal(t, "role:basic_viewer", deleteTuple.Object)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
b.zClient = &FakeZanzanaClient{writeCallback: testDeleteViewer}
|
||||
b.AfterUserDelete(&user, nil)
|
||||
wg.Wait()
|
||||
})
|
||||
|
||||
t.Run("should skip when user has no role", func(t *testing.T) {
|
||||
user := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "norole789",
|
||||
Namespace: "org-4",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "",
|
||||
},
|
||||
}
|
||||
|
||||
// Should not call zanzana client
|
||||
b.zClient = nil
|
||||
b.AfterUserDelete(&user, nil)
|
||||
// If we get here without panic, the test passes
|
||||
})
|
||||
|
||||
t.Run("should skip when zClient is nil", func(t *testing.T) {
|
||||
builder := &IdentityAccessManagementAPIBuilder{
|
||||
logger: log.NewNopLogger(),
|
||||
zTickets: make(chan bool, 1),
|
||||
zClient: nil,
|
||||
}
|
||||
|
||||
user := iamv0.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testuser",
|
||||
Namespace: "org-1",
|
||||
},
|
||||
Spec: iamv0.UserSpec{
|
||||
Role: "Admin",
|
||||
},
|
||||
}
|
||||
|
||||
// Should return early without calling zanzana
|
||||
builder.AfterUserDelete(&user, nil)
|
||||
// If we get here without panic, the test passes
|
||||
})
|
||||
}
|
||||
|
||||
func TestCreateUserBasicRoleTuple(t *testing.T) {
|
||||
t.Run("should create tuple for Admin role", func(t *testing.T) {
|
||||
tuple := createUserBasicRoleTuple("user123", "Admin")
|
||||
require.NotNil(t, tuple)
|
||||
require.Equal(t, "user:user123", tuple.User)
|
||||
require.Equal(t, "assignee", tuple.Relation)
|
||||
require.Equal(t, "role:basic_admin", tuple.Object)
|
||||
})
|
||||
|
||||
t.Run("should create tuple for Editor role", func(t *testing.T) {
|
||||
tuple := createUserBasicRoleTuple("user456", "Editor")
|
||||
require.NotNil(t, tuple)
|
||||
require.Equal(t, "user:user456", tuple.User)
|
||||
require.Equal(t, "assignee", tuple.Relation)
|
||||
require.Equal(t, "role:basic_editor", tuple.Object)
|
||||
})
|
||||
|
||||
t.Run("should create tuple for Viewer role", func(t *testing.T) {
|
||||
tuple := createUserBasicRoleTuple("user789", "Viewer")
|
||||
require.NotNil(t, tuple)
|
||||
require.Equal(t, "user:user789", tuple.User)
|
||||
require.Equal(t, "assignee", tuple.Relation)
|
||||
require.Equal(t, "role:basic_viewer", tuple.Object)
|
||||
})
|
||||
|
||||
t.Run("should create tuple for None role", func(t *testing.T) {
|
||||
tuple := createUserBasicRoleTuple("user000", "None")
|
||||
require.NotNil(t, tuple)
|
||||
require.Equal(t, "user:user000", tuple.User)
|
||||
require.Equal(t, "assignee", tuple.Relation)
|
||||
require.Equal(t, "role:basic_none", tuple.Object)
|
||||
})
|
||||
|
||||
t.Run("should return nil for empty role", func(t *testing.T) {
|
||||
tuple := createUserBasicRoleTuple("user123", "")
|
||||
require.Nil(t, tuple)
|
||||
})
|
||||
|
||||
t.Run("should return nil for invalid role", func(t *testing.T) {
|
||||
tuple := createUserBasicRoleTuple("user123", "InvalidRole")
|
||||
require.Nil(t, tuple)
|
||||
})
|
||||
}
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
@@ -74,6 +75,11 @@ type jobDriver struct {
|
||||
|
||||
// notifications channel for job create events
|
||||
notifications chan struct{}
|
||||
|
||||
// Mutex to protect concurrent access to job processing
|
||||
mu sync.Mutex
|
||||
// currentJob is the job currently being processed
|
||||
currentJob *provisioning.Job
|
||||
}
|
||||
|
||||
func NewJobDriver(
|
||||
@@ -142,7 +148,7 @@ func (d *jobDriver) claimAndProcessOneJob(ctx context.Context) error {
|
||||
logger := logging.FromContext(ctx)
|
||||
|
||||
// Claim a job to work on.
|
||||
job, rollback, err := d.store.Claim(ctx)
|
||||
claimedJob, rollback, err := d.store.Claim(ctx)
|
||||
if err != nil {
|
||||
return apifmt.Errorf("failed to claim job: %w", err)
|
||||
}
|
||||
@@ -150,14 +156,16 @@ func (d *jobDriver) claimAndProcessOneJob(ctx context.Context) error {
|
||||
// The rollback function does not care about cancellations.
|
||||
defer rollback()
|
||||
|
||||
logger = logger.With("job", job.GetName(), "namespace", job.GetNamespace())
|
||||
namespace := claimedJob.GetNamespace()
|
||||
logger = logger.With("job", claimedJob.GetName(), "namespace", namespace)
|
||||
ctx = logging.Context(ctx, logger)
|
||||
logger.Debug("claimed a job")
|
||||
d.currentJob = claimedJob
|
||||
|
||||
// Now that we have a job, we need to augment our namespace to grant ourselves permission to work on it.
|
||||
// Incidentally, this also limits our permissions to only the namespace of the job.
|
||||
ctx = request.WithNamespace(ctx, job.GetNamespace())
|
||||
ctx, _, err = identity.WithProvisioningIdentity(ctx, job.GetNamespace())
|
||||
ctx = request.WithNamespace(ctx, namespace)
|
||||
ctx, _, err = identity.WithProvisioningIdentity(ctx, namespace)
|
||||
if err != nil {
|
||||
return apifmt.Errorf("failed to grant provisioning identity: %w", err)
|
||||
}
|
||||
@@ -169,37 +177,42 @@ func (d *jobDriver) claimAndProcessOneJob(ctx context.Context) error {
|
||||
leaseRenewalCtx, cancelLeaseRenewal := context.WithCancel(jobctx)
|
||||
leaseExpired := make(chan struct{})
|
||||
|
||||
go d.leaseRenewalLoop(leaseRenewalCtx, job, logger, leaseExpired)
|
||||
go d.leaseRenewalLoop(leaseRenewalCtx, logger, leaseExpired)
|
||||
defer cancelLeaseRenewal()
|
||||
|
||||
recorder := newJobProgressRecorder(d.onProgress(job))
|
||||
recorder := newJobProgressRecorder(d.onProgress())
|
||||
recorder.SetMessage(ctx, "start job")
|
||||
|
||||
// Process the job with lease loss detection
|
||||
start := time.Now()
|
||||
job.Status.Started = start.UnixMilli()
|
||||
err = d.processJobWithLeaseCheck(jobctx, job, recorder, leaseExpired)
|
||||
err = d.processJobWithLeaseCheck(jobctx, recorder, leaseExpired)
|
||||
end := time.Now()
|
||||
logger.Debug("job processed", "duration", end.Sub(start), "error", err)
|
||||
logger.Debug("job processed", "duration", end.Sub(recorder.Started()), "error", err)
|
||||
|
||||
// Capture job timeout
|
||||
if jobctx.Err() != nil && err == nil {
|
||||
err = jobctx.Err()
|
||||
}
|
||||
|
||||
job.Status = recorder.Complete(ctx, err)
|
||||
// Complete the job
|
||||
d.mu.Lock()
|
||||
d.currentJob.Status = recorder.Complete(ctx, err)
|
||||
defer func() {
|
||||
d.currentJob = nil
|
||||
d.mu.Unlock()
|
||||
}()
|
||||
|
||||
// Save the finished job
|
||||
err = d.historicJobs.WriteJob(ctx, job.DeepCopy())
|
||||
err = d.historicJobs.WriteJob(ctx, d.currentJob.DeepCopy())
|
||||
if err != nil {
|
||||
// We're not going to return this as it is not critical. Not ideal, but not critical.
|
||||
logger.Warn("failed to create historic job", "historic_job", *job, "error", err)
|
||||
logger.Warn("failed to create historic job", "historic_job", *d.currentJob, "error", err)
|
||||
} else {
|
||||
logger.Debug("created historic job", "historic_job", *job)
|
||||
logger.Debug("created historic job", "historic_job", *d.currentJob)
|
||||
}
|
||||
|
||||
// Mark the job as completed.
|
||||
if err := d.store.Complete(ctx, job); err != nil {
|
||||
return apifmt.Errorf("failed to complete job '%s' in '%s': %w", job.GetName(), job.GetNamespace(), err)
|
||||
if err := d.store.Complete(ctx, d.currentJob); err != nil {
|
||||
return apifmt.Errorf("failed to complete job '%s' in '%s': %w", d.currentJob.GetName(), d.currentJob.GetNamespace(), err)
|
||||
}
|
||||
logger.Debug("job completed")
|
||||
|
||||
@@ -208,7 +221,7 @@ func (d *jobDriver) claimAndProcessOneJob(ctx context.Context) error {
|
||||
|
||||
// leaseRenewalLoop continuously renews the lease for a job until the context is cancelled.
|
||||
// If lease renewal fails persistently, it signals via the leaseExpired channel.
|
||||
func (d *jobDriver) leaseRenewalLoop(ctx context.Context, job *provisioning.Job, logger logging.Logger, leaseExpired chan struct{}) {
|
||||
func (d *jobDriver) leaseRenewalLoop(ctx context.Context, logger logging.Logger, leaseExpired chan struct{}) {
|
||||
ticker := time.NewTicker(d.leaseRenewalInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
@@ -223,7 +236,15 @@ func (d *jobDriver) leaseRenewalLoop(ctx context.Context, job *provisioning.Job,
|
||||
logger.Debug("lease renewal loop stopping")
|
||||
return
|
||||
case <-ticker.C:
|
||||
err := d.store.RenewLease(ctx, job)
|
||||
d.mu.Lock()
|
||||
if d.currentJob == nil {
|
||||
d.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
err := d.store.RenewLease(ctx, d.currentJob)
|
||||
d.mu.Unlock()
|
||||
|
||||
if err != nil {
|
||||
consecutiveFailures++
|
||||
if apierrors.IsNotFound(err) ||
|
||||
@@ -253,11 +274,11 @@ func (d *jobDriver) leaseRenewalLoop(ctx context.Context, job *provisioning.Job,
|
||||
}
|
||||
|
||||
// processJobWithLeaseCheck processes a job but aborts if the lease expires.
|
||||
func (d *jobDriver) processJobWithLeaseCheck(ctx context.Context, job *provisioning.Job, recorder JobProgressRecorder, leaseExpired <-chan struct{}) error {
|
||||
func (d *jobDriver) processJobWithLeaseCheck(ctx context.Context, recorder JobProgressRecorder, leaseExpired <-chan struct{}) error {
|
||||
// Run the job processing in a goroutine so we can monitor lease expiry
|
||||
resultChan := make(chan error, 1)
|
||||
go func() {
|
||||
resultChan <- d.processJob(ctx, job, recorder)
|
||||
resultChan <- d.processJob(ctx, recorder)
|
||||
}()
|
||||
|
||||
select {
|
||||
@@ -270,16 +291,28 @@ func (d *jobDriver) processJobWithLeaseCheck(ctx context.Context, job *provision
|
||||
}
|
||||
}
|
||||
|
||||
func (d *jobDriver) processJob(ctx context.Context, job *provisioning.Job, recorder JobProgressRecorder) error {
|
||||
func (d *jobDriver) processJob(ctx context.Context, recorder JobProgressRecorder) error {
|
||||
logger := logging.FromContext(ctx)
|
||||
d.mu.Lock()
|
||||
if d.currentJob == nil {
|
||||
d.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Here it's safe to copy as only job spec is used for processing
|
||||
job := d.currentJob.DeepCopy()
|
||||
repoName := d.currentJob.Spec.Repository
|
||||
namespace := d.currentJob.Namespace
|
||||
d.mu.Unlock()
|
||||
|
||||
for _, worker := range d.workers {
|
||||
if !worker.IsSupported(ctx, *job) {
|
||||
continue
|
||||
}
|
||||
|
||||
repo, err := d.repoGetter.GetRepository(ctx, job.Namespace, job.Spec.Repository)
|
||||
repo, err := d.repoGetter.GetRepository(ctx, namespace, repoName)
|
||||
if err != nil {
|
||||
return apifmt.Errorf("failed to get repository '%s': %w", job.Spec.Repository, err)
|
||||
return apifmt.Errorf("failed to get repository '%s': %w", repoName, err)
|
||||
}
|
||||
|
||||
r := repo.Config()
|
||||
@@ -298,42 +331,51 @@ func (d *jobDriver) processJob(ctx context.Context, job *provisioning.Job, recor
|
||||
return apifmt.Errorf("no workers were registered to handle the job")
|
||||
}
|
||||
|
||||
func (d *jobDriver) onProgress(job *provisioning.Job) ProgressFn {
|
||||
func (d *jobDriver) onProgress() ProgressFn {
|
||||
return func(ctx context.Context, status provisioning.JobStatus) error {
|
||||
logging.FromContext(ctx).Debug("job progress", "status", status)
|
||||
|
||||
const maxRetries = 3
|
||||
for attempt := 0; attempt < maxRetries; attempt++ {
|
||||
// Use the current job for the first attempt, fetch fresh for retries
|
||||
currentJob := job
|
||||
d.mu.Lock()
|
||||
if d.currentJob == nil {
|
||||
d.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Use the current job for the first attempt; on retry attempts, fetch fresh data from the store to resolve conflicts
|
||||
if attempt > 0 {
|
||||
// Fetch the latest version to resolve conflicts
|
||||
latest, err := d.store.Get(ctx, job.GetNamespace(), job.GetName())
|
||||
latest, err := d.store.Get(ctx, d.currentJob.GetNamespace(), d.currentJob.GetName())
|
||||
if err != nil {
|
||||
d.mu.Unlock()
|
||||
if apierrors.IsNotFound(err) {
|
||||
// Job was completed/deleted, nothing to update
|
||||
return nil
|
||||
}
|
||||
return apifmt.Errorf("failed to fetch job for progress update: %w", err)
|
||||
}
|
||||
currentJob = latest
|
||||
|
||||
*d.currentJob = *latest
|
||||
}
|
||||
|
||||
job := d.currentJob
|
||||
// Update status on the current job
|
||||
currentJob.Status = status
|
||||
|
||||
updated, err := d.store.Update(ctx, currentJob)
|
||||
job.Status = status
|
||||
updated, err := d.store.Update(ctx, job)
|
||||
if err != nil {
|
||||
if apierrors.IsConflict(err) && attempt < maxRetries-1 {
|
||||
// Conflict detected, retry with fresh data
|
||||
logging.FromContext(ctx).Debug("progress update conflict, retrying", "attempt", attempt+1)
|
||||
continue
|
||||
}
|
||||
d.mu.Unlock()
|
||||
return apifmt.Errorf("failed to update job progress: %w", err)
|
||||
}
|
||||
|
||||
// Update succeeded, update our local copy
|
||||
*job = *updated
|
||||
*d.currentJob = *updated
|
||||
d.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
// Code generated by mockery v2.52.4. DO NOT EDIT.
|
||||
// Code generated by mockery v2.53.4. DO NOT EDIT.
|
||||
|
||||
package jobs
|
||||
|
||||
import (
|
||||
context "context"
|
||||
time "time"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
v0alpha1 "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// MockJobProgressRecorder is an autogenerated mock type for the JobProgressRecorder type
|
||||
@@ -271,6 +273,51 @@ func (_c *MockJobProgressRecorder_SetTotal_Call) RunAndReturn(run func(context.C
|
||||
return _c
|
||||
}
|
||||
|
||||
// Started provides a mock function with no fields
|
||||
func (_m *MockJobProgressRecorder) Started() time.Time {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Started")
|
||||
}
|
||||
|
||||
var r0 time.Time
|
||||
if rf, ok := ret.Get(0).(func() time.Time); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(time.Time)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockJobProgressRecorder_Started_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Started'
|
||||
type MockJobProgressRecorder_Started_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Started is a helper method to define mock.On call
|
||||
func (_e *MockJobProgressRecorder_Expecter) Started() *MockJobProgressRecorder_Started_Call {
|
||||
return &MockJobProgressRecorder_Started_Call{Call: _e.mock.On("Started")}
|
||||
}
|
||||
|
||||
func (_c *MockJobProgressRecorder_Started_Call) Run(run func()) *MockJobProgressRecorder_Started_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run()
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockJobProgressRecorder_Started_Call) Return(_a0 time.Time) *MockJobProgressRecorder_Started_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockJobProgressRecorder_Started_Call) RunAndReturn(run func() time.Time) *MockJobProgressRecorder_Started_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// StrictMaxErrors provides a mock function with given fields: maxErrors
|
||||
func (_m *MockJobProgressRecorder) StrictMaxErrors(maxErrors int) {
|
||||
_m.Called(maxErrors)
|
||||
|
||||
@@ -69,6 +69,10 @@ func newJobProgressRecorder(ProgressFn ProgressFn) JobProgressRecorder {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *jobProgressRecorder) Started() time.Time {
|
||||
return r.started
|
||||
}
|
||||
|
||||
func (r *jobProgressRecorder) Record(ctx context.Context, result JobResourceResult) {
|
||||
var shouldLogError bool
|
||||
var logErr error
|
||||
|
||||
@@ -2,6 +2,7 @@ package jobs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
|
||||
"github.com/grafana/grafana/apps/provisioning/pkg/repository"
|
||||
@@ -18,6 +19,7 @@ type RepoGetter interface {
|
||||
//
|
||||
//go:generate mockery --name JobProgressRecorder --structname MockJobProgressRecorder --inpackage --filename job_progress_recorder_mock.go --with-expecter
|
||||
type JobProgressRecorder interface {
|
||||
Started() time.Time
|
||||
Record(ctx context.Context, result JobResourceResult)
|
||||
ResetResults()
|
||||
SetFinalMessage(ctx context.Context, msg string)
|
||||
|
||||
@@ -40,8 +40,6 @@ var expectedHeaders = map[string]string{
|
||||
strings.ToLower(queryService.HeaderPanelPluginId): queryService.HeaderPanelPluginId,
|
||||
strings.ToLower(queryService.HeaderDashboardTitle): queryService.HeaderDashboardTitle,
|
||||
strings.ToLower(queryService.HeaderPanelTitle): queryService.HeaderPanelTitle,
|
||||
strings.ToLower("X-Real-IP"): "X-Real-IP",
|
||||
strings.ToLower("X-Forwarded-For"): "X-Forwarded-For",
|
||||
}
|
||||
|
||||
func ExtractKnownHeaders(header http.Header) map[string]string {
|
||||
|
||||
Generated
+2
-2
@@ -840,7 +840,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api
|
||||
identitySynchronizer := authnimpl.ProvideIdentitySynchronizer(authnimplService)
|
||||
ldapImpl := service12.ProvideService(cfg, featureToggles, ssosettingsimplService)
|
||||
apiService := api4.ProvideService(cfg, routeRegisterImpl, accessControl, userService, authinfoimplService, ossGroups, identitySynchronizer, orgService, ldapImpl, userAuthTokenService, bundleregistryService)
|
||||
dashboardsAPIBuilder := dashboard.RegisterAPIService(cfg, featureToggles, apiserverService, dashboardService, dashboardProvisioningService, service15, dashboardServiceImpl, dashboardPermissionsService, accessControl, accessClient, provisioningServiceImpl, dashboardsStore, registerer, sqlStore, tracingService, resourceClient, dualwriteService, sortService, quotaService, libraryPanelService, eventualRestConfigProvider, userService, libraryElementService)
|
||||
dashboardsAPIBuilder := dashboard.RegisterAPIService(cfg, featureToggles, apiserverService, dashboardService, dashboardProvisioningService, service15, dashboardServiceImpl, dashboardPermissionsService, accessControl, accessClient, provisioningServiceImpl, dashboardsStore, registerer, sqlStore, tracingService, resourceClient, dualwriteService, sortService, quotaService, libraryPanelService, eventualRestConfigProvider, userService, libraryElementService, publicDashboardServiceImpl)
|
||||
snapshotsAPIBuilder := dashboardsnapshot.RegisterAPIService(serviceImpl, apiserverService, cfg, featureToggles, sqlStore, registerer)
|
||||
dataSourceAPIBuilder, err := datasource.RegisterAPIService(configProvider, featureToggles, apiserverService, middlewareHandler, scopedPluginDatasourceProvider, plugincontextProvider, accessControl, registerer)
|
||||
if err != nil {
|
||||
@@ -1474,7 +1474,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac
|
||||
identitySynchronizer := authnimpl.ProvideIdentitySynchronizer(authnimplService)
|
||||
ldapImpl := service12.ProvideService(cfg, featureToggles, ssosettingsimplService)
|
||||
apiService := api4.ProvideService(cfg, routeRegisterImpl, accessControl, userService, authinfoimplService, ossGroups, identitySynchronizer, orgService, ldapImpl, userAuthTokenService, bundleregistryService)
|
||||
dashboardsAPIBuilder := dashboard.RegisterAPIService(cfg, featureToggles, apiserverService, dashboardService, dashboardProvisioningService, service15, dashboardServiceImpl, dashboardPermissionsService, accessControl, accessClient, provisioningServiceImpl, dashboardsStore, registerer, sqlStore, tracingService, resourceClient, dualwriteService, sortService, quotaService, libraryPanelService, eventualRestConfigProvider, userService, libraryElementService)
|
||||
dashboardsAPIBuilder := dashboard.RegisterAPIService(cfg, featureToggles, apiserverService, dashboardService, dashboardProvisioningService, service15, dashboardServiceImpl, dashboardPermissionsService, accessControl, accessClient, provisioningServiceImpl, dashboardsStore, registerer, sqlStore, tracingService, resourceClient, dualwriteService, sortService, quotaService, libraryPanelService, eventualRestConfigProvider, userService, libraryElementService, publicDashboardServiceImpl)
|
||||
snapshotsAPIBuilder := dashboardsnapshot.RegisterAPIService(serviceImpl, apiserverService, cfg, featureToggles, sqlStore, registerer)
|
||||
dataSourceAPIBuilder, err := datasource.RegisterAPIService(configProvider, featureToggles, apiserverService, middlewareHandler, scopedPluginDatasourceProvider, plugincontextProvider, accessControl, registerer)
|
||||
if err != nil {
|
||||
|
||||
@@ -203,6 +203,14 @@ func createPostStartHook(
|
||||
logger.Error("Failed to initialize app", "app", installer.ManifestData().AppName, "error", err)
|
||||
return fmt.Errorf("failed to get app from installer %s: %w", installer.ManifestData().AppName, err)
|
||||
}
|
||||
return app.Runner().Run(hookContext.Context)
|
||||
go func() {
|
||||
err := app.Runner().Run(hookContext.Context)
|
||||
if err != nil {
|
||||
logger.Error("App runner exited with error", "app", installer.ManifestData().AppName, "error", err)
|
||||
} else {
|
||||
logger.Info("App runner exited without error", "app", installer.ManifestData().AppName)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -30,6 +30,9 @@ message MutateOperation {
|
||||
DeleteFolderOperation delete_folder = 2;
|
||||
CreatePermissionOperation create_permission = 3;
|
||||
DeletePermissionOperation delete_permission = 4;
|
||||
UpdateUserOrgRoleOperation update_user_org_role = 5;
|
||||
DeleteUserOrgRoleOperation delete_user_org_role = 6;
|
||||
AddUserOrgRoleOperation add_user_org_role = 7;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -61,6 +64,28 @@ message DeletePermissionOperation {
|
||||
Permission permission = 2;
|
||||
}
|
||||
|
||||
message AddUserOrgRoleOperation {
|
||||
// User UID
|
||||
string user = 1;
|
||||
// Role name (e.g: "Admin", "Editor", "Viewer")
|
||||
string role = 2;
|
||||
}
|
||||
|
||||
// UpdateUserOrgRoleOperation assigns the user's basic role and deletes existing basic role assignments.
|
||||
message UpdateUserOrgRoleOperation {
|
||||
// User UID
|
||||
string user = 1;
|
||||
// Role name (e.g: "Admin", "Editor", "Viewer")
|
||||
string role = 2;
|
||||
}
|
||||
|
||||
message DeleteUserOrgRoleOperation {
|
||||
// User UID
|
||||
string user = 1;
|
||||
// Role name (e.g: "Admin", "Editor", "Viewer")
|
||||
string role = 2;
|
||||
}
|
||||
|
||||
message Resource {
|
||||
// group of the resource (e.g: "dashboard.grafana.app")
|
||||
string group = 1;
|
||||
|
||||
@@ -110,7 +110,7 @@ func ProvideStandaloneZanzanaClient(cfg *setting.Cfg, features featuremgmt.Featu
|
||||
ServerCertFile: cfg.ZanzanaClient.ServerCertFile,
|
||||
}
|
||||
|
||||
return NewRemoteZanzanaClient(fmt.Sprintf("stacks-%s", cfg.StackID), zanzanaConfig)
|
||||
return NewRemoteZanzanaClient(cfg.ZanzanaClient.TokenNamespace, zanzanaConfig)
|
||||
}
|
||||
|
||||
type ZanzanaClientConfig struct {
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"slices"
|
||||
|
||||
authlib "github.com/grafana/authlib/types"
|
||||
|
||||
dashboards "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1"
|
||||
@@ -23,6 +25,14 @@ var basicRolesTranslations = map[string]string{
|
||||
roleNone: "basic_none",
|
||||
}
|
||||
|
||||
var basicRolesUIDs = []string{
|
||||
"basic_grafana_admin",
|
||||
"basic_admin",
|
||||
"basic_editor",
|
||||
"basic_viewer",
|
||||
"basic_none",
|
||||
}
|
||||
|
||||
type resourceTranslation struct {
|
||||
typ string
|
||||
group string
|
||||
@@ -67,6 +77,13 @@ var resourceTranslations = map[string]resourceTranslation{
|
||||
"dashboards:write": newScopedMapping(RelationUpdate, dashboardGroup, dashboardResource, ""),
|
||||
"dashboards:create": newScopedMapping(RelationCreate, dashboardGroup, dashboardResource, ""),
|
||||
"dashboards:delete": newScopedMapping(RelationDelete, dashboardGroup, dashboardResource, ""),
|
||||
// Action sets
|
||||
"folders:view": newMapping(RelationSetView, ""),
|
||||
"folders:edit": newMapping(RelationSetEdit, ""),
|
||||
"folders:admin": newMapping(RelationSetAdmin, ""),
|
||||
"dashboards:view": newScopedMapping(RelationSetView, dashboardGroup, dashboardResource, ""),
|
||||
"dashboards:edit": newScopedMapping(RelationSetEdit, dashboardGroup, dashboardResource, ""),
|
||||
"dashboards:admin": newScopedMapping(RelationSetAdmin, dashboardGroup, dashboardResource, ""),
|
||||
},
|
||||
},
|
||||
KindDashboards: {
|
||||
@@ -78,6 +95,10 @@ var resourceTranslations = map[string]resourceTranslation{
|
||||
"dashboards:write": newMapping(RelationUpdate, ""),
|
||||
"dashboards:create": newMapping(RelationCreate, ""),
|
||||
"dashboards:delete": newMapping(RelationDelete, ""),
|
||||
// Action sets
|
||||
"dashboards:view": newMapping(RelationSetView, ""),
|
||||
"dashboards:edit": newMapping(RelationSetEdit, ""),
|
||||
"dashboards:admin": newMapping(RelationSetAdmin, ""),
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -138,3 +159,7 @@ func TranslateToGroupResource(kind string) string {
|
||||
func TranslateBasicRole(name string) string {
|
||||
return basicRolesTranslations[name]
|
||||
}
|
||||
|
||||
func IsBasicRole(name string) bool {
|
||||
return slices.Contains(basicRolesUIDs, name)
|
||||
}
|
||||
|
||||
@@ -350,6 +350,14 @@ func NewTypedTuple(typ, subject, relation, name string) *openfgav1.TupleKey {
|
||||
}
|
||||
}
|
||||
|
||||
func NewTuple(subject, relation, object string) *openfgav1.TupleKey {
|
||||
return &openfgav1.TupleKey{
|
||||
User: subject,
|
||||
Relation: relation,
|
||||
Object: object,
|
||||
}
|
||||
}
|
||||
|
||||
func ToAuthzExtTupleKey(t *openfgav1.TupleKey) *authzextv1.TupleKey {
|
||||
tupleKey := &authzextv1.TupleKey{
|
||||
User: t.GetUser(),
|
||||
@@ -457,3 +465,21 @@ func AddRenderContext(req *openfgav1.CheckRequest) {
|
||||
),
|
||||
})
|
||||
}
|
||||
|
||||
func SplitTupleObject(object string) (string, string, string) {
|
||||
var objectType, name, relation string
|
||||
parts := strings.Split(object, ":")
|
||||
if len(parts) < 2 {
|
||||
return "", "", ""
|
||||
}
|
||||
|
||||
objectType = parts[0]
|
||||
nameRel := parts[1]
|
||||
parts = strings.Split(nameRel, "#")
|
||||
if len(parts) > 1 {
|
||||
relation = parts[1]
|
||||
}
|
||||
name = parts[0]
|
||||
|
||||
return objectType, name, relation
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ func authorize(ctx context.Context, namespace string, ss setting.ZanzanaServerSe
|
||||
return status.Errorf(codes.Unauthenticated, "unauthenticated")
|
||||
}
|
||||
if !claims.NamespaceMatches(c.GetNamespace(), namespace) {
|
||||
return status.Errorf(codes.PermissionDenied, "namespace does not match")
|
||||
return status.Errorf(codes.PermissionDenied, "token namespace %s does not match request namespace", c.GetNamespace())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -12,8 +12,9 @@ import (
|
||||
type OperationGroup string
|
||||
|
||||
const (
|
||||
OperationGroupFolder OperationGroup = "folder"
|
||||
OperationGroupPermission OperationGroup = "permission"
|
||||
OperationGroupFolder OperationGroup = "folder"
|
||||
OperationGroupPermission OperationGroup = "permission"
|
||||
OperationGroupUserOrgRole OperationGroup = "user_org_role"
|
||||
)
|
||||
|
||||
func (s *Server) Mutate(ctx context.Context, req *authzextv1.MutateRequest) (*authzextv1.MutateResponse, error) {
|
||||
@@ -58,6 +59,10 @@ func (s *Server) mutate(ctx context.Context, req *authzextv1.MutateRequest) (*au
|
||||
if err := s.mutateResourcePermissions(ctx, storeInf, operations); err != nil {
|
||||
return nil, fmt.Errorf("failed to mutate resource permissions: %w", err)
|
||||
}
|
||||
case OperationGroupUserOrgRole:
|
||||
if err := s.mutateOrgRoles(ctx, storeInf, operations); err != nil {
|
||||
return nil, fmt.Errorf("failed to mutate org roles: %w", err)
|
||||
}
|
||||
default:
|
||||
s.logger.Warn("unsupported operation group", "operationGroup", operationGroup)
|
||||
}
|
||||
@@ -72,6 +77,8 @@ func getOperationGroup(operation *authzextv1.MutateOperation) (OperationGroup, e
|
||||
return OperationGroupFolder, nil
|
||||
case *authzextv1.MutateOperation_CreatePermission, *authzextv1.MutateOperation_DeletePermission:
|
||||
return OperationGroupPermission, nil
|
||||
case *authzextv1.MutateOperation_UpdateUserOrgRole, *authzextv1.MutateOperation_DeleteUserOrgRole, *authzextv1.MutateOperation_AddUserOrgRole:
|
||||
return OperationGroupUserOrgRole, nil
|
||||
}
|
||||
return OperationGroup(""), errors.New("unsupported mutate operation type")
|
||||
}
|
||||
|
||||
@@ -0,0 +1,113 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
openfgav1 "github.com/openfga/api/proto/openfga/v1"
|
||||
|
||||
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
zanzana "github.com/grafana/grafana/pkg/services/authz/zanzana/common"
|
||||
)
|
||||
|
||||
func (s *Server) mutateOrgRoles(ctx context.Context, store *storeInfo, operations []*authzextv1.MutateOperation) error {
|
||||
ctx, span := s.tracer.Start(ctx, "server.mutateOrgRoles")
|
||||
defer span.End()
|
||||
|
||||
writeTuples := make([]*openfgav1.TupleKey, 0)
|
||||
deleteTuples := make([]*openfgav1.TupleKeyWithoutCondition, 0)
|
||||
|
||||
for _, operation := range operations {
|
||||
switch op := operation.Operation.(type) {
|
||||
case *authzextv1.MutateOperation_AddUserOrgRole:
|
||||
basicRole := zanzana.TranslateBasicRole(op.AddUserOrgRole.GetRole())
|
||||
tuple := &openfgav1.TupleKey{
|
||||
User: zanzana.NewTupleEntry(zanzana.TypeUser, op.AddUserOrgRole.GetUser(), ""),
|
||||
Relation: zanzana.RelationAssignee,
|
||||
Object: zanzana.NewTupleEntry(zanzana.TypeRole, basicRole, ""),
|
||||
}
|
||||
writeTuples = append(writeTuples, tuple)
|
||||
case *authzextv1.MutateOperation_DeleteUserOrgRole:
|
||||
basicRole := zanzana.TranslateBasicRole(op.DeleteUserOrgRole.GetRole())
|
||||
tuple := &openfgav1.TupleKeyWithoutCondition{
|
||||
User: zanzana.NewTupleEntry(zanzana.TypeUser, op.DeleteUserOrgRole.GetUser(), ""),
|
||||
Relation: zanzana.RelationAssignee,
|
||||
Object: zanzana.NewTupleEntry(zanzana.TypeRole, basicRole, ""),
|
||||
}
|
||||
deleteTuples = append(deleteTuples, tuple)
|
||||
case *authzextv1.MutateOperation_UpdateUserOrgRole:
|
||||
writeTuple, existingTuples, err := s.getUserOrgRoleUpdateTuples(ctx, store, op.UpdateUserOrgRole)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeTuples = append(writeTuples, writeTuple)
|
||||
deleteTuples = append(deleteTuples, existingTuples...)
|
||||
default:
|
||||
s.logger.Debug("unsupported mutate operation", "operation", op)
|
||||
}
|
||||
}
|
||||
|
||||
if len(writeTuples) == 0 && len(deleteTuples) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
writeReq := &openfgav1.WriteRequest{
|
||||
StoreId: store.ID,
|
||||
AuthorizationModelId: store.ModelID,
|
||||
}
|
||||
if len(writeTuples) > 0 {
|
||||
writeReq.Writes = &openfgav1.WriteRequestWrites{
|
||||
TupleKeys: writeTuples,
|
||||
OnDuplicate: "ignore",
|
||||
}
|
||||
}
|
||||
if len(deleteTuples) > 0 {
|
||||
writeReq.Deletes = &openfgav1.WriteRequestDeletes{
|
||||
TupleKeys: deleteTuples,
|
||||
OnMissing: "ignore",
|
||||
}
|
||||
}
|
||||
|
||||
_, err := s.openfga.Write(ctx, writeReq)
|
||||
if err != nil {
|
||||
s.logger.Error("failed to write user org role tuples", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) getUserOrgRoleUpdateTuples(ctx context.Context, store *storeInfo, req *authzextv1.UpdateUserOrgRoleOperation) (*openfgav1.TupleKey, []*openfgav1.TupleKeyWithoutCondition, error) {
|
||||
readReq := &openfgav1.ReadRequest{
|
||||
StoreId: store.ID,
|
||||
TupleKey: &openfgav1.ReadRequestTupleKey{
|
||||
User: zanzana.NewTupleEntry(zanzana.TypeUser, req.GetUser(), ""),
|
||||
Relation: zanzana.RelationAssignee,
|
||||
// read tuples by object type ("role:")
|
||||
Object: zanzana.NewTupleEntry(zanzana.TypeRole, "", ""),
|
||||
},
|
||||
}
|
||||
res, err := s.openfga.Read(ctx, readReq)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
existingBasicRoleTuples := make([]*openfgav1.TupleKeyWithoutCondition, 0)
|
||||
for _, tuple := range res.GetTuples() {
|
||||
_, roleName, _ := zanzana.SplitTupleObject(tuple.GetKey().GetObject())
|
||||
if zanzana.IsBasicRole(roleName) {
|
||||
existingBasicRoleTuples = append(existingBasicRoleTuples, &openfgav1.TupleKeyWithoutCondition{
|
||||
User: tuple.GetKey().GetUser(),
|
||||
Relation: tuple.GetKey().GetRelation(),
|
||||
Object: tuple.GetKey().GetObject(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
basicRole := zanzana.TranslateBasicRole(req.GetRole())
|
||||
writeTuple := &openfgav1.TupleKey{
|
||||
User: zanzana.NewTupleEntry(zanzana.TypeUser, req.GetUser(), ""),
|
||||
Relation: zanzana.RelationAssignee,
|
||||
Object: zanzana.NewTupleEntry(zanzana.TypeRole, basicRole, ""),
|
||||
}
|
||||
|
||||
return writeTuple, existingBasicRoleTuples, nil
|
||||
}
|
||||
@@ -0,0 +1,110 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
openfgav1 "github.com/openfga/api/proto/openfga/v1"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
v1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
|
||||
)
|
||||
|
||||
func setupMutateOrgRoles(t *testing.T, srv *Server) *Server {
|
||||
t.Helper()
|
||||
|
||||
// seed tuples
|
||||
tuples := []*openfgav1.TupleKey{
|
||||
common.NewTuple("user:1", common.RelationAssignee, "role:basic_editor"),
|
||||
}
|
||||
|
||||
return setupOpenFGADatabase(t, srv, tuples)
|
||||
}
|
||||
|
||||
func testMutateOrgRoles(t *testing.T, srv *Server) {
|
||||
setupMutateOrgRoles(t, srv)
|
||||
|
||||
t.Run("should update user org role and delete old role", func(t *testing.T) {
|
||||
_, err := srv.Mutate(newContextWithNamespace(), &v1.MutateRequest{
|
||||
Namespace: "default",
|
||||
Operations: []*v1.MutateOperation{
|
||||
{
|
||||
Operation: &v1.MutateOperation_UpdateUserOrgRole{
|
||||
UpdateUserOrgRole: &v1.UpdateUserOrgRoleOperation{
|
||||
User: "1",
|
||||
Role: "Admin",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
res, err := srv.Read(newContextWithNamespace(), &v1.ReadRequest{
|
||||
Namespace: "default",
|
||||
TupleKey: &v1.ReadRequestTupleKey{
|
||||
Relation: common.RelationAssignee,
|
||||
Object: "role:basic_admin",
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, res.Tuples, 1)
|
||||
require.Equal(t, "user:1", res.Tuples[0].Key.User)
|
||||
|
||||
res, err = srv.Read(newContextWithNamespace(), &v1.ReadRequest{
|
||||
Namespace: "default",
|
||||
TupleKey: &v1.ReadRequestTupleKey{
|
||||
Relation: common.RelationAssignee,
|
||||
Object: "role:basic_editor",
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, res.Tuples, 0)
|
||||
})
|
||||
|
||||
t.Run("should add user org role and delete old role", func(t *testing.T) {
|
||||
_, err := srv.Mutate(newContextWithNamespace(), &v1.MutateRequest{
|
||||
Namespace: "default",
|
||||
Operations: []*v1.MutateOperation{
|
||||
{
|
||||
Operation: &v1.MutateOperation_AddUserOrgRole{
|
||||
AddUserOrgRole: &v1.AddUserOrgRoleOperation{
|
||||
User: "1",
|
||||
Role: "Viewer",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Operation: &v1.MutateOperation_DeleteUserOrgRole{
|
||||
DeleteUserOrgRole: &v1.DeleteUserOrgRoleOperation{
|
||||
User: "1",
|
||||
Role: "Admin",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
res, err := srv.Read(newContextWithNamespace(), &v1.ReadRequest{
|
||||
Namespace: "default",
|
||||
TupleKey: &v1.ReadRequestTupleKey{
|
||||
Relation: common.RelationAssignee,
|
||||
Object: "role:basic_admin",
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, res.Tuples, 0)
|
||||
|
||||
res, err = srv.Read(newContextWithNamespace(), &v1.ReadRequest{
|
||||
Namespace: "default",
|
||||
TupleKey: &v1.ReadRequestTupleKey{
|
||||
Relation: common.RelationAssignee,
|
||||
Object: "role:basic_viewer",
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, res.Tuples, 1)
|
||||
require.Equal(t, "user:1", res.Tuples[0].Key.User)
|
||||
})
|
||||
}
|
||||
@@ -128,6 +128,10 @@ func TestIntegrationServer(t *testing.T) {
|
||||
t.Run("test mutate resource permissions", func(t *testing.T) {
|
||||
testMutateResourcePermissions(t, srv)
|
||||
})
|
||||
|
||||
t.Run("test mutate org roles", func(t *testing.T) {
|
||||
testMutateOrgRoles(t, srv)
|
||||
})
|
||||
}
|
||||
|
||||
func setupOpenFGAServer(t *testing.T, testDB db.DB, cfg *setting.Cfg) *Server {
|
||||
|
||||
@@ -84,7 +84,7 @@ func (s *OSSCachingService) HandleResourceRequest(ctx context.Context, req *back
|
||||
var _ CachingService = &OSSCachingService{}
|
||||
|
||||
// GetKey creates a prefixed cache key and uses the internal `encoder` to encode the query into a string
|
||||
func GetKey(prefix string, query interface{}) (string, error) {
|
||||
func GetKey(namespace, prefix string, query interface{}) (string, error) {
|
||||
keybuf := bytes.NewBuffer(nil)
|
||||
|
||||
encoder := &JSONEncoder{}
|
||||
@@ -98,6 +98,12 @@ func GetKey(prefix string, query interface{}) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// The namespace is empty only when this function is used by the legacy caching module.
|
||||
// This case can be removed when the legacy caching module is not being used anymore.
|
||||
if namespace != "" {
|
||||
return strings.Join([]string{namespace, prefix, key}, ":"), nil
|
||||
}
|
||||
|
||||
return strings.Join([]string{prefix, key}, ":"), nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1254,6 +1254,13 @@ var (
|
||||
Stage: FeatureStageExperimental,
|
||||
Owner: grafanaFrontendPlatformSquad,
|
||||
},
|
||||
{
|
||||
Name: "timeRangePan",
|
||||
Description: "Enables time range panning functionality",
|
||||
Stage: FeatureStageExperimental,
|
||||
FrontendOnly: true,
|
||||
Owner: grafanaDatavizSquad,
|
||||
},
|
||||
{
|
||||
Name: "azureMonitorDisableLogLimit",
|
||||
Description: "Disables the log limit restriction for Azure Monitor when true. The limit is enabled by default.",
|
||||
@@ -1516,10 +1523,11 @@ var (
|
||||
},
|
||||
{
|
||||
Name: "newLogsPanel",
|
||||
Description: "Enables the new logs panel in Explore",
|
||||
Stage: FeatureStageExperimental,
|
||||
Description: "Enables the new logs panel",
|
||||
Stage: FeatureStageGeneralAvailability,
|
||||
FrontendOnly: true,
|
||||
Owner: grafanaObservabilityLogsSquad,
|
||||
Expression: "true",
|
||||
},
|
||||
{
|
||||
Name: "grafanaconThemes",
|
||||
|
||||
@@ -164,6 +164,7 @@ managedDualWriter,experimental,@grafana/search-and-storage,false,false,false
|
||||
pluginsSriChecks,GA,@grafana/plugins-platform-backend,false,false,false
|
||||
unifiedStorageBigObjectsSupport,experimental,@grafana/search-and-storage,false,false,false
|
||||
timeRangeProvider,experimental,@grafana/grafana-frontend-platform,false,false,false
|
||||
timeRangePan,experimental,@grafana/dataviz-squad,false,false,true
|
||||
azureMonitorDisableLogLimit,GA,@grafana/partner-datasources,false,false,false
|
||||
preinstallAutoUpdate,GA,@grafana/plugins-platform-backend,false,false,false
|
||||
playlistsReconciler,experimental,@grafana/grafana-app-platform-squad,false,true,false
|
||||
@@ -198,7 +199,7 @@ grafanaAdvisor,privatePreview,@grafana/plugins-platform-backend,false,false,fals
|
||||
elasticsearchImprovedParsing,experimental,@grafana/aws-datasources,false,false,false
|
||||
datasourceConnectionsTab,privatePreview,@grafana/plugins-platform-backend,false,false,true
|
||||
fetchRulesUsingPost,experimental,@grafana/alerting-squad,false,false,false
|
||||
newLogsPanel,experimental,@grafana/observability-logs,false,false,true
|
||||
newLogsPanel,GA,@grafana/observability-logs,false,false,true
|
||||
grafanaconThemes,GA,@grafana/grafana-frontend-platform,false,true,false
|
||||
alertingJiraIntegration,experimental,@grafana/alerting-squad,false,false,true
|
||||
alertingUseNewSimplifiedRoutingHashAlgorithm,preview,@grafana/alerting-squad,false,true,false
|
||||
|
||||
|
Generated
+5
-1
@@ -667,6 +667,10 @@ const (
|
||||
// Enables time pickers sync
|
||||
FlagTimeRangeProvider = "timeRangeProvider"
|
||||
|
||||
// FlagTimeRangePan
|
||||
// Enables time range panning functionality
|
||||
FlagTimeRangePan = "timeRangePan"
|
||||
|
||||
// FlagAzureMonitorDisableLogLimit
|
||||
// Disables the log limit restriction for Azure Monitor when true. The limit is enabled by default.
|
||||
FlagAzureMonitorDisableLogLimit = "azureMonitorDisableLogLimit"
|
||||
@@ -804,7 +808,7 @@ const (
|
||||
FlagFetchRulesUsingPost = "fetchRulesUsingPost"
|
||||
|
||||
// FlagNewLogsPanel
|
||||
// Enables the new logs panel in Explore
|
||||
// Enables the new logs panel
|
||||
FlagNewLogsPanel = "newLogsPanel"
|
||||
|
||||
// FlagGrafanaconThemes
|
||||
|
||||
@@ -2775,14 +2775,18 @@
|
||||
{
|
||||
"metadata": {
|
||||
"name": "newLogsPanel",
|
||||
"resourceVersion": "1753448760331",
|
||||
"creationTimestamp": "2025-02-04T17:40:17Z"
|
||||
"resourceVersion": "1762166984808",
|
||||
"creationTimestamp": "2025-02-04T17:40:17Z",
|
||||
"annotations": {
|
||||
"grafana.app/updatedTimestamp": "2025-11-03 10:49:44.808226 +0000 UTC"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"description": "Enables the new logs panel in Explore",
|
||||
"stage": "experimental",
|
||||
"description": "Enables the new logs panel",
|
||||
"stage": "GA",
|
||||
"codeowner": "@grafana/observability-logs",
|
||||
"frontend": true
|
||||
"frontend": true,
|
||||
"expression": "true"
|
||||
}
|
||||
},
|
||||
{
|
||||
@@ -3911,6 +3915,22 @@
|
||||
"frontend": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"name": "timeRangePan",
|
||||
"resourceVersion": "1762290731154",
|
||||
"creationTimestamp": "2025-10-24T19:49:53Z",
|
||||
"annotations": {
|
||||
"grafana.app/updatedTimestamp": "2025-11-04 21:12:11.154822 +0000 UTC"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"description": "Enables time range panning functionality",
|
||||
"stage": "experimental",
|
||||
"codeowner": "@grafana/dataviz-squad",
|
||||
"frontend": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"name": "timeRangeProvider",
|
||||
|
||||
@@ -1460,6 +1460,148 @@ func TestIntegrationRuleGroupsCaseSensitive(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// To address issues arising from case-insensitive collations in some databases (e.g., MySQL/MariaDB),
|
||||
func TestIntegrationListAlertRulesByGroupCaseSensitiveOrdering(t *testing.T) {
|
||||
tutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
usr := models.UserUID("test")
|
||||
|
||||
sqlStore := db.InitTestDB(t)
|
||||
cfg := setting.NewCfg()
|
||||
cfg.UnifiedAlerting.BaseInterval = 1 * time.Second
|
||||
folderService := setupFolderService(t, sqlStore, cfg, featuremgmt.WithFeatures())
|
||||
b := &fakeBus{}
|
||||
logger := log.New("test-dbstore")
|
||||
store := createTestStore(sqlStore, folderService, logger, cfg.UnifiedAlerting, b)
|
||||
store.FeatureToggles = featuremgmt.WithFeatures()
|
||||
|
||||
gen := models.RuleGen.With(models.RuleMuts.WithOrgID(1))
|
||||
|
||||
// Create namespace and base group key
|
||||
groupKey := models.GenerateGroupKey(1)
|
||||
|
||||
// Create groups with case-sensitive names: "TEST", "Test", "test"
|
||||
groupKeyUpper := groupKey
|
||||
groupKeyUpper.RuleGroup = "TEST"
|
||||
|
||||
groupKeyMixed := groupKey
|
||||
groupKeyMixed.RuleGroup = "Test"
|
||||
|
||||
groupKeyLower := groupKey
|
||||
groupKeyLower.RuleGroup = "test"
|
||||
|
||||
// Generate rules for each group
|
||||
groupUpper := gen.With(gen.WithGroupKey(groupKeyUpper)).GenerateMany(2)
|
||||
groupMixed := gen.With(gen.WithGroupKey(groupKeyMixed)).GenerateMany(2)
|
||||
groupLower := gen.With(gen.WithGroupKey(groupKeyLower)).GenerateMany(2)
|
||||
|
||||
// Insert all rules
|
||||
allRules := append(append(groupUpper, groupMixed...), groupLower...)
|
||||
_, err := store.InsertAlertRules(context.Background(), &usr, allRules)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("should order groups case-sensitively", func(t *testing.T) {
|
||||
result, _, err := store.ListAlertRulesByGroup(context.Background(), &models.ListAlertRulesExtendedQuery{
|
||||
ListAlertRulesQuery: models.ListAlertRulesQuery{OrgID: 1},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, result, 6, "should return all 6 rules")
|
||||
|
||||
// Extract group names in order
|
||||
var groupOrder []string
|
||||
for _, rule := range result {
|
||||
if len(groupOrder) == 0 || groupOrder[len(groupOrder)-1] != rule.RuleGroup {
|
||||
groupOrder = append(groupOrder, rule.RuleGroup)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify case-sensitive alphabetical ordering
|
||||
// different databases may sort uppercase before lowercase or vice versa depending on character set, the important part is that the order is consistent and case-sensitive
|
||||
expectedOrder := []string{"test", "Test", "TEST"}
|
||||
alternateExpectedOrder := []string{"TEST", "Test", "test"}
|
||||
if !slices.Equal(groupOrder, expectedOrder) && !slices.Equal(groupOrder, alternateExpectedOrder) {
|
||||
t.Fatalf("groups are not ordered case-sensitively as expected. got: %v, want: %v or %v", groupOrder, expectedOrder, alternateExpectedOrder)
|
||||
}
|
||||
|
||||
// Verify each group contains the correct rules
|
||||
groupRules := make(map[string][]*models.AlertRule)
|
||||
for _, rule := range result {
|
||||
groupRules[rule.RuleGroup] = append(groupRules[rule.RuleGroup], rule)
|
||||
}
|
||||
|
||||
require.Len(t, groupRules["TEST"], 2, "TEST group should have 2 rules")
|
||||
require.Len(t, groupRules["Test"], 2, "Test group should have 2 rules")
|
||||
require.Len(t, groupRules["test"], 2, "test group should have 2 rules")
|
||||
})
|
||||
|
||||
t.Run("should respect group limit with case-sensitive ordering", func(t *testing.T) {
|
||||
// Test with limit of 2 groups - should get first 2 groups in case-sensitive order
|
||||
result, continueToken, err := store.ListAlertRulesByGroup(context.Background(), &models.ListAlertRulesExtendedQuery{
|
||||
ListAlertRulesQuery: models.ListAlertRulesQuery{OrgID: 1},
|
||||
Limit: 2,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, result, 4, "should return 4 rules (2 rules from first 2 groups)")
|
||||
require.NotEmpty(t, continueToken, "should have continue token when limit is reached")
|
||||
|
||||
// Extract group names from limited result
|
||||
var limitedGroupOrder []string
|
||||
for _, rule := range result {
|
||||
if len(limitedGroupOrder) == 0 || limitedGroupOrder[len(limitedGroupOrder)-1] != rule.RuleGroup {
|
||||
limitedGroupOrder = append(limitedGroupOrder, rule.RuleGroup)
|
||||
}
|
||||
}
|
||||
|
||||
// Should get first 2 groups in case-sensitive order: "TEST", "Test" or "test", "Test"
|
||||
expectedLimitedOrder := []string{"TEST", "Test"}
|
||||
alternateExpectedOrder := []string{"test", "Test"}
|
||||
matchesDescLexOrder := slices.Equal(limitedGroupOrder, expectedLimitedOrder)
|
||||
matchesAscLexOrder := slices.Equal(limitedGroupOrder, alternateExpectedOrder)
|
||||
if !matchesDescLexOrder && !matchesAscLexOrder {
|
||||
t.Fatalf("limited groups are not ordered case-sensitively as expected. got: %v, want: %v or %v", limitedGroupOrder, expectedLimitedOrder, alternateExpectedOrder)
|
||||
}
|
||||
|
||||
// Continue from token to get remaining groups
|
||||
remainingResult, nextToken, err := store.ListAlertRulesByGroup(context.Background(), &models.ListAlertRulesExtendedQuery{
|
||||
ListAlertRulesQuery: models.ListAlertRulesQuery{OrgID: 1},
|
||||
ContinueToken: continueToken,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, remainingResult, 2, "should return 2 rules from remaining group")
|
||||
require.Empty(t, nextToken, "should not have continue token when all groups are fetched")
|
||||
|
||||
lastGroup := "test"
|
||||
if matchesAscLexOrder {
|
||||
lastGroup = "TEST"
|
||||
}
|
||||
|
||||
// Verify the remaining group is "test"
|
||||
for _, rule := range remainingResult {
|
||||
require.Equal(t, lastGroup, rule.RuleGroup, "remaining group should be 'test'")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("should handle group limit of 1 correctly", func(t *testing.T) {
|
||||
result, continueToken, err := store.ListAlertRulesByGroup(context.Background(), &models.ListAlertRulesExtendedQuery{
|
||||
ListAlertRulesQuery: models.ListAlertRulesQuery{OrgID: 1},
|
||||
Limit: 1,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, result, 2, "should return 2 rules from first group")
|
||||
require.NotEmpty(t, continueToken, "should have continue token")
|
||||
|
||||
// Should only get the first group which can be "TEST" or "test" depending on charset
|
||||
expectedGroup := "TEST"
|
||||
if result[0].RuleGroup == "test" {
|
||||
expectedGroup = "test"
|
||||
}
|
||||
|
||||
for _, rule := range result {
|
||||
require.Equal(t, expectedGroup, rule.RuleGroup, "all rules should be from the first group")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestIntegrationIncreaseVersionForAllRulesInNamespaces(t *testing.T) {
|
||||
tutil.SkipIntegrationTestInShortMode(t)
|
||||
|
||||
|
||||
@@ -156,4 +156,8 @@ func (oss *OSSMigrations) AddMigration(mg *Migrator) {
|
||||
ualert.DropTitleUniqueIndexMigration(mg)
|
||||
|
||||
ualert.AddStateFiredAtColumn(mg)
|
||||
|
||||
ualert.CollateAlertRuleGroup(mg)
|
||||
|
||||
ualert.AddAlertRuleGroupIndexMigration(mg)
|
||||
}
|
||||
|
||||
@@ -0,0 +1,9 @@
|
||||
package ualert
|
||||
|
||||
import "github.com/grafana/grafana/pkg/services/sqlstore/migrator"
|
||||
|
||||
// CollateAlertRuleGroup ensures that rule_group column collates.
|
||||
func CollateAlertRuleGroup(mg *migrator.Migrator) {
|
||||
mg.AddMigration("ensure rule_group column is case sensitive in returned results", migrator.NewRawSQLMigration("").
|
||||
Mysql("ALTER TABLE alert_rule MODIFY rule_group VARCHAR(190) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_as_cs NOT NULL;"))
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user