Compare commits
58 Commits
chore/fix-
...
bugfix/fil
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
30219176e7 | ||
|
|
fa86386564 | ||
|
|
755edef944 | ||
|
|
f7bb66ea21 | ||
|
|
909b9b6bc1 | ||
|
|
f0ea97d105 | ||
|
|
48f415e24b | ||
|
|
f954464825 | ||
|
|
fdc84474ce | ||
|
|
95baa89e0f | ||
|
|
657bf76922 | ||
|
|
dc0ccd238b | ||
|
|
ca4b78f8ef | ||
|
|
2e9d0a626e | ||
|
|
af2c12228f | ||
|
|
50ff5b976c | ||
|
|
35affc57c2 | ||
|
|
9ceff992aa | ||
|
|
12dd3dffe0 | ||
|
|
7c6475262d | ||
|
|
b805d5cae0 | ||
|
|
49c5c0ce41 | ||
|
|
75caaccad4 | ||
|
|
00a6e1781f | ||
|
|
ca0c09cb73 | ||
|
|
c73f9600d7 | ||
|
|
1fbfa4d7fa | ||
|
|
c6831199a2 | ||
|
|
09e546a1f3 | ||
|
|
c47c360fd9 | ||
|
|
62cab8bd63 | ||
|
|
3b56643aa2 | ||
|
|
0250b37a4b | ||
|
|
1e031db607 | ||
|
|
848c84204a | ||
|
|
172f1fb974 | ||
|
|
a716549f36 | ||
|
|
e5c1de390d | ||
|
|
20f17d72c3 | ||
|
|
a3d7bd8dca | ||
|
|
074e8ce128 | ||
|
|
4149767391 | ||
|
|
0c49337205 | ||
|
|
c5345498b1 | ||
|
|
1bcccd5e61 | ||
|
|
12b38d1b7a | ||
|
|
359d097154 | ||
|
|
cfc5d96c34 | ||
|
|
3459c67bfb | ||
|
|
37ccd8bc3d | ||
|
|
5156177079 | ||
|
|
4817ecf6a3 | ||
|
|
c73cab8eef | ||
|
|
a37ebf609e | ||
|
|
b29e8ccb45 | ||
|
|
644f7b7001 | ||
|
|
629570926d | ||
|
|
1b59c82b74 |
3
.github/CODEOWNERS
vendored
3
.github/CODEOWNERS
vendored
@@ -520,7 +520,7 @@ i18next.config.ts @grafana/grafana-frontend-platform
|
||||
/e2e-playwright/various-suite/solo-route.spec.ts @grafana/dashboards-squad
|
||||
/e2e-playwright/various-suite/trace-view-scrolling.spec.ts @grafana/observability-traces-and-profiling
|
||||
/e2e-playwright/various-suite/verify-i18n.spec.ts @grafana/grafana-frontend-platform
|
||||
/e2e-playwright/various-suite/visualization-suggestions.spec.ts @grafana/dashboards-squad
|
||||
/e2e-playwright/various-suite/visualization-suggestions.spec.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/various-suite/perf-test.spec.ts @grafana/grafana-frontend-platform
|
||||
|
||||
# Packages
|
||||
@@ -956,6 +956,7 @@ playwright.storybook.config.ts @grafana/grafana-frontend-platform
|
||||
/public/app/features/notifications/ @grafana/grafana-search-navigate-organise
|
||||
/public/app/features/org/ @grafana/grafana-search-navigate-organise
|
||||
/public/app/features/panel/ @grafana/dashboards-squad
|
||||
/public/app/features/panel/components/VizTypePicker/VisualizationSuggestions.tsx @grafana/dataviz-squad
|
||||
/public/app/features/panel/suggestions/ @grafana/dataviz-squad
|
||||
/public/app/features/playlist/ @grafana/dashboards-squad
|
||||
/public/app/features/plugins/ @grafana/plugins-platform-frontend
|
||||
|
||||
1
.github/workflows/pr-patch-check-event.yml
vendored
1
.github/workflows/pr-patch-check-event.yml
vendored
@@ -12,6 +12,7 @@ on:
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
statuses: write
|
||||
|
||||
# Since this is run on a pull request, we want to apply the patches intended for the
|
||||
# target branch onto the source branch, to verify compatibility before merging.
|
||||
|
||||
21
.github/workflows/pr-patch-check.yml
vendored
21
.github/workflows/pr-patch-check.yml
vendored
@@ -29,6 +29,10 @@ permissions:
|
||||
# target branch onto the source branch, to verify compatibility before merging.
|
||||
jobs:
|
||||
dispatch-job:
|
||||
# If the source is not from a fork then dispatch the job to the workflow.
|
||||
# This will fail on forks when trying to broker a token, so instead, forks will create the required status and mark
|
||||
# it as a success
|
||||
if: ${{ ! github.event.pull_request.head.repo.fork }}
|
||||
env:
|
||||
HEAD_REF: ${{ inputs.head_ref }}
|
||||
BASE_REF: ${{ github.base_ref }}
|
||||
@@ -76,3 +80,20 @@ jobs:
|
||||
triggering_github_handle: SENDER
|
||||
}
|
||||
})
|
||||
dispatch-job-fork:
|
||||
# If the source is from a fork then use the built-in workflow token to create the same status and unconditionally
|
||||
# mark it as a success.
|
||||
if: ${{ github.event.pull_request.head.repo.fork }}
|
||||
permissions:
|
||||
statuses: write
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Create status
|
||||
uses: myrotvorets/set-commit-status-action@6d6905c99cd24a4a2cbccc720b62dc6ca5587141
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
sha: ${{ inputs.pr_commit_sha }}
|
||||
repo: ${{ inputs.repo }}
|
||||
status: success
|
||||
context: "Test Patches (event)"
|
||||
description: "Test Patches (event) on a fork"
|
||||
|
||||
603
apps/dashboard/pkg/migration/conversion/testdata/input/v1beta1.value-mapping-and-overrides.json
vendored
Normal file
603
apps/dashboard/pkg/migration/conversion/testdata/input/v1beta1.value-mapping-and-overrides.json
vendored
Normal file
@@ -0,0 +1,603 @@
|
||||
{
|
||||
"kind": "DashboardWithAccessInfo",
|
||||
"apiVersion": "dashboard.grafana.app/v1beta1",
|
||||
"metadata": {
|
||||
"name": "value-mapping-test",
|
||||
"namespace": "default",
|
||||
"uid": "value-mapping-test",
|
||||
"resourceVersion": "1765384157199094",
|
||||
"generation": 2,
|
||||
"creationTimestamp": "2025-11-19T20:09:28Z",
|
||||
"labels": {
|
||||
"grafana.app/deprecatedInternalID": "646372978987008"
|
||||
},
|
||||
"annotations": {},
|
||||
"managedFields": []
|
||||
},
|
||||
"spec": {
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": {
|
||||
"type": "grafana",
|
||||
"uid": "-- Grafana --"
|
||||
},
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"description": "Test dashboard for all value mapping types and override matcher types",
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 0,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"description": "Panel with ValueMap mapping type - maps specific text values to colors and display text",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"options": {
|
||||
"critical": {
|
||||
"color": "red",
|
||||
"index": 0,
|
||||
"text": "Critical!"
|
||||
},
|
||||
"warning": {
|
||||
"color": "orange",
|
||||
"index": 1,
|
||||
"text": "Warning"
|
||||
},
|
||||
"ok": {
|
||||
"color": "green",
|
||||
"index": 2,
|
||||
"text": "OK"
|
||||
}
|
||||
},
|
||||
"type": "value"
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "status"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 100
|
||||
},
|
||||
{
|
||||
"id": "custom.align",
|
||||
"value": "center"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 1,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "up",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "ValueMap Example",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"description": "Panel with RangeMap mapping type - maps numerical ranges to colors and display text",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"options": {
|
||||
"from": 0,
|
||||
"to": 50,
|
||||
"result": {
|
||||
"color": "green",
|
||||
"index": 0,
|
||||
"text": "Low"
|
||||
}
|
||||
},
|
||||
"type": "range"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"from": 50,
|
||||
"to": 80,
|
||||
"result": {
|
||||
"color": "orange",
|
||||
"index": 1,
|
||||
"text": "Medium"
|
||||
}
|
||||
},
|
||||
"type": "range"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"from": 80,
|
||||
"to": 100,
|
||||
"result": {
|
||||
"color": "red",
|
||||
"index": 2,
|
||||
"text": "High"
|
||||
}
|
||||
},
|
||||
"type": "range"
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byRegexp",
|
||||
"options": "/^cpu_/"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "percent"
|
||||
},
|
||||
{
|
||||
"id": "decimals",
|
||||
"value": 2
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"id": 2,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "cpu_usage_percent",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "RangeMap Example",
|
||||
"type": "gauge"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"description": "Panel with RegexMap mapping type - maps values matching regex patterns to colors",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"options": {
|
||||
"pattern": "/^error.*/",
|
||||
"result": {
|
||||
"color": "red",
|
||||
"index": 0,
|
||||
"text": "Error"
|
||||
}
|
||||
},
|
||||
"type": "regex"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"pattern": "/^warn.*/",
|
||||
"result": {
|
||||
"color": "orange",
|
||||
"index": 1,
|
||||
"text": "Warning"
|
||||
}
|
||||
},
|
||||
"type": "regex"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"pattern": "/^info.*/",
|
||||
"result": {
|
||||
"color": "blue",
|
||||
"index": 2,
|
||||
"text": "Info"
|
||||
}
|
||||
},
|
||||
"type": "regex"
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byType",
|
||||
"options": "string"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.cellOptions",
|
||||
"value": {
|
||||
"type": "color-text"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 8
|
||||
},
|
||||
"id": 3,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "log_level",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "RegexMap Example",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"description": "Panel with SpecialValueMap mapping type - maps special values like null, NaN, true, false to display text",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"options": {
|
||||
"match": "null",
|
||||
"result": {
|
||||
"color": "gray",
|
||||
"index": 0,
|
||||
"text": "No Data"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "nan",
|
||||
"result": {
|
||||
"color": "gray",
|
||||
"index": 1,
|
||||
"text": "Not a Number"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "null+nan",
|
||||
"result": {
|
||||
"color": "gray",
|
||||
"index": 2,
|
||||
"text": "N/A"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "true",
|
||||
"result": {
|
||||
"color": "green",
|
||||
"index": 3,
|
||||
"text": "Yes"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "false",
|
||||
"result": {
|
||||
"color": "red",
|
||||
"index": 4,
|
||||
"text": "No"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "empty",
|
||||
"result": {
|
||||
"color": "gray",
|
||||
"index": 5,
|
||||
"text": "Empty"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byFrameRefID",
|
||||
"options": "A"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "color",
|
||||
"value": {
|
||||
"mode": "fixed",
|
||||
"fixedColor": "blue"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 8
|
||||
},
|
||||
"id": 4,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "some_metric",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "SpecialValueMap Example",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"description": "Panel with all mapping types combined - demonstrates mixing different mapping types and multiple override matchers",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"options": {
|
||||
"success": {
|
||||
"color": "green",
|
||||
"index": 0,
|
||||
"text": "Success"
|
||||
},
|
||||
"failure": {
|
||||
"color": "red",
|
||||
"index": 1,
|
||||
"text": "Failure"
|
||||
}
|
||||
},
|
||||
"type": "value"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"from": 0,
|
||||
"to": 100,
|
||||
"result": {
|
||||
"color": "blue",
|
||||
"index": 2,
|
||||
"text": "In Range"
|
||||
}
|
||||
},
|
||||
"type": "range"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"pattern": "/^[A-Z]{3}-\\d+$/",
|
||||
"result": {
|
||||
"color": "purple",
|
||||
"index": 3,
|
||||
"text": "ID Format"
|
||||
}
|
||||
},
|
||||
"type": "regex"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "null",
|
||||
"result": {
|
||||
"color": "gray",
|
||||
"index": 4,
|
||||
"text": "Missing"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "status"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 120
|
||||
},
|
||||
{
|
||||
"id": "custom.cellOptions",
|
||||
"value": {
|
||||
"type": "color-background"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byRegexp",
|
||||
"options": "/^value_/"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "short"
|
||||
},
|
||||
{
|
||||
"id": "min",
|
||||
"value": 0
|
||||
},
|
||||
{
|
||||
"id": "max",
|
||||
"value": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byType",
|
||||
"options": "number"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "decimals",
|
||||
"value": 2
|
||||
},
|
||||
{
|
||||
"id": "thresholds",
|
||||
"value": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "yellow",
|
||||
"value": 50
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byFrameRefID",
|
||||
"options": "B"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Secondary Query"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"reducer": "allIsNull",
|
||||
"op": "gte",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hidden",
|
||||
"value": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 16
|
||||
},
|
||||
"id": 5,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "combined_metric",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "secondary_metric",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"title": "Combined Mappings and Overrides Example",
|
||||
"type": "table"
|
||||
}
|
||||
],
|
||||
"schemaVersion": 42,
|
||||
"tags": [
|
||||
"value-mapping",
|
||||
"overrides",
|
||||
"test"
|
||||
],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-6h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {},
|
||||
"timezone": "browser",
|
||||
"title": "Value Mapping and Overrides Test",
|
||||
"weekStart": ""
|
||||
},
|
||||
"status": {
|
||||
"conversion": {
|
||||
"failed": false,
|
||||
"storedVersion": "v0alpha1"
|
||||
}
|
||||
},
|
||||
"access": {
|
||||
"slug": "value-mapping-test",
|
||||
"url": "/d/value-mapping-test/value-mapping-and-overrides-test",
|
||||
"canSave": true,
|
||||
"canEdit": true,
|
||||
"canAdmin": true,
|
||||
"canStar": true,
|
||||
"canDelete": true,
|
||||
"annotationsPermissions": {
|
||||
"dashboard": {
|
||||
"canAdd": true,
|
||||
"canEdit": true,
|
||||
"canDelete": true
|
||||
},
|
||||
"organization": {
|
||||
"canAdd": true,
|
||||
"canEdit": true,
|
||||
"canDelete": true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,580 @@
|
||||
{
|
||||
"kind": "DashboardWithAccessInfo",
|
||||
"apiVersion": "dashboard.grafana.app/v0alpha1",
|
||||
"metadata": {
|
||||
"name": "value-mapping-test",
|
||||
"namespace": "default",
|
||||
"uid": "value-mapping-test",
|
||||
"resourceVersion": "1765384157199094",
|
||||
"generation": 2,
|
||||
"creationTimestamp": "2025-11-19T20:09:28Z",
|
||||
"labels": {
|
||||
"grafana.app/deprecatedInternalID": "646372978987008"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": {
|
||||
"type": "grafana",
|
||||
"uid": "-- Grafana --"
|
||||
},
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations \u0026 Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"description": "Test dashboard for all value mapping types and override matcher types",
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 0,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"description": "Panel with ValueMap mapping type - maps specific text values to colors and display text",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"options": {
|
||||
"critical": {
|
||||
"color": "red",
|
||||
"index": 0,
|
||||
"text": "Critical!"
|
||||
},
|
||||
"ok": {
|
||||
"color": "green",
|
||||
"index": 2,
|
||||
"text": "OK"
|
||||
},
|
||||
"warning": {
|
||||
"color": "orange",
|
||||
"index": 1,
|
||||
"text": "Warning"
|
||||
}
|
||||
},
|
||||
"type": "value"
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "status"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 100
|
||||
},
|
||||
{
|
||||
"id": "custom.align",
|
||||
"value": "center"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 1,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "up",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "ValueMap Example",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"description": "Panel with RangeMap mapping type - maps numerical ranges to colors and display text",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"options": {
|
||||
"from": 0,
|
||||
"result": {
|
||||
"color": "green",
|
||||
"index": 0,
|
||||
"text": "Low"
|
||||
},
|
||||
"to": 50
|
||||
},
|
||||
"type": "range"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"from": 50,
|
||||
"result": {
|
||||
"color": "orange",
|
||||
"index": 1,
|
||||
"text": "Medium"
|
||||
},
|
||||
"to": 80
|
||||
},
|
||||
"type": "range"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"from": 80,
|
||||
"result": {
|
||||
"color": "red",
|
||||
"index": 2,
|
||||
"text": "High"
|
||||
},
|
||||
"to": 100
|
||||
},
|
||||
"type": "range"
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byRegexp",
|
||||
"options": "/^cpu_/"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "percent"
|
||||
},
|
||||
{
|
||||
"id": "decimals",
|
||||
"value": 2
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"id": 2,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "cpu_usage_percent",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "RangeMap Example",
|
||||
"type": "gauge"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"description": "Panel with RegexMap mapping type - maps values matching regex patterns to colors",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"options": {
|
||||
"pattern": "/^error.*/",
|
||||
"result": {
|
||||
"color": "red",
|
||||
"index": 0,
|
||||
"text": "Error"
|
||||
}
|
||||
},
|
||||
"type": "regex"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"pattern": "/^warn.*/",
|
||||
"result": {
|
||||
"color": "orange",
|
||||
"index": 1,
|
||||
"text": "Warning"
|
||||
}
|
||||
},
|
||||
"type": "regex"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"pattern": "/^info.*/",
|
||||
"result": {
|
||||
"color": "blue",
|
||||
"index": 2,
|
||||
"text": "Info"
|
||||
}
|
||||
},
|
||||
"type": "regex"
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byType",
|
||||
"options": "string"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.cellOptions",
|
||||
"value": {
|
||||
"type": "color-text"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 8
|
||||
},
|
||||
"id": 3,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "log_level",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "RegexMap Example",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"description": "Panel with SpecialValueMap mapping type - maps special values like null, NaN, true, false to display text",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"options": {
|
||||
"match": "null",
|
||||
"result": {
|
||||
"color": "gray",
|
||||
"index": 0,
|
||||
"text": "No Data"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "nan",
|
||||
"result": {
|
||||
"color": "gray",
|
||||
"index": 1,
|
||||
"text": "Not a Number"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "null+nan",
|
||||
"result": {
|
||||
"color": "gray",
|
||||
"index": 2,
|
||||
"text": "N/A"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "true",
|
||||
"result": {
|
||||
"color": "green",
|
||||
"index": 3,
|
||||
"text": "Yes"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "false",
|
||||
"result": {
|
||||
"color": "red",
|
||||
"index": 4,
|
||||
"text": "No"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "empty",
|
||||
"result": {
|
||||
"color": "gray",
|
||||
"index": 5,
|
||||
"text": "Empty"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byFrameRefID",
|
||||
"options": "A"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "color",
|
||||
"value": {
|
||||
"fixedColor": "blue",
|
||||
"mode": "fixed"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 8
|
||||
},
|
||||
"id": 4,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "some_metric",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "SpecialValueMap Example",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"description": "Panel with all mapping types combined - demonstrates mixing different mapping types and multiple override matchers",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"options": {
|
||||
"failure": {
|
||||
"color": "red",
|
||||
"index": 1,
|
||||
"text": "Failure"
|
||||
},
|
||||
"success": {
|
||||
"color": "green",
|
||||
"index": 0,
|
||||
"text": "Success"
|
||||
}
|
||||
},
|
||||
"type": "value"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"from": 0,
|
||||
"result": {
|
||||
"color": "blue",
|
||||
"index": 2,
|
||||
"text": "In Range"
|
||||
},
|
||||
"to": 100
|
||||
},
|
||||
"type": "range"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"pattern": "/^[A-Z]{3}-\\d+$/",
|
||||
"result": {
|
||||
"color": "purple",
|
||||
"index": 3,
|
||||
"text": "ID Format"
|
||||
}
|
||||
},
|
||||
"type": "regex"
|
||||
},
|
||||
{
|
||||
"options": {
|
||||
"match": "null",
|
||||
"result": {
|
||||
"color": "gray",
|
||||
"index": 4,
|
||||
"text": "Missing"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "status"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 120
|
||||
},
|
||||
{
|
||||
"id": "custom.cellOptions",
|
||||
"value": {
|
||||
"type": "color-background"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byRegexp",
|
||||
"options": "/^value_/"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "short"
|
||||
},
|
||||
{
|
||||
"id": "min",
|
||||
"value": 0
|
||||
},
|
||||
{
|
||||
"id": "max",
|
||||
"value": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byType",
|
||||
"options": "number"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "decimals",
|
||||
"value": 2
|
||||
},
|
||||
{
|
||||
"id": "thresholds",
|
||||
"value": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "yellow",
|
||||
"value": 50
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byFrameRefID",
|
||||
"options": "B"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Secondary Query"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsNull",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hidden",
|
||||
"value": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 16
|
||||
},
|
||||
"id": 5,
|
||||
"targets": [
|
||||
{
|
||||
"expr": "combined_metric",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "secondary_metric",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"title": "Combined Mappings and Overrides Example",
|
||||
"type": "table"
|
||||
}
|
||||
],
|
||||
"schemaVersion": 42,
|
||||
"tags": [
|
||||
"value-mapping",
|
||||
"overrides",
|
||||
"test"
|
||||
],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-6h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {},
|
||||
"timezone": "browser",
|
||||
"title": "Value Mapping and Overrides Test",
|
||||
"weekStart": ""
|
||||
},
|
||||
"status": {
|
||||
"conversion": {
|
||||
"failed": false,
|
||||
"storedVersion": "v1beta1"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,783 @@
|
||||
{
|
||||
"kind": "DashboardWithAccessInfo",
|
||||
"apiVersion": "dashboard.grafana.app/v2alpha1",
|
||||
"metadata": {
|
||||
"name": "value-mapping-test",
|
||||
"namespace": "default",
|
||||
"uid": "value-mapping-test",
|
||||
"resourceVersion": "1765384157199094",
|
||||
"generation": 2,
|
||||
"creationTimestamp": "2025-11-19T20:09:28Z",
|
||||
"labels": {
|
||||
"grafana.app/deprecatedInternalID": "646372978987008"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"annotations": [
|
||||
{
|
||||
"kind": "AnnotationQuery",
|
||||
"spec": {
|
||||
"datasource": {
|
||||
"type": "grafana",
|
||||
"uid": "-- Grafana --"
|
||||
},
|
||||
"query": {
|
||||
"kind": "grafana",
|
||||
"spec": {}
|
||||
},
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations \u0026 Alerts",
|
||||
"builtIn": true,
|
||||
"legacyOptions": {
|
||||
"type": "dashboard"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"cursorSync": "Off",
|
||||
"description": "Test dashboard for all value mapping types and override matcher types",
|
||||
"editable": true,
|
||||
"elements": {
|
||||
"panel-1": {
|
||||
"kind": "Panel",
|
||||
"spec": {
|
||||
"id": 1,
|
||||
"title": "ValueMap Example",
|
||||
"description": "Panel with ValueMap mapping type - maps specific text values to colors and display text",
|
||||
"links": [],
|
||||
"data": {
|
||||
"kind": "QueryGroup",
|
||||
"spec": {
|
||||
"queries": [
|
||||
{
|
||||
"kind": "PanelQuery",
|
||||
"spec": {
|
||||
"query": {
|
||||
"kind": "prometheus",
|
||||
"spec": {
|
||||
"expr": "up"
|
||||
}
|
||||
},
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"refId": "A",
|
||||
"hidden": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"transformations": [],
|
||||
"queryOptions": {}
|
||||
}
|
||||
},
|
||||
"vizConfig": {
|
||||
"kind": "stat",
|
||||
"spec": {
|
||||
"pluginVersion": "",
|
||||
"options": {},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"type": "value",
|
||||
"options": {
|
||||
"critical": {
|
||||
"text": "Critical!",
|
||||
"color": "red",
|
||||
"index": 0
|
||||
},
|
||||
"ok": {
|
||||
"text": "OK",
|
||||
"color": "green",
|
||||
"index": 2
|
||||
},
|
||||
"warning": {
|
||||
"text": "Warning",
|
||||
"color": "orange",
|
||||
"index": 1
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "status"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 100
|
||||
},
|
||||
{
|
||||
"id": "custom.align",
|
||||
"value": "center"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"panel-2": {
|
||||
"kind": "Panel",
|
||||
"spec": {
|
||||
"id": 2,
|
||||
"title": "RangeMap Example",
|
||||
"description": "Panel with RangeMap mapping type - maps numerical ranges to colors and display text",
|
||||
"links": [],
|
||||
"data": {
|
||||
"kind": "QueryGroup",
|
||||
"spec": {
|
||||
"queries": [
|
||||
{
|
||||
"kind": "PanelQuery",
|
||||
"spec": {
|
||||
"query": {
|
||||
"kind": "prometheus",
|
||||
"spec": {
|
||||
"expr": "cpu_usage_percent"
|
||||
}
|
||||
},
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"refId": "A",
|
||||
"hidden": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"transformations": [],
|
||||
"queryOptions": {}
|
||||
}
|
||||
},
|
||||
"vizConfig": {
|
||||
"kind": "gauge",
|
||||
"spec": {
|
||||
"pluginVersion": "",
|
||||
"options": {},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"type": "range",
|
||||
"options": {
|
||||
"from": 0,
|
||||
"to": 50,
|
||||
"result": {
|
||||
"text": "Low",
|
||||
"color": "green",
|
||||
"index": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "range",
|
||||
"options": {
|
||||
"from": 50,
|
||||
"to": 80,
|
||||
"result": {
|
||||
"text": "Medium",
|
||||
"color": "orange",
|
||||
"index": 1
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "range",
|
||||
"options": {
|
||||
"from": 80,
|
||||
"to": 100,
|
||||
"result": {
|
||||
"text": "High",
|
||||
"color": "red",
|
||||
"index": 2
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byRegexp",
|
||||
"options": "/^cpu_/"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "percent"
|
||||
},
|
||||
{
|
||||
"id": "decimals",
|
||||
"value": 2
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"panel-3": {
|
||||
"kind": "Panel",
|
||||
"spec": {
|
||||
"id": 3,
|
||||
"title": "RegexMap Example",
|
||||
"description": "Panel with RegexMap mapping type - maps values matching regex patterns to colors",
|
||||
"links": [],
|
||||
"data": {
|
||||
"kind": "QueryGroup",
|
||||
"spec": {
|
||||
"queries": [
|
||||
{
|
||||
"kind": "PanelQuery",
|
||||
"spec": {
|
||||
"query": {
|
||||
"kind": "prometheus",
|
||||
"spec": {
|
||||
"expr": "log_level"
|
||||
}
|
||||
},
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"refId": "A",
|
||||
"hidden": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"transformations": [],
|
||||
"queryOptions": {}
|
||||
}
|
||||
},
|
||||
"vizConfig": {
|
||||
"kind": "stat",
|
||||
"spec": {
|
||||
"pluginVersion": "",
|
||||
"options": {},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"type": "regex",
|
||||
"options": {
|
||||
"pattern": "/^error.*/",
|
||||
"result": {
|
||||
"text": "Error",
|
||||
"color": "red",
|
||||
"index": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "regex",
|
||||
"options": {
|
||||
"pattern": "/^warn.*/",
|
||||
"result": {
|
||||
"text": "Warning",
|
||||
"color": "orange",
|
||||
"index": 1
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "regex",
|
||||
"options": {
|
||||
"pattern": "/^info.*/",
|
||||
"result": {
|
||||
"text": "Info",
|
||||
"color": "blue",
|
||||
"index": 2
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byType",
|
||||
"options": "string"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.cellOptions",
|
||||
"value": {
|
||||
"type": "color-text"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"panel-4": {
|
||||
"kind": "Panel",
|
||||
"spec": {
|
||||
"id": 4,
|
||||
"title": "SpecialValueMap Example",
|
||||
"description": "Panel with SpecialValueMap mapping type - maps special values like null, NaN, true, false to display text",
|
||||
"links": [],
|
||||
"data": {
|
||||
"kind": "QueryGroup",
|
||||
"spec": {
|
||||
"queries": [
|
||||
{
|
||||
"kind": "PanelQuery",
|
||||
"spec": {
|
||||
"query": {
|
||||
"kind": "prometheus",
|
||||
"spec": {
|
||||
"expr": "some_metric"
|
||||
}
|
||||
},
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"refId": "A",
|
||||
"hidden": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"transformations": [],
|
||||
"queryOptions": {}
|
||||
}
|
||||
},
|
||||
"vizConfig": {
|
||||
"kind": "stat",
|
||||
"spec": {
|
||||
"pluginVersion": "",
|
||||
"options": {},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"type": "special",
|
||||
"options": {
|
||||
"match": "null",
|
||||
"result": {
|
||||
"text": "No Data",
|
||||
"color": "gray",
|
||||
"index": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "special",
|
||||
"options": {
|
||||
"match": "nan",
|
||||
"result": {
|
||||
"text": "Not a Number",
|
||||
"color": "gray",
|
||||
"index": 1
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "special",
|
||||
"options": {
|
||||
"match": "null+nan",
|
||||
"result": {
|
||||
"text": "N/A",
|
||||
"color": "gray",
|
||||
"index": 2
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "special",
|
||||
"options": {
|
||||
"match": "true",
|
||||
"result": {
|
||||
"text": "Yes",
|
||||
"color": "green",
|
||||
"index": 3
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "special",
|
||||
"options": {
|
||||
"match": "false",
|
||||
"result": {
|
||||
"text": "No",
|
||||
"color": "red",
|
||||
"index": 4
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "special",
|
||||
"options": {
|
||||
"match": "empty",
|
||||
"result": {
|
||||
"text": "Empty",
|
||||
"color": "gray",
|
||||
"index": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byFrameRefID",
|
||||
"options": "A"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "color",
|
||||
"value": {
|
||||
"fixedColor": "blue",
|
||||
"mode": "fixed"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"panel-5": {
|
||||
"kind": "Panel",
|
||||
"spec": {
|
||||
"id": 5,
|
||||
"title": "Combined Mappings and Overrides Example",
|
||||
"description": "Panel with all mapping types combined - demonstrates mixing different mapping types and multiple override matchers",
|
||||
"links": [],
|
||||
"data": {
|
||||
"kind": "QueryGroup",
|
||||
"spec": {
|
||||
"queries": [
|
||||
{
|
||||
"kind": "PanelQuery",
|
||||
"spec": {
|
||||
"query": {
|
||||
"kind": "prometheus",
|
||||
"spec": {
|
||||
"expr": "combined_metric"
|
||||
}
|
||||
},
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"refId": "A",
|
||||
"hidden": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"kind": "PanelQuery",
|
||||
"spec": {
|
||||
"query": {
|
||||
"kind": "prometheus",
|
||||
"spec": {
|
||||
"expr": "secondary_metric"
|
||||
}
|
||||
},
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus-uid"
|
||||
},
|
||||
"refId": "B",
|
||||
"hidden": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"transformations": [],
|
||||
"queryOptions": {}
|
||||
}
|
||||
},
|
||||
"vizConfig": {
|
||||
"kind": "table",
|
||||
"spec": {
|
||||
"pluginVersion": "",
|
||||
"options": {},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"type": "value",
|
||||
"options": {
|
||||
"failure": {
|
||||
"text": "Failure",
|
||||
"color": "red",
|
||||
"index": 1
|
||||
},
|
||||
"success": {
|
||||
"text": "Success",
|
||||
"color": "green",
|
||||
"index": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "range",
|
||||
"options": {
|
||||
"from": 0,
|
||||
"to": 100,
|
||||
"result": {
|
||||
"text": "In Range",
|
||||
"color": "blue",
|
||||
"index": 2
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "regex",
|
||||
"options": {
|
||||
"pattern": "/^[A-Z]{3}-\\d+$/",
|
||||
"result": {
|
||||
"text": "ID Format",
|
||||
"color": "purple",
|
||||
"index": 3
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "special",
|
||||
"options": {
|
||||
"match": "null",
|
||||
"result": {
|
||||
"text": "Missing",
|
||||
"color": "gray",
|
||||
"index": 4
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "status"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 120
|
||||
},
|
||||
{
|
||||
"id": "custom.cellOptions",
|
||||
"value": {
|
||||
"type": "color-background"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byRegexp",
|
||||
"options": "/^value_/"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "short"
|
||||
},
|
||||
{
|
||||
"id": "min",
|
||||
"value": 0
|
||||
},
|
||||
{
|
||||
"id": "max",
|
||||
"value": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byType",
|
||||
"options": "number"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "decimals",
|
||||
"value": 2
|
||||
},
|
||||
{
|
||||
"id": "thresholds",
|
||||
"value": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "yellow",
|
||||
"value": 50
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byFrameRefID",
|
||||
"options": "B"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Secondary Query"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsNull",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hidden",
|
||||
"value": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"layout": {
|
||||
"kind": "GridLayout",
|
||||
"spec": {
|
||||
"items": [
|
||||
{
|
||||
"kind": "GridLayoutItem",
|
||||
"spec": {
|
||||
"x": 0,
|
||||
"y": 0,
|
||||
"width": 12,
|
||||
"height": 8,
|
||||
"element": {
|
||||
"kind": "ElementReference",
|
||||
"name": "panel-1"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"kind": "GridLayoutItem",
|
||||
"spec": {
|
||||
"x": 12,
|
||||
"y": 0,
|
||||
"width": 12,
|
||||
"height": 8,
|
||||
"element": {
|
||||
"kind": "ElementReference",
|
||||
"name": "panel-2"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"kind": "GridLayoutItem",
|
||||
"spec": {
|
||||
"x": 0,
|
||||
"y": 8,
|
||||
"width": 12,
|
||||
"height": 8,
|
||||
"element": {
|
||||
"kind": "ElementReference",
|
||||
"name": "panel-3"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"kind": "GridLayoutItem",
|
||||
"spec": {
|
||||
"x": 12,
|
||||
"y": 8,
|
||||
"width": 12,
|
||||
"height": 8,
|
||||
"element": {
|
||||
"kind": "ElementReference",
|
||||
"name": "panel-4"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"kind": "GridLayoutItem",
|
||||
"spec": {
|
||||
"x": 0,
|
||||
"y": 16,
|
||||
"width": 24,
|
||||
"height": 8,
|
||||
"element": {
|
||||
"kind": "ElementReference",
|
||||
"name": "panel-5"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"links": [],
|
||||
"liveNow": false,
|
||||
"preload": false,
|
||||
"tags": [
|
||||
"value-mapping",
|
||||
"overrides",
|
||||
"test"
|
||||
],
|
||||
"timeSettings": {
|
||||
"timezone": "browser",
|
||||
"from": "now-6h",
|
||||
"to": "now",
|
||||
"autoRefresh": "",
|
||||
"autoRefreshIntervals": [
|
||||
"5s",
|
||||
"10s",
|
||||
"30s",
|
||||
"1m",
|
||||
"5m",
|
||||
"15m",
|
||||
"30m",
|
||||
"1h",
|
||||
"2h",
|
||||
"1d"
|
||||
],
|
||||
"hideTimepicker": false,
|
||||
"fiscalYearStartMonth": 0
|
||||
},
|
||||
"title": "Value Mapping and Overrides Test",
|
||||
"variables": []
|
||||
},
|
||||
"status": {
|
||||
"conversion": {
|
||||
"failed": false,
|
||||
"storedVersion": "v1beta1"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,795 @@
|
||||
{
|
||||
"kind": "DashboardWithAccessInfo",
|
||||
"apiVersion": "dashboard.grafana.app/v2beta1",
|
||||
"metadata": {
|
||||
"name": "value-mapping-test",
|
||||
"namespace": "default",
|
||||
"uid": "value-mapping-test",
|
||||
"resourceVersion": "1765384157199094",
|
||||
"generation": 2,
|
||||
"creationTimestamp": "2025-11-19T20:09:28Z",
|
||||
"labels": {
|
||||
"grafana.app/deprecatedInternalID": "646372978987008"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"annotations": [
|
||||
{
|
||||
"kind": "AnnotationQuery",
|
||||
"spec": {
|
||||
"query": {
|
||||
"kind": "DataQuery",
|
||||
"group": "grafana",
|
||||
"version": "v0",
|
||||
"datasource": {
|
||||
"name": "-- Grafana --"
|
||||
},
|
||||
"spec": {}
|
||||
},
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations \u0026 Alerts",
|
||||
"builtIn": true,
|
||||
"legacyOptions": {
|
||||
"type": "dashboard"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"cursorSync": "Off",
|
||||
"description": "Test dashboard for all value mapping types and override matcher types",
|
||||
"editable": true,
|
||||
"elements": {
|
||||
"panel-1": {
|
||||
"kind": "Panel",
|
||||
"spec": {
|
||||
"id": 1,
|
||||
"title": "ValueMap Example",
|
||||
"description": "Panel with ValueMap mapping type - maps specific text values to colors and display text",
|
||||
"links": [],
|
||||
"data": {
|
||||
"kind": "QueryGroup",
|
||||
"spec": {
|
||||
"queries": [
|
||||
{
|
||||
"kind": "PanelQuery",
|
||||
"spec": {
|
||||
"query": {
|
||||
"kind": "DataQuery",
|
||||
"group": "prometheus",
|
||||
"version": "v0",
|
||||
"datasource": {
|
||||
"name": "prometheus-uid"
|
||||
},
|
||||
"spec": {
|
||||
"expr": "up"
|
||||
}
|
||||
},
|
||||
"refId": "A",
|
||||
"hidden": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"transformations": [],
|
||||
"queryOptions": {}
|
||||
}
|
||||
},
|
||||
"vizConfig": {
|
||||
"kind": "VizConfig",
|
||||
"group": "stat",
|
||||
"version": "",
|
||||
"spec": {
|
||||
"options": {},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"type": "value",
|
||||
"options": {
|
||||
"critical": {
|
||||
"text": "Critical!",
|
||||
"color": "red",
|
||||
"index": 0
|
||||
},
|
||||
"ok": {
|
||||
"text": "OK",
|
||||
"color": "green",
|
||||
"index": 2
|
||||
},
|
||||
"warning": {
|
||||
"text": "Warning",
|
||||
"color": "orange",
|
||||
"index": 1
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "status"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 100
|
||||
},
|
||||
{
|
||||
"id": "custom.align",
|
||||
"value": "center"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"panel-2": {
|
||||
"kind": "Panel",
|
||||
"spec": {
|
||||
"id": 2,
|
||||
"title": "RangeMap Example",
|
||||
"description": "Panel with RangeMap mapping type - maps numerical ranges to colors and display text",
|
||||
"links": [],
|
||||
"data": {
|
||||
"kind": "QueryGroup",
|
||||
"spec": {
|
||||
"queries": [
|
||||
{
|
||||
"kind": "PanelQuery",
|
||||
"spec": {
|
||||
"query": {
|
||||
"kind": "DataQuery",
|
||||
"group": "prometheus",
|
||||
"version": "v0",
|
||||
"datasource": {
|
||||
"name": "prometheus-uid"
|
||||
},
|
||||
"spec": {
|
||||
"expr": "cpu_usage_percent"
|
||||
}
|
||||
},
|
||||
"refId": "A",
|
||||
"hidden": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"transformations": [],
|
||||
"queryOptions": {}
|
||||
}
|
||||
},
|
||||
"vizConfig": {
|
||||
"kind": "VizConfig",
|
||||
"group": "gauge",
|
||||
"version": "",
|
||||
"spec": {
|
||||
"options": {},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"type": "range",
|
||||
"options": {
|
||||
"from": 0,
|
||||
"to": 50,
|
||||
"result": {
|
||||
"text": "Low",
|
||||
"color": "green",
|
||||
"index": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "range",
|
||||
"options": {
|
||||
"from": 50,
|
||||
"to": 80,
|
||||
"result": {
|
||||
"text": "Medium",
|
||||
"color": "orange",
|
||||
"index": 1
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "range",
|
||||
"options": {
|
||||
"from": 80,
|
||||
"to": 100,
|
||||
"result": {
|
||||
"text": "High",
|
||||
"color": "red",
|
||||
"index": 2
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byRegexp",
|
||||
"options": "/^cpu_/"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "percent"
|
||||
},
|
||||
{
|
||||
"id": "decimals",
|
||||
"value": 2
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"panel-3": {
|
||||
"kind": "Panel",
|
||||
"spec": {
|
||||
"id": 3,
|
||||
"title": "RegexMap Example",
|
||||
"description": "Panel with RegexMap mapping type - maps values matching regex patterns to colors",
|
||||
"links": [],
|
||||
"data": {
|
||||
"kind": "QueryGroup",
|
||||
"spec": {
|
||||
"queries": [
|
||||
{
|
||||
"kind": "PanelQuery",
|
||||
"spec": {
|
||||
"query": {
|
||||
"kind": "DataQuery",
|
||||
"group": "prometheus",
|
||||
"version": "v0",
|
||||
"datasource": {
|
||||
"name": "prometheus-uid"
|
||||
},
|
||||
"spec": {
|
||||
"expr": "log_level"
|
||||
}
|
||||
},
|
||||
"refId": "A",
|
||||
"hidden": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"transformations": [],
|
||||
"queryOptions": {}
|
||||
}
|
||||
},
|
||||
"vizConfig": {
|
||||
"kind": "VizConfig",
|
||||
"group": "stat",
|
||||
"version": "",
|
||||
"spec": {
|
||||
"options": {},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"type": "regex",
|
||||
"options": {
|
||||
"pattern": "/^error.*/",
|
||||
"result": {
|
||||
"text": "Error",
|
||||
"color": "red",
|
||||
"index": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "regex",
|
||||
"options": {
|
||||
"pattern": "/^warn.*/",
|
||||
"result": {
|
||||
"text": "Warning",
|
||||
"color": "orange",
|
||||
"index": 1
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "regex",
|
||||
"options": {
|
||||
"pattern": "/^info.*/",
|
||||
"result": {
|
||||
"text": "Info",
|
||||
"color": "blue",
|
||||
"index": 2
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byType",
|
||||
"options": "string"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.cellOptions",
|
||||
"value": {
|
||||
"type": "color-text"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"panel-4": {
|
||||
"kind": "Panel",
|
||||
"spec": {
|
||||
"id": 4,
|
||||
"title": "SpecialValueMap Example",
|
||||
"description": "Panel with SpecialValueMap mapping type - maps special values like null, NaN, true, false to display text",
|
||||
"links": [],
|
||||
"data": {
|
||||
"kind": "QueryGroup",
|
||||
"spec": {
|
||||
"queries": [
|
||||
{
|
||||
"kind": "PanelQuery",
|
||||
"spec": {
|
||||
"query": {
|
||||
"kind": "DataQuery",
|
||||
"group": "prometheus",
|
||||
"version": "v0",
|
||||
"datasource": {
|
||||
"name": "prometheus-uid"
|
||||
},
|
||||
"spec": {
|
||||
"expr": "some_metric"
|
||||
}
|
||||
},
|
||||
"refId": "A",
|
||||
"hidden": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"transformations": [],
|
||||
"queryOptions": {}
|
||||
}
|
||||
},
|
||||
"vizConfig": {
|
||||
"kind": "VizConfig",
|
||||
"group": "stat",
|
||||
"version": "",
|
||||
"spec": {
|
||||
"options": {},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"type": "special",
|
||||
"options": {
|
||||
"match": "null",
|
||||
"result": {
|
||||
"text": "No Data",
|
||||
"color": "gray",
|
||||
"index": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "special",
|
||||
"options": {
|
||||
"match": "nan",
|
||||
"result": {
|
||||
"text": "Not a Number",
|
||||
"color": "gray",
|
||||
"index": 1
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "special",
|
||||
"options": {
|
||||
"match": "null+nan",
|
||||
"result": {
|
||||
"text": "N/A",
|
||||
"color": "gray",
|
||||
"index": 2
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "special",
|
||||
"options": {
|
||||
"match": "true",
|
||||
"result": {
|
||||
"text": "Yes",
|
||||
"color": "green",
|
||||
"index": 3
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "special",
|
||||
"options": {
|
||||
"match": "false",
|
||||
"result": {
|
||||
"text": "No",
|
||||
"color": "red",
|
||||
"index": 4
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "special",
|
||||
"options": {
|
||||
"match": "empty",
|
||||
"result": {
|
||||
"text": "Empty",
|
||||
"color": "gray",
|
||||
"index": 5
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byFrameRefID",
|
||||
"options": "A"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "color",
|
||||
"value": {
|
||||
"fixedColor": "blue",
|
||||
"mode": "fixed"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"panel-5": {
|
||||
"kind": "Panel",
|
||||
"spec": {
|
||||
"id": 5,
|
||||
"title": "Combined Mappings and Overrides Example",
|
||||
"description": "Panel with all mapping types combined - demonstrates mixing different mapping types and multiple override matchers",
|
||||
"links": [],
|
||||
"data": {
|
||||
"kind": "QueryGroup",
|
||||
"spec": {
|
||||
"queries": [
|
||||
{
|
||||
"kind": "PanelQuery",
|
||||
"spec": {
|
||||
"query": {
|
||||
"kind": "DataQuery",
|
||||
"group": "prometheus",
|
||||
"version": "v0",
|
||||
"datasource": {
|
||||
"name": "prometheus-uid"
|
||||
},
|
||||
"spec": {
|
||||
"expr": "combined_metric"
|
||||
}
|
||||
},
|
||||
"refId": "A",
|
||||
"hidden": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"kind": "PanelQuery",
|
||||
"spec": {
|
||||
"query": {
|
||||
"kind": "DataQuery",
|
||||
"group": "prometheus",
|
||||
"version": "v0",
|
||||
"datasource": {
|
||||
"name": "prometheus-uid"
|
||||
},
|
||||
"spec": {
|
||||
"expr": "secondary_metric"
|
||||
}
|
||||
},
|
||||
"refId": "B",
|
||||
"hidden": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"transformations": [],
|
||||
"queryOptions": {}
|
||||
}
|
||||
},
|
||||
"vizConfig": {
|
||||
"kind": "VizConfig",
|
||||
"group": "table",
|
||||
"version": "",
|
||||
"spec": {
|
||||
"options": {},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [
|
||||
{
|
||||
"type": "value",
|
||||
"options": {
|
||||
"failure": {
|
||||
"text": "Failure",
|
||||
"color": "red",
|
||||
"index": 1
|
||||
},
|
||||
"success": {
|
||||
"text": "Success",
|
||||
"color": "green",
|
||||
"index": 0
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "range",
|
||||
"options": {
|
||||
"from": 0,
|
||||
"to": 100,
|
||||
"result": {
|
||||
"text": "In Range",
|
||||
"color": "blue",
|
||||
"index": 2
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "regex",
|
||||
"options": {
|
||||
"pattern": "/^[A-Z]{3}-\\d+$/",
|
||||
"result": {
|
||||
"text": "ID Format",
|
||||
"color": "purple",
|
||||
"index": 3
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "special",
|
||||
"options": {
|
||||
"match": "null",
|
||||
"result": {
|
||||
"text": "Missing",
|
||||
"color": "gray",
|
||||
"index": 4
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "status"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 120
|
||||
},
|
||||
{
|
||||
"id": "custom.cellOptions",
|
||||
"value": {
|
||||
"type": "color-background"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byRegexp",
|
||||
"options": "/^value_/"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "unit",
|
||||
"value": "short"
|
||||
},
|
||||
{
|
||||
"id": "min",
|
||||
"value": 0
|
||||
},
|
||||
{
|
||||
"id": "max",
|
||||
"value": 100
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byType",
|
||||
"options": "number"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "decimals",
|
||||
"value": 2
|
||||
},
|
||||
{
|
||||
"id": "thresholds",
|
||||
"value": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "yellow",
|
||||
"value": 50
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byFrameRefID",
|
||||
"options": "B"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "displayName",
|
||||
"value": "Secondary Query"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byValue",
|
||||
"options": {
|
||||
"op": "gte",
|
||||
"reducer": "allIsNull",
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hidden",
|
||||
"value": true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"layout": {
|
||||
"kind": "GridLayout",
|
||||
"spec": {
|
||||
"items": [
|
||||
{
|
||||
"kind": "GridLayoutItem",
|
||||
"spec": {
|
||||
"x": 0,
|
||||
"y": 0,
|
||||
"width": 12,
|
||||
"height": 8,
|
||||
"element": {
|
||||
"kind": "ElementReference",
|
||||
"name": "panel-1"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"kind": "GridLayoutItem",
|
||||
"spec": {
|
||||
"x": 12,
|
||||
"y": 0,
|
||||
"width": 12,
|
||||
"height": 8,
|
||||
"element": {
|
||||
"kind": "ElementReference",
|
||||
"name": "panel-2"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"kind": "GridLayoutItem",
|
||||
"spec": {
|
||||
"x": 0,
|
||||
"y": 8,
|
||||
"width": 12,
|
||||
"height": 8,
|
||||
"element": {
|
||||
"kind": "ElementReference",
|
||||
"name": "panel-3"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"kind": "GridLayoutItem",
|
||||
"spec": {
|
||||
"x": 12,
|
||||
"y": 8,
|
||||
"width": 12,
|
||||
"height": 8,
|
||||
"element": {
|
||||
"kind": "ElementReference",
|
||||
"name": "panel-4"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"kind": "GridLayoutItem",
|
||||
"spec": {
|
||||
"x": 0,
|
||||
"y": 16,
|
||||
"width": 24,
|
||||
"height": 8,
|
||||
"element": {
|
||||
"kind": "ElementReference",
|
||||
"name": "panel-5"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"links": [],
|
||||
"liveNow": false,
|
||||
"preload": false,
|
||||
"tags": [
|
||||
"value-mapping",
|
||||
"overrides",
|
||||
"test"
|
||||
],
|
||||
"timeSettings": {
|
||||
"timezone": "browser",
|
||||
"from": "now-6h",
|
||||
"to": "now",
|
||||
"autoRefresh": "",
|
||||
"autoRefreshIntervals": [
|
||||
"5s",
|
||||
"10s",
|
||||
"30s",
|
||||
"1m",
|
||||
"5m",
|
||||
"15m",
|
||||
"30m",
|
||||
"1h",
|
||||
"2h",
|
||||
"1d"
|
||||
],
|
||||
"hideTimepicker": false,
|
||||
"fiscalYearStartMonth": 0
|
||||
},
|
||||
"title": "Value Mapping and Overrides Test",
|
||||
"variables": []
|
||||
},
|
||||
"status": {
|
||||
"conversion": {
|
||||
"failed": false,
|
||||
"storedVersion": "v1beta1"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2022,6 +2022,9 @@ func transformPanelQueries(ctx context.Context, panelMap map[string]interface{},
|
||||
|
||||
func transformSingleQuery(ctx context.Context, targetMap map[string]interface{}, panelDatasource *dashv2alpha1.DashboardDataSourceRef, dsIndexProvider schemaversion.DataSourceIndexProvider) dashv2alpha1.DashboardPanelQueryKind {
|
||||
refId := schemaversion.GetStringValue(targetMap, "refId", "A")
|
||||
if refId == "" {
|
||||
refId = "A"
|
||||
}
|
||||
hidden := getBoolField(targetMap, "hide", false)
|
||||
|
||||
// Extract datasource from query or use panel datasource
|
||||
@@ -2518,22 +2521,15 @@ func buildRegexMap(mappingMap map[string]interface{}) *dashv2alpha1.DashboardReg
|
||||
regexMap := &dashv2alpha1.DashboardRegexMap{}
|
||||
regexMap.Type = dashv2alpha1.DashboardMappingTypeRegex
|
||||
|
||||
opts, ok := mappingMap["options"].([]interface{})
|
||||
if !ok || len(opts) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
optMap, ok := opts[0].(map[string]interface{})
|
||||
optMap, ok := mappingMap["options"].(map[string]interface{})
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
r := dashv2alpha1.DashboardV2alpha1RegexMapOptions{}
|
||||
if pattern, ok := optMap["regex"].(string); ok {
|
||||
if pattern, ok := optMap["pattern"].(string); ok {
|
||||
r.Pattern = pattern
|
||||
}
|
||||
|
||||
// Result is a DashboardValueMappingResult
|
||||
if resMap, ok := optMap["result"].(map[string]interface{}); ok {
|
||||
r.Result = buildValueMappingResult(resMap)
|
||||
}
|
||||
|
||||
@@ -211,6 +211,12 @@ type ScopeNavigationSpec struct {
|
||||
Scope string `json:"scope"`
|
||||
// Used to navigate to a sub-scope of the main scope. URL will not be used if this is set.
|
||||
SubScope string `json:"subScope,omitempty"`
|
||||
// Preload the subscope children, as soon as the ScopeNavigation is loaded.
|
||||
PreLoadSubScopeChildren bool `json:"preLoadSubScopeChildren,omitempty"`
|
||||
// Expands to display the subscope children when the ScopeNavigation is loaded.
|
||||
ExpandOnLoad bool `json:"expandOnLoad,omitempty"`
|
||||
// Makes the subscope not selectable, only serving as a way to build the tree.
|
||||
DisableSubScopeSelection bool `json:"disableSubScopeSelection,omitempty"`
|
||||
}
|
||||
|
||||
// Type of the item.
|
||||
|
||||
@@ -642,6 +642,27 @@ func schema_pkg_apis_scope_v0alpha1_ScopeNavigationSpec(ref common.ReferenceCall
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"preLoadSubScopeChildren": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Preload the subscope children, as soon as the ScopeNavigation is loaded.",
|
||||
Type: []string{"boolean"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"expandOnLoad": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Expands to display the subscope children when the ScopeNavigation is loaded.",
|
||||
Type: []string{"boolean"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"disableSubScopeSelection": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Makes the subscope not selectable, only serving as a way to build the tree.",
|
||||
Type: []string{"boolean"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"url", "scope"},
|
||||
},
|
||||
|
||||
@@ -210,6 +210,7 @@ navigationTree:
|
||||
url: /d/UTv--wqMk
|
||||
scope: shoe-org
|
||||
subScope: apparel
|
||||
disableSubScopeSelection: true
|
||||
children:
|
||||
- name: apparel-product-overview
|
||||
title: Product Overview
|
||||
|
||||
@@ -77,22 +77,24 @@ type TreeNode struct {
|
||||
}
|
||||
|
||||
type NavigationConfig struct {
|
||||
URL string `yaml:"url"` // URL path (e.g., /d/abc123 or /explore)
|
||||
Scope string `yaml:"scope"` // Required scope
|
||||
SubScope string `yaml:"subScope"` // Optional subScope for hierarchical navigation
|
||||
Title string `yaml:"title"` // Display title
|
||||
Groups []string `yaml:"groups"` // Optional groups for categorization
|
||||
URL string `yaml:"url"` // URL path (e.g., /d/abc123 or /explore)
|
||||
Scope string `yaml:"scope"` // Required scope
|
||||
SubScope string `yaml:"subScope"` // Optional subScope for hierarchical navigation
|
||||
Title string `yaml:"title"` // Display title
|
||||
Groups []string `yaml:"groups"` // Optional groups for categorization
|
||||
DisableSubScopeSelection bool `yaml:"disableSubScopeSelection"` // Makes the subscope not selectable
|
||||
}
|
||||
|
||||
// NavigationTreeNode represents a node in the navigation tree structure
|
||||
type NavigationTreeNode struct {
|
||||
Name string `yaml:"name"`
|
||||
Title string `yaml:"title"`
|
||||
URL string `yaml:"url"`
|
||||
Scope string `yaml:"scope"`
|
||||
SubScope string `yaml:"subScope,omitempty"`
|
||||
Groups []string `yaml:"groups,omitempty"`
|
||||
Children []NavigationTreeNode `yaml:"children,omitempty"`
|
||||
Name string `yaml:"name"`
|
||||
Title string `yaml:"title"`
|
||||
URL string `yaml:"url"`
|
||||
Scope string `yaml:"scope"`
|
||||
SubScope string `yaml:"subScope,omitempty"`
|
||||
Groups []string `yaml:"groups,omitempty"`
|
||||
DisableSubScopeSelection bool `yaml:"disableSubScopeSelection,omitempty"`
|
||||
Children []NavigationTreeNode `yaml:"children,omitempty"`
|
||||
}
|
||||
|
||||
// Helper function to convert ScopeFilterConfig to v0alpha1.ScopeFilter
|
||||
@@ -313,8 +315,9 @@ func (c *Client) createScopeNavigation(name string, nav NavigationConfig) error
|
||||
prefixedScope := prefix + "-" + nav.Scope
|
||||
|
||||
spec := v0alpha1.ScopeNavigationSpec{
|
||||
URL: nav.URL,
|
||||
Scope: prefixedScope,
|
||||
URL: nav.URL,
|
||||
Scope: prefixedScope,
|
||||
DisableSubScopeSelection: nav.DisableSubScopeSelection,
|
||||
}
|
||||
|
||||
if nav.SubScope != "" {
|
||||
@@ -404,9 +407,10 @@ func treeToNavigations(node NavigationTreeNode, parentPath []string, dashboardCo
|
||||
|
||||
// Create navigation for this node
|
||||
nav := NavigationConfig{
|
||||
URL: url,
|
||||
Scope: node.Scope,
|
||||
Title: node.Title,
|
||||
URL: url,
|
||||
Scope: node.Scope,
|
||||
Title: node.Title,
|
||||
DisableSubScopeSelection: node.DisableSubScopeSelection,
|
||||
}
|
||||
if node.SubScope != "" {
|
||||
nav.SubScope = node.SubScope
|
||||
|
||||
@@ -21,11 +21,28 @@ weight: 120
|
||||
|
||||
# Install a plugin
|
||||
|
||||
Besides the UI, you can use alternative methods to install a plugin depending on your environment or set-up.
|
||||
{{< admonition type="note" >}}
|
||||
|
||||
Installing plugins from the Grafana website into a Grafana Cloud instance will be removed in February 2026.
|
||||
|
||||
If you're a Grafana Cloud user, follow [Install a plugin through the Grafana UI](#install-a-plugin-through-the-grafana-uiinstall-a-plugin-through-the-grafana-ui) instead.
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
## Install a plugin through the Grafana UI
|
||||
|
||||
The most common way to install a plugin is through the Grafana UI.
|
||||
|
||||
1. In Grafana, click **Administration > Plugins and data > Plugins** in the side navigation menu to view all plugins.
|
||||
1. Browse and find a plugin.
|
||||
1. Click the plugin's logo.
|
||||
1. Click **Install**.
|
||||
|
||||
You can use use the following alternative methods to install a plugin depending on your environment or setup.
|
||||
|
||||
## Install a plugin using Grafana CLI
|
||||
|
||||
The Grafana CLI allows you to install, upgrade, and manage your Grafana plugins using a command line tool. For more information about Grafana CLI plugin commands, refer to [Plugin commands](/docs/grafana/<GRAFANA_VERSION>/cli/#plugins-commands).
|
||||
The Grafana CLI allows you to install, upgrade, and manage your Grafana plugins using a command line tool. For more information about Grafana CLI plugin commands, refer to [Plugin commands](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/administration/cli/#plugins-commands).
|
||||
|
||||
## Install a plugin from a ZIP file
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ Before you begin, you should have the following available:
|
||||
- Administrator permissions in your Grafana instance; for more information on assigning Grafana RBAC roles, refer to [Assign RBAC roles](/docs/grafana-cloud/security-and-account-management/authentication-and-permissions/access-control/assign-rbac-roles/).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
All of the following Terraform configuration files should be saved in the same directory.
|
||||
Save all of the following Terraform configuration files in the same directory.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Configure the Grafana provider
|
||||
|
||||
@@ -3,7 +3,6 @@ aliases:
|
||||
- ../data-sources/azure-monitor/
|
||||
- ../features/datasources/azuremonitor/
|
||||
- azuremonitor/
|
||||
- azuremonitor/deprecated-application-insights/
|
||||
description: Guide for using Azure Monitor in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
@@ -23,6 +22,7 @@ labels:
|
||||
menuTitle: Azure Monitor
|
||||
title: Azure Monitor data source
|
||||
weight: 300
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
configure-grafana-feature-toggles:
|
||||
- pattern: /docs/grafana/
|
||||
@@ -49,6 +49,11 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
transform-data:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
|
||||
configure-grafana-azure:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
|
||||
@@ -63,295 +68,98 @@ refs:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-access/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-access/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
query-editor-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
template-variables-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
alerting-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
troubleshooting-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
annotations-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
---
|
||||
|
||||
# Azure Monitor data source
|
||||
|
||||
Grafana ships with built-in support for Azure Monitor, the Azure service to maximize the availability and performance of applications and services in the Azure Cloud.
|
||||
This topic explains configuring and querying specific to the Azure Monitor data source.
|
||||
The Azure Monitor data source plugin allows you to query and visualize data from Azure Monitor, the Azure service to maximize the availability and performance of applications and services in the Azure Cloud.
|
||||
|
||||
For instructions on how to add a data source to Grafana, refer to the [administration documentation](ref:data-source-management).
|
||||
Only users with the organization administrator role can add data sources.
|
||||
## Supported Azure clouds
|
||||
|
||||
Once you've added the Azure Monitor data source, you can [configure it](#configure-the-data-source) so that your Grafana instance's users can create queries in its [query editor](query-editor/) when they [build dashboards](ref:build-dashboards) and use [Explore](ref:explore).
|
||||
The Azure Monitor data source supports the following Azure cloud environments:
|
||||
|
||||
The Azure Monitor data source supports visualizing data from four Azure services:
|
||||
- **Azure** - Azure public cloud (default)
|
||||
- **Azure US Government** - Azure Government cloud
|
||||
- **Azure China** - Azure China cloud operated by 21Vianet
|
||||
|
||||
- **Azure Monitor Metrics:** Collect numeric data from resources in your Azure account.
|
||||
- **Azure Monitor Logs:** Collect log and performance data from your Azure account, and query using the Kusto Query Language (KQL).
|
||||
- **Azure Resource Graph:** Query your Azure resources across subscriptions.
|
||||
- **Azure Monitor Application Insights:** Collect trace logging data and other application performance metrics.
|
||||
## Supported Azure services
|
||||
|
||||
## Configure the data source
|
||||
The Azure Monitor data source supports the following Azure services:
|
||||
|
||||
**To access the data source configuration page:**
|
||||
| Service | Description |
|
||||
| ------------------------------- | --------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Azure Monitor Metrics** | Collect numeric data from resources in your Azure account. Supports dimensions, aggregations, and time grain configuration. |
|
||||
| **Azure Monitor Logs** | Collect log and performance data from your Azure account using the Kusto Query Language (KQL). |
|
||||
| **Azure Resource Graph** | Query your Azure resources across subscriptions using KQL. Useful for inventory, compliance, and resource management. |
|
||||
| **Application Insights Traces** | Collect distributed trace data and correlate requests across your application components. |
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Under Your connections, click **Data sources**.
|
||||
1. Enter `Azure Monitor` in the search bar.
|
||||
1. Click **Azure Monitor**.
|
||||
## Get started
|
||||
|
||||
The **Settings** tab of the data source is displayed.
|
||||
The following documents will help you get started with the Azure Monitor data source:
|
||||
|
||||
### Configure Azure Active Directory (AD) authentication
|
||||
- [Configure the Azure Monitor data source](ref:configure-azure-monitor) - Set up authentication and connect to Azure
|
||||
- [Azure Monitor query editor](ref:query-editor-azure-monitor) - Create and edit queries for Metrics, Logs, Traces, and Resource Graph
|
||||
- [Template variables](ref:template-variables-azure-monitor) - Create dynamic dashboards with Azure Monitor variables
|
||||
- [Alerting](ref:alerting-azure-monitor) - Create alert rules using Azure Monitor data
|
||||
- [Troubleshooting](ref:troubleshooting-azure-monitor) - Solve common configuration and query errors
|
||||
|
||||
You must create an app registration and service principal in Azure AD to authenticate the data source.
|
||||
For configuration details, refer to the [Azure documentation for service principals](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in).
|
||||
## Additional features
|
||||
|
||||
The app registration you create must have the `Reader` role assigned on the subscription.
|
||||
For more information, refer to [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
|
||||
After you have configured the Azure Monitor data source, you can:
|
||||
|
||||
If you host Grafana in Azure, such as in App Service or Azure Virtual Machines, you can configure the Azure Monitor data source to use Managed Identity for secure authentication without entering credentials into Grafana.
|
||||
For details, refer to [Configuring using Managed Identity](#configuring-using-managed-identity).
|
||||
- Add [Annotations](ref:annotations-azure-monitor) to overlay Azure log events on your graphs.
|
||||
- Configure and use [Template variables](ref:template-variables-azure-monitor) for dynamic dashboards.
|
||||
- Add [Transformations](ref:transform-data) to manipulate query results.
|
||||
- Set up [Alerting](ref:alerting-azure-monitor) and recording rules using Metrics, Logs, Traces, and Resource Graph queries.
|
||||
- Use [Explore](ref:explore) to investigate your Azure data without building a dashboard.
|
||||
|
||||
You can configure the Azure Monitor data source to use Workload Identity for secure authentication without entering credentials into Grafana if you host Grafana in a Kubernetes environment, such as AKS, and require access to Azure resources.
|
||||
For details, refer to [Configuring using Workload Identity](#configuring-using-workload-identity).
|
||||
## Pre-built dashboards
|
||||
|
||||
| Name | Description |
|
||||
| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Enables Managed Identity. Selecting Managed Identity hides many of the other fields. For details, see [Configuring using Managed Identity](#configuring-using-managed-identity). |
|
||||
| **Azure Cloud** | Sets the national cloud for your Azure account. For most users, this is the default "Azure". For details, see the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/develop/authentication-national-cloud). |
|
||||
| **Directory (tenant) ID** | Sets the directory/tenant ID for the Azure AD app registration to use for authentication. For details, see the [Azure tenant and app ID docs](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in). |
|
||||
| **Application (client) ID** | Sets the application/client ID for the Azure AD app registration to use for authentication. |
|
||||
| **Client secret** | Sets the application client secret for the Azure AD app registration to use for authentication. For details, see the [Azure application secret docs](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret). |
|
||||
| **Default subscription** | _(Optional)_ Sets a default subscription for template variables to use. |
|
||||
| **Enable Basic Logs** | Allows this data source to execute queries against [Basic Logs tables](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/basic-logs-query?tabs=portal-1) in supported Log Analytics Workspaces. These queries may incur additional costs. |
|
||||
The Azure Monitor plugin includes the following pre-built dashboards:
|
||||
|
||||
### Provision the data source
|
||||
- **Azure Monitor Overview** - Displays key metrics across your Azure subscriptions and resources.
|
||||
- **Azure Storage Account** - Shows storage account metrics including availability, latency, and transactions.
|
||||
|
||||
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
|
||||
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-data-sources).
|
||||
To import a pre-built dashboard:
|
||||
|
||||
#### Provisioning examples
|
||||
1. Go to **Connections** > **Data sources**.
|
||||
1. Select your Azure Monitor data source.
|
||||
1. Click the **Dashboards** tab.
|
||||
1. Click **Import** next to the dashboard you want to use.
|
||||
|
||||
**Azure AD App Registration (client secret):**
|
||||
## Related resources
|
||||
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: clientsecret
|
||||
cloudName: azuremonitor # See table below
|
||||
tenantId: <tenant-id>
|
||||
clientId: <client-id>
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
secureJsonData:
|
||||
clientSecret: <client-secret>
|
||||
version: 1
|
||||
```
|
||||
|
||||
**Managed Identity:**
|
||||
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: msi
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
**Workload Identity:**
|
||||
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: workloadidentity
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
**Current User:**
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The `oauthPassThru` property is required for current user authentication to function.
|
||||
Additionally, `disableGrafanaCache` is necessary to prevent the data source returning cached responses for resources users don't have access to.
|
||||
{{< /admonition >}}
|
||||
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: currentuser
|
||||
oauthPassThru: true
|
||||
disableGrafanaCache: true
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
#### Supported cloud names
|
||||
|
||||
| Azure Cloud | `cloudName` Value |
|
||||
| ------------------------------------ | -------------------------- |
|
||||
| **Microsoft Azure public cloud** | `azuremonitor` (_Default_) |
|
||||
| **Microsoft Chinese national cloud** | `chinaazuremonitor` |
|
||||
| **US Government cloud** | `govazuremonitor` |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Cloud names for current user authentication differ to the `cloudName` values in the preceding table.
|
||||
The public cloud name is `AzureCloud`, the Chinese national cloud name is `AzureChinaCloud`, and the US Government cloud name is `AzureUSGovernment`.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Configure Managed Identity
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Managed Identity is available only in [Azure Managed Grafana](https://azure.microsoft.com/en-us/products/managed-grafana) or Grafana OSS/Enterprise when deployed in Azure. It is not available in Grafana Cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
You can use managed identity to configure Azure Monitor in Grafana if you host Grafana in Azure (such as an App Service or with Azure Virtual Machines) and have managed identity enabled on your VM.
|
||||
This lets you securely authenticate data sources without manually configuring credentials via Azure AD App Registrations.
|
||||
For details on Azure managed identities, refer to the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
|
||||
|
||||
**To enable managed identity for Grafana:**
|
||||
|
||||
1. Set the `managed_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
```
|
||||
|
||||
2. In the Azure Monitor data source configuration, set **Authentication** to **Managed Identity**.
|
||||
|
||||
This hides the directory ID, application ID, and client secret fields, and the data source uses managed identity to authenticate to Azure Monitor Metrics and Logs, and Azure Resource Graph.
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-managed-identity-2.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Managed Identity authentication" >}}
|
||||
|
||||
3. You can set the `managed_identity_client_id` field in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure) to allow a user-assigned managed identity to be used instead of the default system-assigned identity.
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
managed_identity_client_id = USER_ASSIGNED_IDENTITY_CLIENT_ID
|
||||
```
|
||||
|
||||
### Configure Workload Identity
|
||||
|
||||
You can use workload identity to configure Azure Monitor in Grafana if you host Grafana in a Kubernetes environment, such as AKS, in conjunction with managed identities.
|
||||
This lets you securely authenticate data sources without manually configuring credentials via Azure AD App Registrations.
|
||||
For details on workload identity, refer to the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/).
|
||||
|
||||
**To enable workload identity for Grafana:**
|
||||
|
||||
1. Set the `workload_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
```
|
||||
|
||||
2. In the Azure Monitor data source configuration, set **Authentication** to **Workload Identity**.
|
||||
|
||||
This hides the directory ID, application ID, and client secret fields, and the data source uses workload identity to authenticate to Azure Monitor Metrics and Logs, and Azure Resource Graph.
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-workload-identity.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Workload Identity authentication" >}}
|
||||
|
||||
3. There are additional configuration variables that can control the authentication method.`workload_identity_tenant_id` represents the Azure AD tenant that contains the managed identity, `workload_identity_client_id` represents the client ID of the managed identity if it differs from the default client ID, `workload_identity_token_file` represents the path to the token file. Refer to the [documentation](https://azure.github.io/azure-workload-identity/docs/) for more information on what values these variables should use, if any.
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
workload_identity_tenant_id = IDENTITY_TENANT_ID
|
||||
workload_identity_client_id = IDENTITY_CLIENT_ID
|
||||
workload_identity_token_file = TOKEN_FILE_PATH
|
||||
```
|
||||
|
||||
### Configure Current User authentication
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Current user authentication is an [experimental feature](/docs/release-life-cycle). Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Contact Grafana Support to enable this feature in Grafana Cloud. Aspects of Grafana may not work as expected when using this authentication method.
|
||||
{{< /admonition >}}
|
||||
|
||||
If your Grafana instance is configured with Azure Entra (formerly Active Directory) authentication for login, this authentication method can be used to forward the currently logged in user's credentials to the data source. The users credentials will then be used when requesting data from the data source. For details on how to configure your Grafana instance using Azure Entra refer to the [documentation](ref:configure-grafana-azure-auth).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Additional configuration is required to ensure that the App Registration used to login a user via Azure provides an access token with the permissions required by the data source.
|
||||
|
||||
The App Registration must be configured to issue both **Access Tokens** and **ID Tokens**.
|
||||
|
||||
1. In the Azure Portal, open the App Registration that requires configuration.
|
||||
2. Select **Authentication** in the side menu.
|
||||
3. Under **Implicit grant and hybrid flows** check both the **Access tokens** and **ID tokens** boxes.
|
||||
4. Save the changes to ensure the App Registration is updated.
|
||||
|
||||
The App Registration must also be configured with additional **API Permissions** to provide authenticated users with access to the APIs utilised by the data source.
|
||||
|
||||
1. In the Azure Portal, open the App Registration that requires configuration.
|
||||
1. Select **API Permissions** in the side menu.
|
||||
1. Ensure the `openid`, `profile`, `email`, and `offline_access` permissions are present under the **Microsoft Graph** section. If not, they must be added.
|
||||
1. Select **Add a permission** and choose the following permissions. They must be added individually. Refer to the [Azure documentation](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-configure-app-access-web-apis) for more information.
|
||||
- Select **Azure Service Management** > **Delegated permissions** > `user_impersonation` > **Add permissions**
|
||||
- Select **APIs my organization uses** > Search for **Log Analytics API** and select it > **Delegated permissions** > `Date.Read` > **Add permissions**
|
||||
|
||||
Once all permissions have been added, the Azure authentication section in Grafana must be updated. The `scopes` section must be updated to include the `.default` scope to ensure that a token with access to all APIs declared on the App Registration is requested by Grafana. Once updated the scopes value should equal: `.default openid email profile`.
|
||||
{{< /admonition >}}
|
||||
|
||||
This method of authentication doesn't inherently support all backend functionality as a user's credentials won't be in scope.
|
||||
Affected functionality includes alerting, reporting, and recorded queries.
|
||||
In order to support backend queries when using a data source configured with current user authentication, you can configure service credentials.
|
||||
Also, note that query and resource caching is disabled by default for data sources using current user authentication.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
To configure fallback service credentials the [feature toggle](ref:configure-grafana-feature-toggles) `idForwarding` must be set to `true` and `user_identity_fallback_credentials_enabled` must be enabled in the [Azure configuration section](ref:configure-grafana-azure) (enabled by default when `user_identity_enabled` is set to `true`).
|
||||
{{< /admonition >}}
|
||||
|
||||
Permissions for fallback credentials may need to be broad to appropriately support backend functionality.
|
||||
For example, an alerting query created by a user is dependent on their permissions.
|
||||
If a user tries to create an alert for a resource that the fallback credentials can't access, the alert will fail.
|
||||
|
||||
**To enable current user authentication for Grafana:**
|
||||
|
||||
1. Set the `user_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
|
||||
By default this will also enable fallback service credentials.
|
||||
If you want to disable service credentials at the instance level set `user_identity_fallback_credentials_enabled` to false.
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
user_identity_enabled = true
|
||||
```
|
||||
|
||||
1. In the Azure Monitor data source configuration, set **Authentication** to **Current User**.
|
||||
If fallback service credentials are enabled at the instance level, an additional configuration section is visible that you can use to enable or disable using service credentials for this data source.
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-current-user.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Current User authentication" >}}
|
||||
|
||||
1. If you want backend functionality to work with this data source, enable service credentials and configure the data source using the most applicable credentials for your circumstances.
|
||||
|
||||
## Query the data source
|
||||
|
||||
The Azure Monitor data source can query data from Azure Monitor Metrics and Logs, the Azure Resource Graph, and Application Insights Traces. Each source has its own specialized query editor.
|
||||
|
||||
For details, see the [query editor documentation](query-editor/).
|
||||
|
||||
## Use template variables
|
||||
|
||||
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
|
||||
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
|
||||
Grafana refers to such variables as template variables.
|
||||
|
||||
For details, see the [template variables documentation](template-variables/).
|
||||
|
||||
## Application Insights and Insights Analytics (removed)
|
||||
|
||||
Until Grafana v8.0, you could query the same Azure Application Insights data using Application Insights and Insights Analytics.
|
||||
|
||||
These queries were deprecated in Grafana v7.5. In Grafana v8.0, Application Insights and Insights Analytics were made read-only in favor of querying this data through Metrics and Logs. These query methods were completely removed in Grafana v9.0.
|
||||
|
||||
If you're upgrading from a Grafana version prior to v9.0 and relied on Application Insights and Analytics queries, refer to the [Grafana v9.0 documentation](/docs/grafana/v9.0/datasources/azuremonitor/deprecated-application-insights/) for help migrating these queries to Metrics and Logs queries.
|
||||
- [Azure Monitor documentation](https://docs.microsoft.com/en-us/azure/azure-monitor/)
|
||||
- [Kusto Query Language (KQL) reference](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/)
|
||||
- [Grafana community forum](https://community.grafana.com/)
|
||||
|
||||
262
docs/sources/datasources/azure-monitor/alerting/index.md
Normal file
262
docs/sources/datasources/azure-monitor/alerting/index.md
Normal file
@@ -0,0 +1,262 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/alerting/
|
||||
description: Set up alerts using Azure Monitor data in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- azure
|
||||
- monitor
|
||||
- alerting
|
||||
- alerts
|
||||
- metrics
|
||||
- logs
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Alerting
|
||||
title: Azure Monitor alerting
|
||||
weight: 500
|
||||
refs:
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
alerting-fundamentals:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/
|
||||
create-alert-rule:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
grafana-managed-recording-rules:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
troubleshoot:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
---
|
||||
|
||||
# Azure Monitor alerting
|
||||
|
||||
The Azure Monitor data source supports [Grafana Alerting](ref:alerting) and [Grafana-managed recording rules](ref:grafana-managed-recording-rules), allowing you to create alert rules based on Azure metrics, logs, traces, and resource data. You can monitor your Azure environment and receive notifications when specific conditions are met.
|
||||
|
||||
## Before you begin
|
||||
|
||||
- Ensure you have the appropriate permissions to create alert rules in Grafana.
|
||||
- Verify your Azure Monitor data source is configured and working correctly.
|
||||
- Familiarize yourself with [Grafana Alerting concepts](ref:alerting-fundamentals).
|
||||
- **Important**: Verify your data source uses a supported authentication method. Refer to [Authentication requirements](#authentication-requirements).
|
||||
|
||||
## Supported query types for alerting
|
||||
|
||||
All Azure Monitor query types support alerting and recording rules:
|
||||
|
||||
| Query type | Use case | Notes |
|
||||
| -------------------- | -------------------------------------------------- | -------------------------------------------------------- |
|
||||
| Metrics | Threshold-based alerts on Azure resource metrics | Best suited for alerting; returns time-series data |
|
||||
| Logs | Alert on log patterns, error counts, or thresholds | Use KQL to aggregate data into numeric values |
|
||||
| Azure Resource Graph | Alert on resource state or configuration changes | Use count aggregations to return numeric data |
|
||||
| Traces | Alert on trace data and application performance | Use aggregations to return numeric values for evaluation |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Alert queries must return numeric data that Grafana can evaluate against a threshold. Queries that return only text or non-numeric data cannot be used directly for alerting.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Authentication requirements
|
||||
|
||||
Alerting and recording rules run as background processes without a user context. This means they require service-level authentication and don't work with all authentication methods.
|
||||
|
||||
| Authentication method | Supported |
|
||||
| -------------------------------- | ------------------------------------- |
|
||||
| App Registration (client secret) | ✓ |
|
||||
| Managed Identity | ✓ |
|
||||
| Workload Identity | ✓ |
|
||||
| Current User | ✓ (with fallback service credentials) |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
If you use **Current User** authentication, you must configure **fallback service credentials** for alerting and recording rules to function. User credentials aren't available for background operations, so Grafana uses the fallback credentials instead. Refer to [configure the data source](ref:configure-azure-monitor) for details on setting up fallback credentials.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Create an alert rule
|
||||
|
||||
To create an alert rule using Azure Monitor data:
|
||||
|
||||
1. Go to **Alerting** > **Alert rules**.
|
||||
1. Click **New alert rule**.
|
||||
1. Enter a name for your alert rule.
|
||||
1. In the **Define query and alert condition** section:
|
||||
- Select your Azure Monitor data source.
|
||||
- Configure your query (for example, a Metrics query for CPU usage or a Logs query using KQL).
|
||||
- Add a **Reduce** expression if your query returns multiple series.
|
||||
- Add a **Threshold** expression to define the alert condition.
|
||||
1. Configure the **Set evaluation behavior**:
|
||||
- Select or create a folder and evaluation group.
|
||||
- Set the evaluation interval (how often the alert is checked).
|
||||
- Set the pending period (how long the condition must be true before firing).
|
||||
1. Add labels and annotations to provide context for notifications.
|
||||
1. Click **Save rule**.
|
||||
|
||||
For detailed instructions, refer to [Create a Grafana-managed alert rule](ref:create-alert-rule).
|
||||
|
||||
## Example: VM CPU usage alert
|
||||
|
||||
This example creates an alert that fires when virtual machine CPU usage exceeds 80%:
|
||||
|
||||
1. Create a new alert rule.
|
||||
1. Configure the query:
|
||||
- **Service**: Metrics
|
||||
- **Resource**: Select your virtual machine
|
||||
- **Metric namespace**: `Microsoft.Compute/virtualMachines`
|
||||
- **Metric**: `Percentage CPU`
|
||||
- **Aggregation**: `Average`
|
||||
1. Add expressions:
|
||||
- **Reduce**: Last (to get the most recent data point)
|
||||
- **Threshold**: Is above 80
|
||||
1. Set evaluation to run every 1 minute with a 5-minute pending period.
|
||||
1. Save the rule.
|
||||
|
||||
## Example: Error log count alert
|
||||
|
||||
This example alerts when error logs exceed a threshold using a KQL query:
|
||||
|
||||
1. Create a new alert rule.
|
||||
1. Configure the query:
|
||||
- **Service**: Logs
|
||||
- **Resource**: Select your Log Analytics workspace
|
||||
- **Query**:
|
||||
```kusto
|
||||
AppExceptions
|
||||
| where TimeGenerated > ago(5m)
|
||||
| summarize ErrorCount = count() by bin(TimeGenerated, 1m)
|
||||
```
|
||||
1. Add expressions:
|
||||
- **Reduce**: Max (to get the highest count in the period)
|
||||
- **Threshold**: Is above 10
|
||||
1. Set evaluation to run every 5 minutes.
|
||||
1. Save the rule.
|
||||
|
||||
## Example: Resource count alert
|
||||
|
||||
This example alerts when the number of running virtual machines drops below a threshold using Azure Resource Graph:
|
||||
|
||||
1. Create a new alert rule.
|
||||
1. Configure the query:
|
||||
- **Service**: Azure Resource Graph
|
||||
- **Subscriptions**: Select your subscriptions
|
||||
- **Query**:
|
||||
|
||||
```kusto
|
||||
resources
|
||||
| where type == "microsoft.compute/virtualmachines"
|
||||
| where properties.extended.instanceView.powerState.displayStatus == "VM running"
|
||||
| summarize RunningVMs = count()
|
||||
```
|
||||
|
||||
1. Add expressions:
|
||||
- **Reduce**: Last
|
||||
- **Threshold**: Is below 3
|
||||
1. Set evaluation to run every 5 minutes.
|
||||
1. Save the rule.
|
||||
|
||||
## Best practices
|
||||
|
||||
Follow these recommendations to create reliable and efficient alerts with Azure Monitor data.
|
||||
|
||||
### Use appropriate query intervals
|
||||
|
||||
- Set the alert evaluation interval to be greater than or equal to the minimum data resolution from Azure Monitor.
|
||||
- Azure Monitor Metrics typically have 1-minute granularity at minimum.
|
||||
- Avoid very short intervals (less than 1 minute) as they may cause evaluation timeouts or miss data points.
|
||||
|
||||
### Reduce multiple series
|
||||
|
||||
When your Azure Monitor query returns multiple time series (for example, CPU usage across multiple VMs), use the **Reduce** expression to aggregate them:
|
||||
|
||||
- **Last**: Use the most recent value
|
||||
- **Mean**: Average across all series
|
||||
- **Max/Min**: Use the highest or lowest value
|
||||
- **Sum**: Total across all series
|
||||
|
||||
### Optimize Log Analytics queries
|
||||
|
||||
For Logs queries used in alerting:
|
||||
|
||||
- Use `summarize` to aggregate data into numeric values.
|
||||
- Include appropriate time filters using `ago()` or `TimeGenerated`.
|
||||
- Avoid returning large result sets; aggregate data in the query.
|
||||
- Test queries in Explore before using them in alert rules.
|
||||
|
||||
### Handle no data conditions
|
||||
|
||||
Configure what happens when no data is returned:
|
||||
|
||||
1. In the alert rule, find **Configure no data and error handling**.
|
||||
1. Choose an appropriate action:
|
||||
- **No Data**: Keep the alert in its current state
|
||||
- **Alerting**: Treat no data as an alert condition
|
||||
- **OK**: Treat no data as a healthy state
|
||||
|
||||
### Test queries before alerting
|
||||
|
||||
Always verify your query returns expected data before creating an alert:
|
||||
|
||||
1. Go to **Explore**.
|
||||
1. Select your Azure Monitor data source.
|
||||
1. Run the query you plan to use for alerting.
|
||||
1. Confirm the data format and values are correct.
|
||||
1. Verify the query returns numeric data suitable for threshold evaluation.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If your Azure Monitor alerts aren't working as expected, use the following sections to diagnose and resolve common issues.
|
||||
|
||||
### Alerts not firing
|
||||
|
||||
- Verify the data source uses a supported authentication method. If using Current User authentication, ensure fallback service credentials are configured.
|
||||
- Check that the query returns numeric data in Explore.
|
||||
- Ensure the evaluation interval allows enough time for data to be available.
|
||||
- Review the alert rule's health and any error messages in the Alerting UI.
|
||||
|
||||
### Authentication errors in alert evaluation
|
||||
|
||||
If you see authentication errors when alerts evaluate:
|
||||
|
||||
- Confirm the data source is configured with App Registration, Managed Identity, Workload Identity, or Current User with fallback service credentials.
|
||||
- If using App Registration, verify the client secret hasn't expired.
|
||||
- If using Current User, verify that fallback service credentials are configured and valid.
|
||||
- Check that the service principal has appropriate permissions on Azure resources.
|
||||
|
||||
### Query timeout errors
|
||||
|
||||
- Simplify complex KQL queries.
|
||||
- Reduce the time range in Log Analytics queries.
|
||||
- Add more specific filters to narrow result sets.
|
||||
|
||||
For additional troubleshooting help, refer to [Troubleshoot Azure Monitor](ref:troubleshoot).
|
||||
|
||||
## Additional resources
|
||||
|
||||
- [Grafana Alerting documentation](ref:alerting)
|
||||
- [Create alert rules](ref:create-alert-rule)
|
||||
- [Azure Monitor query editor](ref:query-editor)
|
||||
- [Grafana-managed recording rules](ref:grafana-managed-recording-rules)
|
||||
218
docs/sources/datasources/azure-monitor/annotations/index.md
Normal file
218
docs/sources/datasources/azure-monitor/annotations/index.md
Normal file
@@ -0,0 +1,218 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/annotations/
|
||||
description: Use annotations with the Azure Monitor data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- azure
|
||||
- monitor
|
||||
- annotations
|
||||
- events
|
||||
- logs
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Annotations
|
||||
title: Azure Monitor annotations
|
||||
weight: 450
|
||||
refs:
|
||||
annotate-visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
---
|
||||
|
||||
# Azure Monitor annotations
|
||||
|
||||
[Annotations](ref:annotate-visualizations) overlay rich event information on top of graphs. You can use Azure Monitor Log Analytics queries to create annotations that mark important events, deployments, alerts, or other significant occurrences on your dashboards.
|
||||
|
||||
## Before you begin
|
||||
|
||||
- Ensure you have configured the Azure Monitor data source.
|
||||
- You need access to a Log Analytics workspace containing the data you want to use for annotations.
|
||||
- Annotations use Log Analytics (KQL) queries only. Metrics, Traces, and Azure Resource Graph queries are not supported for annotations.
|
||||
|
||||
## Create an annotation query
|
||||
|
||||
To add an Azure Monitor annotation to a dashboard:
|
||||
|
||||
1. Open the dashboard where you want to add annotations.
|
||||
1. Click **Dashboard settings** (gear icon) in the top navigation.
|
||||
1. Select **Annotations** in the left menu.
|
||||
1. Click **Add annotation query**.
|
||||
1. Enter a **Name** for the annotation (e.g., "Azure Activity", "Deployments").
|
||||
1. Select your **Azure Monitor** data source.
|
||||
1. Choose the **Logs** service.
|
||||
1. Select a **Resource** (Log Analytics workspace or Application Insights resource).
|
||||
1. Write a KQL query that returns the annotation data.
|
||||
1. Click **Apply** to save.
|
||||
|
||||
## Query requirements
|
||||
|
||||
Your KQL query should return columns that Grafana can use to create annotations:
|
||||
|
||||
| Column | Required | Description |
|
||||
| ------------------ | ----------- | ------------------------------------------------------------------------------------------------ |
|
||||
| `TimeGenerated` | Yes | The timestamp for the annotation. Grafana uses this to position the annotation on the time axis. |
|
||||
| `Text` | Recommended | The annotation text displayed when you hover over or click the annotation. |
|
||||
| Additional columns | Optional | Any other columns returned become annotation tags. |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Always include a time filter in your query to limit results to the dashboard's time range. Use the `$__timeFilter()` macro.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Annotation query examples
|
||||
|
||||
The following examples demonstrate common annotation use cases.
|
||||
|
||||
### Azure Activity Log events
|
||||
|
||||
Display Azure Activity Log events such as resource modifications, deployments, and administrative actions:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where Level == "Error" or Level == "Warning" or CategoryValue == "Administrative"
|
||||
| project TimeGenerated, Text=OperationNameValue, Level, ResourceGroup, Caller
|
||||
| order by TimeGenerated desc
|
||||
| take 100
|
||||
```
|
||||
|
||||
### Deployment events
|
||||
|
||||
Show deployment-related activity:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where OperationNameValue contains "deployments"
|
||||
| project TimeGenerated, Text=strcat("Deployment: ", OperationNameValue), Status=ActivityStatusValue, ResourceGroup
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### Application Insights exceptions
|
||||
|
||||
Mark application exceptions as annotations:
|
||||
|
||||
```kusto
|
||||
AppExceptions
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| project TimeGenerated, Text=strcat(ProblemId, ": ", OuterMessage), SeverityLevel, AppRoleName
|
||||
| order by TimeGenerated desc
|
||||
| take 50
|
||||
```
|
||||
|
||||
### Custom events from Application Insights
|
||||
|
||||
Display custom events logged by your application:
|
||||
|
||||
```kusto
|
||||
AppEvents
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where Name == "DeploymentStarted" or Name == "DeploymentCompleted"
|
||||
| project TimeGenerated, Text=Name, AppRoleName
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### Security alerts
|
||||
|
||||
Show security-related alerts:
|
||||
|
||||
```kusto
|
||||
SecurityAlert
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| project TimeGenerated, Text=AlertName, Severity=AlertSeverity, Description
|
||||
| order by TimeGenerated desc
|
||||
| take 50
|
||||
```
|
||||
|
||||
### Resource health events
|
||||
|
||||
Display resource health status changes:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where CategoryValue == "ResourceHealth"
|
||||
| project TimeGenerated, Text=OperationNameValue, Status=ActivityStatusValue, ResourceId
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### VM start and stop events
|
||||
|
||||
Mark virtual machine state changes:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where OperationNameValue has_any ("start", "deallocate", "restart")
|
||||
| where ResourceProviderValue == "MICROSOFT.COMPUTE"
|
||||
| project TimeGenerated, Text=OperationNameValue, VM=Resource, Status=ActivityStatusValue
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### Autoscale events
|
||||
|
||||
Show autoscale operations:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where OperationNameValue contains "autoscale"
|
||||
| project TimeGenerated, Text=strcat("Autoscale: ", OperationNameValue), Status=ActivityStatusValue, ResourceGroup
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
## Customize annotation appearance
|
||||
|
||||
After creating an annotation query, you can customize its appearance:
|
||||
|
||||
| Setting | Description |
|
||||
| ------------- | -------------------------------------------------------------------------------------------------------- |
|
||||
| **Color** | Choose a color for the annotation markers. Use different colors to distinguish between annotation types. |
|
||||
| **Show in** | Select which panels display the annotations. |
|
||||
| **Filter by** | Add filters to limit when annotations appear. |
|
||||
|
||||
## Best practices
|
||||
|
||||
Follow these recommendations when creating annotations:
|
||||
|
||||
1. **Limit results**: Always use `take` or `limit` to restrict the number of annotations. Too many annotations can clutter your dashboard and impact performance.
|
||||
|
||||
2. **Use time filters**: Include `$__timeFilter()` to ensure queries only return data within the dashboard's time range.
|
||||
|
||||
3. **Create meaningful text**: Use `strcat()` or `project` to create descriptive annotation text that provides context at a glance.
|
||||
|
||||
4. **Add relevant tags**: Include columns like `ResourceGroup`, `Severity`, or `Status` that become clickable tags for filtering.
|
||||
|
||||
5. **Use descriptive names**: Name your annotations clearly (e.g., "Production Deployments", "Critical Alerts") so dashboard users understand what they represent.
|
||||
|
||||
## Troubleshoot annotations
|
||||
|
||||
If annotations aren't appearing as expected, try the following solutions.
|
||||
|
||||
### Annotations don't appear
|
||||
|
||||
- Verify the query returns data in the selected time range.
|
||||
- Check that the query includes a `TimeGenerated` column.
|
||||
- Test the query in the Azure Portal Log Analytics query editor.
|
||||
- Ensure the annotation is enabled (toggle is on).
|
||||
|
||||
### Too many annotations
|
||||
|
||||
- Add more specific filters to your query.
|
||||
- Use `take` to limit results.
|
||||
- Narrow the time range.
|
||||
|
||||
### Annotations appear at wrong times
|
||||
|
||||
- Verify the `TimeGenerated` column contains the correct timestamp.
|
||||
- Check your dashboard's timezone settings.
|
||||
605
docs/sources/datasources/azure-monitor/configure/index.md
Normal file
605
docs/sources/datasources/azure-monitor/configure/index.md
Normal file
@@ -0,0 +1,605 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/configure/
|
||||
description: Guide for configuring the Azure Monitor data source in Grafana.
|
||||
keywords:
|
||||
- grafana
|
||||
- microsoft
|
||||
- azure
|
||||
- monitor
|
||||
- application
|
||||
- insights
|
||||
- log
|
||||
- analytics
|
||||
- guide
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Configure
|
||||
title: Configure the Azure Monitor data source
|
||||
weight: 200
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
configure-grafana-feature-toggles:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#feature_toggles
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#feature_toggles
|
||||
provisioning-data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
configure-grafana-azure-auth:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/
|
||||
build-dashboards:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
configure-grafana-azure:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
|
||||
data-source-management:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
configure-grafana-azure-auth-scopes:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
configure-pdc:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
---
|
||||
|
||||
# Configure the Azure Monitor data source
|
||||
|
||||
This document explains how to configure the Azure Monitor data source and the available configuration options.
|
||||
For general information about data sources, refer to [Grafana data sources](ref:data-sources) and [Data source management](ref:data-source-management).
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before configuring the Azure Monitor data source, ensure you have the following:
|
||||
|
||||
- **Grafana permissions:** You must have the `Organization administrator` role to configure data sources.
|
||||
Organization administrators can also [configure the data source via YAML](#provision-the-data-source) with the Grafana provisioning system or [using Terraform](#configure-with-terraform).
|
||||
|
||||
- **Azure prerequisites:** Depending on your chosen authentication method, you may need:
|
||||
- A Microsoft Entra ID (formerly Azure AD) app registration with a service principal (for App Registration authentication)
|
||||
- A Managed Identity enabled on your Azure VM or App Service (for Managed Identity authentication)
|
||||
- Workload identity configured in your Kubernetes cluster (for Workload Identity authentication)
|
||||
- Microsoft Entra ID authentication configured for Grafana login (for Current User authentication)
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
**Grafana Cloud users:** Managed Identity and Workload Identity authentication methods are not available in Grafana Cloud because they require Grafana to run on your Azure infrastructure. Use **App Registration** authentication instead.
|
||||
{{< /admonition >}}
|
||||
|
||||
- **Azure RBAC permissions:** The identity used to authenticate must have the `Reader` role on the Azure subscription containing the resources you want to monitor.
|
||||
For Log Analytics queries, the identity also needs appropriate permissions on the Log Analytics workspaces to be queried.
|
||||
Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The Azure Monitor data source plugin is built into Grafana. No additional installation is required.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Add the data source
|
||||
|
||||
To add the Azure Monitor data source:
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Click **Add new connection**.
|
||||
1. Type `Azure Monitor` in the search bar.
|
||||
1. Select **Azure Monitor**.
|
||||
1. Click **Add new data source** in the upper right.
|
||||
|
||||
You're taken to the **Settings** tab where you can configure the data source.
|
||||
|
||||
## Choose an authentication method
|
||||
|
||||
The Azure Monitor data source supports four authentication methods. Choose based on where Grafana is hosted and your security requirements:
|
||||
|
||||
| Authentication method | Best for | Requirements |
|
||||
| --------------------- | ------------------------------------------ | -------------------------------------------------------------- |
|
||||
| **App Registration** | Any Grafana deployment | Microsoft Entra ID app registration with client secret |
|
||||
| **Managed Identity** | Grafana hosted in Azure (VMs, App Service) | Managed identity enabled on the Azure resource |
|
||||
| **Workload Identity** | Grafana in Kubernetes (AKS) | Workload identity federation configured |
|
||||
| **Current User** | User-level access control | Microsoft Entra ID authentication configured for Grafana login |
|
||||
|
||||
## Configure authentication
|
||||
|
||||
Select one of the following authentication methods and complete the configuration.
|
||||
|
||||
### App Registration
|
||||
|
||||
Use a Microsoft Entra ID app registration (service principal) to authenticate. This method works with any Grafana deployment.
|
||||
|
||||
#### App Registration prerequisites
|
||||
|
||||
1. Create an app registration in Microsoft Entra ID.
|
||||
Refer to the [Azure documentation for creating a service principal](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in).
|
||||
|
||||
1. Create a client secret for the app registration.
|
||||
Refer to the [Azure documentation for creating a client secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).
|
||||
|
||||
1. Assign the `Reader` role to the app registration on the subscription or resources you want to monitor.
|
||||
Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
|
||||
|
||||
#### App Registration UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| **Authentication** | Select **App Registration**. |
|
||||
| **Azure Cloud** | The Azure environment to connect to. Select **Azure** for the public cloud, or choose Azure Government or Azure China for national clouds. |
|
||||
| **Directory (tenant) ID** | The GUID that identifies your Microsoft Entra ID tenant. |
|
||||
| **Application (client) ID** | The GUID for the app registration you created. |
|
||||
| **Client secret** | The secret key for the app registration. Keep this secure and rotate periodically. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
|
||||
#### Provision App Registration with YAML
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: clientsecret
|
||||
cloudName: azuremonitor # See supported cloud names below
|
||||
tenantId: <tenant-id>
|
||||
clientId: <client-id>
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
secureJsonData:
|
||||
clientSecret: <client-secret>
|
||||
version: 1
|
||||
```
|
||||
|
||||
### Managed Identity
|
||||
|
||||
Use Azure Managed Identity for secure, credential-free authentication when Grafana is hosted in Azure.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Managed Identity is available in [Azure Managed Grafana](https://azure.microsoft.com/en-us/products/managed-grafana) or self-hosted Grafana deployed in Azure. It is not available in Grafana Cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Managed Identity prerequisites
|
||||
|
||||
- Grafana must be hosted in Azure (App Service, Azure VMs, or Azure Managed Grafana).
|
||||
- Managed identity must be enabled on the Azure resource hosting Grafana.
|
||||
- The managed identity must have the `Reader` role on the subscription or resources you want to monitor.
|
||||
|
||||
For details on Azure managed identities, refer to the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
|
||||
|
||||
#### Managed Identity Grafana server configuration
|
||||
|
||||
Enable managed identity in the Grafana server configuration:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
```
|
||||
|
||||
To use a user-assigned managed identity instead of the system-assigned identity, also set:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
managed_identity_client_id = <USER_ASSIGNED_IDENTITY_CLIENT_ID>
|
||||
```
|
||||
|
||||
Refer to [Grafana Azure configuration](ref:configure-grafana-azure) for more details.
|
||||
|
||||
#### Managed Identity UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------------ | --------------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Select **Managed Identity**. The directory ID, application ID, and client secret fields are hidden. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-managed-identity-2.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Managed Identity" >}}
|
||||
|
||||
#### Provision Managed Identity with YAML
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: msi
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
### Workload Identity
|
||||
|
||||
Use Azure Workload Identity for secure authentication in Kubernetes environments like AKS.
|
||||
|
||||
#### Workload Identity prerequisites
|
||||
|
||||
- Grafana must be running in a Kubernetes environment with workload identity federation configured.
|
||||
- The workload identity must have the `Reader` role on the subscription or resources you want to monitor.
|
||||
|
||||
For details, refer to the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/).
|
||||
|
||||
#### Workload Identity Grafana server configuration
|
||||
|
||||
Enable workload identity in the Grafana server configuration:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
```
|
||||
|
||||
Optional configuration variables:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
workload_identity_tenant_id = <IDENTITY_TENANT_ID> # Microsoft Entra ID tenant containing the managed identity
|
||||
workload_identity_client_id = <IDENTITY_CLIENT_ID> # Client ID if different from default
|
||||
workload_identity_token_file = <TOKEN_FILE_PATH> # Path to the token file
|
||||
```
|
||||
|
||||
Refer to [Grafana Azure configuration](ref:configure-grafana-azure) and the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/) for more details.
|
||||
|
||||
#### Workload Identity UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------------ | ---------------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Select **Workload Identity**. The directory ID, application ID, and client secret fields are hidden. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-workload-identity.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Workload Identity" >}}
|
||||
|
||||
#### Provision Workload Identity with YAML
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: workloadidentity
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
### Current User
|
||||
|
||||
Forward the logged-in Grafana user's Azure credentials to the data source for user-level access control.
|
||||
|
||||
{{< admonition type="warning" >}}
|
||||
Current User authentication is an [experimental feature](/docs/release-life-cycle/). Engineering and on-call support is not available. Documentation is limited. No SLA is provided. Contact Grafana Support to enable this feature in Grafana Cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Current User prerequisites
|
||||
|
||||
Your Grafana instance must be configured with Microsoft Entra ID authentication. Refer to the [Microsoft Entra ID authentication documentation](ref:configure-grafana-azure-auth).
|
||||
|
||||
#### Configure your Azure App Registration
|
||||
|
||||
The App Registration used for Grafana login requires additional configuration:
|
||||
|
||||
**Enable token issuance:**
|
||||
|
||||
1. In the Azure Portal, open your App Registration.
|
||||
1. Select **Authentication** in the side menu.
|
||||
1. Under **Implicit grant and hybrid flows**, check both **Access tokens** and **ID tokens**.
|
||||
1. Save your changes.
|
||||
|
||||
**Add API permissions:**
|
||||
|
||||
1. In the Azure Portal, open your App Registration.
|
||||
1. Select **API Permissions** in the side menu.
|
||||
1. Ensure these permissions are present under **Microsoft Graph**: `openid`, `profile`, `email`, and `offline_access`.
|
||||
1. Add the following permissions:
|
||||
- **Azure Service Management** > **Delegated permissions** > `user_impersonation`
|
||||
- **APIs my organization uses** > Search for **Log Analytics API** > **Delegated permissions** > `Data.Read`
|
||||
|
||||
Refer to the [Azure documentation](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-configure-app-access-web-apis) for more information.
|
||||
|
||||
**Update Grafana scopes:**
|
||||
|
||||
Update the `scopes` section in your Grafana Azure authentication configuration to include the `.default` scope:
|
||||
|
||||
```
|
||||
.default openid email profile
|
||||
```
|
||||
|
||||
#### Current User Grafana server configuration
|
||||
|
||||
Enable current user authentication in the Grafana server configuration:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
user_identity_enabled = true
|
||||
```
|
||||
|
||||
By default, this also enables fallback service credentials. To disable fallback credentials at the instance level:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
user_identity_enabled = true
|
||||
user_identity_fallback_credentials_enabled = false
|
||||
```
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
To use fallback service credentials, the [feature toggle](ref:configure-grafana-feature-toggles) `idForwarding` must be set to `true`.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Limitations and fallback credentials
|
||||
|
||||
Current User authentication doesn't support backend functionality like alerting, reporting, and recorded queries because user credentials aren't available for background operations.
|
||||
|
||||
To support these features, configure **fallback service credentials**. When enabled, Grafana uses the fallback credentials for backend operations. Note that operations using fallback credentials are limited to the permissions of those credentials, not the user's permissions.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Query and resource caching is disabled by default for data sources using Current User authentication.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Current User UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| -------------------------------- | ------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Select **Current User**. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
| **Fallback Service Credentials** | Enable and configure credentials for backend features like alerting. |
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-current-user.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Current User authentication" >}}
|
||||
|
||||
#### Provision Current User with YAML
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The `oauthPassThru` property is required for Current User authentication. The `disableGrafanaCache` property prevents returning cached responses for resources users don't have access to.
|
||||
{{< /admonition >}}
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: currentuser
|
||||
oauthPassThru: true
|
||||
disableGrafanaCache: true
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
## Additional configuration options
|
||||
|
||||
These settings apply to all authentication methods.
|
||||
|
||||
### General settings
|
||||
|
||||
| Setting | Description |
|
||||
| ----------- | ------------------------------------------------------------------------------- |
|
||||
| **Name** | The data source name used in panels and queries. Example: `azure-monitor-prod`. |
|
||||
| **Default** | Toggle to make this the default data source for new panels. |
|
||||
|
||||
### Enable Basic Logs
|
||||
|
||||
Toggle **Enable Basic Logs** to allow queries against [Basic Logs tables](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/basic-logs-query?tabs=portal-1) in supported Log Analytics Workspaces.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Querying Basic Logs tables incurs additional costs on a per-query basis.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Private data source connect (Grafana Cloud only)
|
||||
|
||||
If you're using Grafana Cloud and need to connect to Azure resources in a private network, use Private Data Source Connect (PDC).
|
||||
|
||||
1. Click the **Private data source connect** dropdown to select your PDC configuration.
|
||||
1. Click **Manage private data source connect** to view your PDC connection details.
|
||||
|
||||
For more information, refer to [Private data source connect](ref:private-data-source-connect) and [Configure PDC](ref:configure-pdc).
|
||||
|
||||
## Supported cloud names
|
||||
|
||||
When provisioning the data source, use the following `cloudName` values:
|
||||
|
||||
| Azure Cloud | `cloudName` value |
|
||||
| -------------------------------- | ------------------------ |
|
||||
| Microsoft Azure public cloud | `azuremonitor` (default) |
|
||||
| Microsoft Chinese national cloud | `chinaazuremonitor` |
|
||||
| US Government cloud | `govazuremonitor` |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
For Current User authentication, the cloud names differ: use `AzureCloud` for public cloud, `AzureChinaCloud` for the Chinese national cloud, and `AzureUSGovernment` for the US Government cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Verify the connection
|
||||
|
||||
After configuring the data source, click **Save & test**. A successful connection displays a message confirming that the credentials are valid and have access to the configured default subscription.
|
||||
|
||||
If the test fails, verify:
|
||||
|
||||
- Your credentials are correct (tenant ID, client ID, client secret)
|
||||
- The identity has the required Azure RBAC permissions
|
||||
- For Managed Identity or Workload Identity, that the Grafana server configuration is correct
|
||||
- Network connectivity to Azure endpoints
|
||||
|
||||
## Provision the data source
|
||||
|
||||
You can define and configure the Azure Monitor data source in YAML files as part of the Grafana provisioning system.
|
||||
For more information about provisioning, refer to [Provisioning Grafana](ref:provisioning-data-sources).
|
||||
|
||||
### Provision quick reference
|
||||
|
||||
| Authentication method | `azureAuthType` value | Required fields |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- |
|
||||
| App Registration | `clientsecret` | `tenantId`, `clientId`, `clientSecret` |
|
||||
| Managed Identity | `msi` | None (uses VM identity) |
|
||||
| Workload Identity | `workloadidentity` | None (uses pod identity) |
|
||||
| Current User | `currentuser` | `oauthPassThru: true`, `disableGrafanaCache: true` |
|
||||
|
||||
All methods support the optional `subscriptionId` field to set a default subscription.
|
||||
|
||||
For complete YAML examples, see the [authentication method sections](#configure-authentication) above.
|
||||
|
||||
## Configure with Terraform
|
||||
|
||||
You can configure the Azure Monitor data source using the [Grafana Terraform provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs). This approach enables infrastructure-as-code workflows and version control for your Grafana configuration.
|
||||
|
||||
### Terraform prerequisites
|
||||
|
||||
- [Terraform](https://www.terraform.io/downloads) installed
|
||||
- Grafana Terraform provider configured with appropriate credentials
|
||||
- For Grafana Cloud: A [Cloud Access Policy token](https://grafana.com/docs/grafana-cloud/account-management/authentication-and-permissions/access-policies/) with data source permissions
|
||||
|
||||
### Provider configuration
|
||||
|
||||
Configure the Grafana provider to connect to your Grafana instance:
|
||||
|
||||
```hcl
|
||||
terraform {
|
||||
required_providers {
|
||||
grafana = {
|
||||
source = "grafana/grafana"
|
||||
version = ">= 2.0.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# For Grafana Cloud
|
||||
provider "grafana" {
|
||||
url = "<YOUR_GRAFANA_CLOUD_STACK_URL>"
|
||||
auth = "<YOUR_SERVICE_ACCOUNT_TOKEN>"
|
||||
}
|
||||
|
||||
# For self-hosted Grafana
|
||||
# provider "grafana" {
|
||||
# url = "http://localhost:3000"
|
||||
# auth = "<API_KEY_OR_SERVICE_ACCOUNT_TOKEN>"
|
||||
# }
|
||||
```
|
||||
|
||||
### Terraform examples
|
||||
|
||||
The following examples show how to configure the Azure Monitor data source for each authentication method.
|
||||
|
||||
**App Registration (client secret):**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "clientsecret"
|
||||
cloudName = "azuremonitor"
|
||||
tenantId = "<TENANT_ID>"
|
||||
clientId = "<CLIENT_ID>"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
|
||||
secure_json_data_encoded = jsonencode({
|
||||
clientSecret = "<CLIENT_SECRET>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**Managed Identity:**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "msi"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**Workload Identity:**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "workloadidentity"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**Current User:**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "currentuser"
|
||||
oauthPassThru = true
|
||||
disableGrafanaCache = true
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**With Basic Logs enabled:**
|
||||
|
||||
Add `enableBasicLogs = true` to any of the above configurations:
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "clientsecret"
|
||||
cloudName = "azuremonitor"
|
||||
tenantId = "<TENANT_ID>"
|
||||
clientId = "<CLIENT_ID>"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
enableBasicLogs = true
|
||||
})
|
||||
|
||||
secure_json_data_encoded = jsonencode({
|
||||
clientSecret = "<CLIENT_SECRET>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
For more information about the Grafana Terraform provider, refer to the [provider documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs) and the [grafana_data_source resource](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/data_source).
|
||||
@@ -21,6 +21,7 @@ labels:
|
||||
menuTitle: Query editor
|
||||
title: Azure Monitor query editor
|
||||
weight: 300
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
query-transform-data-query-options:
|
||||
- pattern: /docs/grafana/
|
||||
@@ -32,30 +33,85 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
troubleshoot-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
configure-grafana-feature-toggles:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/
|
||||
template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
alerting-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
annotations-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
---
|
||||
|
||||
# Azure Monitor query editor
|
||||
|
||||
This topic explains querying specific to the Azure Monitor data source.
|
||||
For general documentation on querying data sources in Grafana, see [Query and transform data](ref:query-transform-data).
|
||||
Grafana provides a query editor for the Azure Monitor data source, which is located on the [Explore page](ref:explore). You can also access the Azure Monitor query editor from a dashboard panel. Click the menu in the upper right of the panel and select **Edit**.
|
||||
|
||||
## Choose a query editing mode
|
||||
This document explains querying specific to the Azure Monitor data source.
|
||||
For general documentation on querying data sources in Grafana, refer to [Query and transform data](ref:query-transform-data).
|
||||
|
||||
The Azure Monitor data source's query editor has three modes depending on which Azure service you want to query:
|
||||
The Azure Monitor data source can query data from Azure Monitor Metrics and Logs, the Azure Resource Graph, and Application Insights Traces. Each source has its own specialized query editor.
|
||||
|
||||
## Before you begin
|
||||
|
||||
- Ensure you have [configured the Azure Monitor data source](ref:configure-azure-monitor).
|
||||
- Verify your credentials have appropriate permissions for the resources you want to query.
|
||||
|
||||
## Key concepts
|
||||
|
||||
If you're new to Azure Monitor, here are some key terms used throughout this documentation:
|
||||
|
||||
| Term | Description |
|
||||
| ------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **KQL (Kusto Query Language)** | The query language used for Azure Monitor Logs and Azure Resource Graph. KQL uses a pipe-based syntax similar to Unix commands and is optimized for read-only data exploration. If you know SQL, the [SQL to Kusto cheat sheet](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/sqlcheatsheet) can help you get started. |
|
||||
| **Log Analytics workspace** | An Azure resource that collects and stores log data from your Azure resources, applications, and services. You query this data using KQL. |
|
||||
| **Application Insights** | Azure's application performance monitoring (APM) service. It collects telemetry data like requests, exceptions, and traces from your applications. |
|
||||
| **Metrics vs. Logs** | **Metrics** are lightweight numeric values collected at regular intervals (e.g., CPU percentage). **Logs** are detailed records of events with varying schemas (e.g., request logs, error messages). Metrics use a visual query builder; Logs require KQL. |
|
||||
|
||||
## Choose a query editor mode
|
||||
|
||||
The Azure Monitor data source's query editor has four modes depending on which Azure service you want to query:
|
||||
|
||||
- **Metrics** for [Azure Monitor Metrics](#query-azure-monitor-metrics)
|
||||
- **Logs** for [Azure Monitor Logs](#query-azure-monitor-logs)
|
||||
- [**Azure Resource Graph**](#query-azure-resource-graph)
|
||||
- **Traces** for [Application Insights Traces](#query-application-insights-traces)
|
||||
- **Azure Resource Graph** for [Azure Resource Graph](#query-azure-resource-graph)
|
||||
|
||||
## Query Azure Monitor Metrics
|
||||
|
||||
Azure Monitor Metrics collects numeric data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and you can query them to investigate your resources' health and usage and maximise availability and performance.
|
||||
Azure Monitor Metrics collects numeric data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and you can query them to investigate your resources' health and usage and maximize availability and performance.
|
||||
|
||||
Monitor Metrics use a lightweight format that stores only numeric data in a specific structure and supports near real-time scenarios, making it useful for fast detection of issues.
|
||||
In contrast, Azure Monitor Logs can store a variety of data types, each with their own structure.
|
||||
|
||||
{{< figure src="/static/img/docs/azure-monitor/query-editor-metrics.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Logs Metrics sample query visualizing CPU percentage over time" >}}
|
||||
{{< figure src="/static/img/docs/azure-monitor/query-editor-metrics.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor Metrics sample query visualizing CPU percentage over time" >}}
|
||||
|
||||
### Create a Metrics query
|
||||
|
||||
@@ -85,7 +141,7 @@ Optionally, you can apply further aggregations or filter by dimensions.
|
||||
|
||||
The available options change depending on what is relevant to the selected metric.
|
||||
|
||||
You can also augment queries by using [template variables](../template-variables/).
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
|
||||
### Format legend aliases
|
||||
|
||||
@@ -109,7 +165,7 @@ For example:
|
||||
| `{{ dimensionname }}` | _(Legacy for backward compatibility)_ Replaced with the name of the first dimension. |
|
||||
| `{{ dimensionvalue }}` | _(Legacy for backward compatibility)_ Replaced with the value of the first dimension. |
|
||||
|
||||
### Filter using dimensions
|
||||
### Filter with dimensions
|
||||
|
||||
Some metrics also have dimensions, which associate additional metadata.
|
||||
Dimensions are represented as key-value pairs assigned to each value of a metric.
|
||||
@@ -121,7 +177,7 @@ For more information on multi-dimensional metrics, refer to the [Azure Monitor d
|
||||
|
||||
## Query Azure Monitor Logs
|
||||
|
||||
Azure Monitor Logs collects and organises log and performance data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and makes many sources of data available to query together with the [Kusto Query Language (KQL)](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/).
|
||||
Azure Monitor Logs collects and organizes log and performance data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and makes many sources of data available to query together with the [Kusto Query Language (KQL)](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/).
|
||||
|
||||
While Azure Monitor Metrics stores only simplified numerical data, Logs can store different data types, each with their own structure.
|
||||
You can also perform complex analysis of Logs data by using KQL.
|
||||
@@ -130,6 +186,32 @@ The Azure Monitor data source also supports querying of [Basic Logs](https://lea
|
||||
|
||||
{{< figure src="/static/img/docs/azure-monitor/query-editor-logs.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor Logs sample query comparing successful requests to failed requests" >}}
|
||||
|
||||
### Logs query builder (public preview)
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The Logs query builder is a [public preview feature](/docs/release-life-cycle/). It may not be enabled in all Grafana environments.
|
||||
{{< /admonition >}}
|
||||
|
||||
The Logs query builder provides a visual interface for building Azure Monitor Logs queries without writing KQL. This is helpful if you're new to KQL or want to quickly build simple queries.
|
||||
|
||||
**To enable the Logs query builder:**
|
||||
|
||||
1. Enable the `azureMonitorLogsBuilderEditor` [feature toggle](ref:configure-grafana-feature-toggles) in your Grafana configuration.
|
||||
1. Restart Grafana for the change to take effect.
|
||||
|
||||
**To switch between Builder and Code modes:**
|
||||
|
||||
When the feature is enabled, a **Builder / Code** toggle appears in the Logs query editor:
|
||||
|
||||
- **Builder**: Use the visual interface to select tables, columns, filters, and aggregations. The builder generates the KQL query for you.
|
||||
- **Code**: Write KQL queries directly. Use this mode for complex queries that require full KQL capabilities.
|
||||
|
||||
New queries default to Builder mode. Existing queries that were created with raw KQL remain in Code mode.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
You can switch from Builder to Code mode at any time to view or edit the generated KQL. However, switching from Code to Builder mode may not preserve complex queries that can't be represented in the builder interface.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Create a Logs query
|
||||
|
||||
**To create a Logs query:**
|
||||
@@ -140,13 +222,13 @@ The Azure Monitor data source also supports querying of [Basic Logs](https://lea
|
||||
|
||||
Alternatively, you can dynamically query all resources under a single resource group or subscription.
|
||||
{{< admonition type="note" >}}
|
||||
If a timespan is specified in the query, the overlap of the timespan between the query and the dashboard will be used as the query timespan. See the [API documentation for
|
||||
If a time span is specified in the query, the overlap between the query time span and the dashboard time range will be used. See the [API documentation for
|
||||
details.](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters)
|
||||
{{< /admonition >}}
|
||||
|
||||
1. Enter your KQL query.
|
||||
|
||||
You can also augment queries by using [template variables](../template-variables/).
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
|
||||
**To create a Basic Logs query:**
|
||||
|
||||
@@ -161,7 +243,7 @@ You can also augment queries by using [template variables](../template-variables
|
||||
{{< /admonition >}}
|
||||
1. Enter your KQL query.
|
||||
|
||||
You can also augment queries by using [template variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/).
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
|
||||
### Logs query examples
|
||||
|
||||
@@ -174,24 +256,28 @@ The Azure documentation includes resources to help you learn KQL:
|
||||
- [Tutorial: Use Kusto queries in Azure Monitor](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/tutorial?pivots=azuremonitor)
|
||||
- [SQL to Kusto cheat sheet](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/sqlcheatsheet)
|
||||
|
||||
> **Time-range:** The time-range that will be used for the query can be modified via the time-range switch. Selecting `Query` will only make use of time-ranges specified within the query.
|
||||
> Specifying `Dashboard` will only make use of the Grafana time-range.
|
||||
> If there are no time-ranges specified within the query, the default Log Analytics time-range will apply.
|
||||
> For more details on this change, refer to the [Azure Monitor Logs API documentation](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters).
|
||||
> If the `Intersection` option was previously chosen it will be migrated by default to `Dashboard`.
|
||||
{{< admonition type="note" >}}
|
||||
**Time-range:** The time-range used for the query can be modified via the time-range switch:
|
||||
|
||||
This example query returns a virtual machine's CPU performance, averaged over 5ms time grains:
|
||||
- Selecting **Query** uses only time-ranges specified within the query.
|
||||
- Selecting **Dashboard** uses only the Grafana dashboard time-range.
|
||||
- If no time-range is specified in the query, the default Log Analytics time-range applies.
|
||||
|
||||
For more details, refer to the [Azure Monitor Logs API documentation](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters). If you previously used the `Intersection` option, it has been migrated to `Dashboard`.
|
||||
{{< /admonition >}}
|
||||
|
||||
This example query returns a virtual machine's CPU performance, averaged over 5-minute time grains:
|
||||
|
||||
```kusto
|
||||
Perf
|
||||
# $__timeFilter is a special Grafana macro that filters the results to the time span of the dashboard
|
||||
// $__timeFilter is a special Grafana macro that filters the results to the time span of the dashboard
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where CounterName == "% Processor Time"
|
||||
| summarize avg(CounterValue) by bin(TimeGenerated, 5m), Computer
|
||||
| order by TimeGenerated asc
|
||||
```
|
||||
|
||||
Use time series queries for values that change over time, usually for graph visualisations such as the Time series panel.
|
||||
Use time series queries for values that change over time, usually for graph visualizations such as the Time series panel.
|
||||
Each query should return at least a datetime column and numeric value column.
|
||||
The result must also be sorted in ascending order by the datetime column.
|
||||
|
||||
@@ -357,21 +443,33 @@ Application Insights stores trace data in an underlying Log Analytics workspace
|
||||
This query type only supports Application Insights resources.
|
||||
{{< /admonition >}}
|
||||
|
||||
Running a query of this kind will return all trace data within the timespan specified by the panel/dashboard.
|
||||
1. (Optional) Specify an **Operation ID** value to filter traces.
|
||||
1. (Optional) Specify **event types** to filter by.
|
||||
1. (Optional) Specify **event properties** to filter by.
|
||||
1. (Optional) Change the **Result format** to switch between tabular format and trace format.
|
||||
|
||||
Optionally, you can apply further filtering or select a specific Operation ID to query. The result format can also be switched between a tabular format or the trace format which will return the data in a format that can be used with the Trace visualization.
|
||||
{{< admonition type="note" >}}
|
||||
Selecting the trace format filters events to only the `trace` type. Use this format with the Trace visualization.
|
||||
{{< /admonition >}}
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Selecting the trace format will filter events with the `trace` type.
|
||||
{{< /admonition >}}
|
||||
Running a query returns all trace data within the time span specified by the panel or dashboard time range.
|
||||
|
||||
1. Specify an Operation ID value.
|
||||
1. Specify event types to filter by.
|
||||
1. Specify event properties to filter by.
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
|
||||
You can also augment queries by using [template variables](../template-variables/).
|
||||
## Use queries for alerting and recording rules
|
||||
|
||||
## Working with large Azure resource data sets
|
||||
All Azure Monitor query types (Metrics, Logs, Azure Resource Graph, and Traces) can be used with Grafana Alerting and recording rules.
|
||||
|
||||
For detailed information about creating alert rules, supported query types, authentication requirements, and examples, refer to [Azure Monitor alerting](ref:alerting-azure-monitor).
|
||||
|
||||
## Work with large Azure resource datasets
|
||||
|
||||
If a request exceeds the [maximum allowed value of records](https://docs.microsoft.com/en-us/azure/governance/resource-graph/concepts/work-with-data#paging-results), the result is paginated and only the first page of results are returned.
|
||||
You can use filters to reduce the amount of records returned under that value.
|
||||
|
||||
## Next steps
|
||||
|
||||
- [Use template variables](../template-variables/) to create dynamic, reusable dashboards
|
||||
- [Add annotations](ref:annotations-azure-monitor) to overlay events on your graphs
|
||||
- [Set up alerting](ref:alerting-azure-monitor) to create alert rules based on Azure Monitor data
|
||||
- [Troubleshoot](ref:troubleshoot-azure-monitor) common query and configuration issues
|
||||
|
||||
@@ -23,6 +23,7 @@ labels:
|
||||
menuTitle: Template variables
|
||||
title: Azure Monitor template variables
|
||||
weight: 400
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
variables:
|
||||
- pattern: /docs/grafana/
|
||||
@@ -34,6 +35,11 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
---
|
||||
|
||||
# Azure Monitor template variables
|
||||
@@ -42,58 +48,173 @@ Instead of hard-coding details such as resource group or resource name values in
|
||||
This helps you create more interactive, dynamic, and reusable dashboards.
|
||||
Grafana refers to such variables as template variables.
|
||||
|
||||
For an introduction to templating and template variables, refer to the [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables) documentation.
|
||||
For an introduction to templating and template variables, refer to the [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables).
|
||||
|
||||
## Use query variables
|
||||
## Before you begin
|
||||
|
||||
You can specify these Azure Monitor data source queries in the Variable edit view's **Query Type** field.
|
||||
- Ensure you have [configured the Azure Monitor data source](ref:configure-azure-monitor).
|
||||
- If you want template variables to auto-populate subscriptions, set a **Default Subscription** in the data source configuration.
|
||||
|
||||
| Name | Description |
|
||||
| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Subscriptions** | Returns subscriptions. |
|
||||
| **Resource Groups** | Returns resource groups for a specified subscription. Supports multi-value. |
|
||||
| **Namespaces** | Returns metric namespaces for the specified subscription. If a resource group is provided, only the namespaces within that group are returned. |
|
||||
| **Regions** | Returns regions for the specified subscription |
|
||||
| **Resource Names** | Returns a list of resource names for a specified subscription, resource group and namespace. Supports multi-value. |
|
||||
| **Metric Names** | Returns a list of metric names for a resource. |
|
||||
| **Workspaces** | Returns a list of workspaces for the specified subscription. |
|
||||
| **Logs** | Use a KQL query to return values. |
|
||||
| **Custom Namespaces** | Returns metric namespaces for the specified resource. |
|
||||
| **Custom Metric Names** | Returns a list of custom metric names for the specified resource. |
|
||||
## Create a template variable
|
||||
|
||||
To create a template variable for Azure Monitor:
|
||||
|
||||
1. Open the dashboard where you want to add the variable.
|
||||
1. Click **Dashboard settings** (gear icon) in the top navigation.
|
||||
1. Select **Variables** in the left menu.
|
||||
1. Click **Add variable**.
|
||||
1. Enter a **Name** for your variable (e.g., `subscription`, `resourceGroup`, `resource`).
|
||||
1. In the **Type** dropdown, select **Query**.
|
||||
1. In the **Data source** dropdown, select your Azure Monitor data source.
|
||||
1. In the **Query Type** dropdown, select the appropriate query type (see [Available query types](#available-query-types)).
|
||||
1. Configure any additional fields required by the selected query type.
|
||||
1. Click **Run query** to preview the variable values.
|
||||
1. Configure display options such as **Multi-value** or **Include All option** as needed.
|
||||
1. Click **Apply** to save the variable.
|
||||
|
||||
## Available query types
|
||||
|
||||
The Azure Monitor data source provides the following query types for template variables:
|
||||
|
||||
| Query type | Description |
|
||||
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Subscriptions** | Returns a list of Azure subscriptions accessible to the configured credentials. |
|
||||
| **Resource Groups** | Returns resource groups for a specified subscription. Supports multi-value selection. |
|
||||
| **Namespaces** | Returns metric namespaces for the specified subscription. If a resource group is specified, returns only namespaces within that group. |
|
||||
| **Regions** | Returns Azure regions available for the specified subscription. |
|
||||
| **Resource Names** | Returns resource names for a specified subscription, resource group, and namespace. Supports multi-value selection. |
|
||||
| **Metric Names** | Returns available metric names for a specified resource. |
|
||||
| **Workspaces** | Returns Log Analytics workspaces for the specified subscription. |
|
||||
| **Logs** | Executes a KQL query and returns the results as variable values. See [Create a Logs variable](#create-a-logs-variable). |
|
||||
| **Custom Namespaces** | Returns custom metric namespaces for a specified resource. |
|
||||
| **Custom Metric Names** | Returns custom metric names for a specified resource. |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Custom metrics cannot be emitted against a subscription or resource group. Select resources only when you need to retrieve custom metric namespaces or custom metric names associated with a specific resource.
|
||||
Custom metrics cannot be emitted against a subscription or resource group. Select specific resources when retrieving custom metric namespaces or custom metric names.
|
||||
{{< /admonition >}}
|
||||
|
||||
You can use any Log Analytics Kusto Query Language (KQL) query that returns a single list of values in the `Query` field.
|
||||
For example:
|
||||
## Create cascading variables
|
||||
|
||||
| Query | List of values returned |
|
||||
| ----------------------------------------------------------------------------------------- | --------------------------------------- |
|
||||
| `workspace("myWorkspace").Heartbeat \| distinct Computer` | Virtual machines |
|
||||
| `workspace("$workspace").Heartbeat \| distinct Computer` | Virtual machines with template variable |
|
||||
| `workspace("$workspace").Perf \| distinct ObjectName` | Objects from the Perf table |
|
||||
| `workspace("$workspace").Perf \| where ObjectName == "$object"` `\| distinct CounterName` | Metric names from the Perf table |
|
||||
Cascading variables (also called dependent or chained variables) allow you to create dropdown menus that filter based on previous selections. This is useful for drilling down from subscription to resource group to specific resource.
|
||||
|
||||
### Query variable example
|
||||
### Example: Subscription → Resource Group → Resource Name
|
||||
|
||||
This time series query uses query variables:
|
||||
**Step 1: Create a Subscription variable**
|
||||
|
||||
1. Create a variable named `subscription`.
|
||||
1. Set **Query Type** to **Subscriptions**.
|
||||
|
||||
**Step 2: Create a Resource Group variable**
|
||||
|
||||
1. Create a variable named `resourceGroup`.
|
||||
1. Set **Query Type** to **Resource Groups**.
|
||||
1. In the **Subscription** field, select `$subscription`.
|
||||
|
||||
**Step 3: Create a Resource Name variable**
|
||||
|
||||
1. Create a variable named `resource`.
|
||||
1. Set **Query Type** to **Resource Names**.
|
||||
1. In the **Subscription** field, select `$subscription`.
|
||||
1. In the **Resource Group** field, select `$resourceGroup`.
|
||||
1. Select the appropriate **Namespace** for your resources (e.g., `Microsoft.Compute/virtualMachines`).
|
||||
|
||||
Now when you change the subscription, the resource group dropdown updates automatically, and when you change the resource group, the resource name dropdown updates.
|
||||
|
||||
## Create a Logs variable
|
||||
|
||||
The **Logs** query type lets you use a KQL query to populate variable values. The query must return a single column of values.
|
||||
|
||||
**To create a Logs variable:**
|
||||
|
||||
1. Create a new variable with **Query Type** set to **Logs**.
|
||||
1. Select a **Resource** (Log Analytics workspace or Application Insights resource).
|
||||
1. Enter a KQL query that returns a single column.
|
||||
|
||||
### Logs variable query examples
|
||||
|
||||
| Query | Returns |
|
||||
| ----------------------------------------- | ------------------------------------- |
|
||||
| `Heartbeat \| distinct Computer` | List of virtual machine names |
|
||||
| `Perf \| distinct ObjectName` | List of performance object names |
|
||||
| `AzureActivity \| distinct ResourceGroup` | List of resource groups with activity |
|
||||
| `AppRequests \| distinct Name` | List of application request names |
|
||||
|
||||
You can reference other variables in your Logs query:
|
||||
|
||||
```kusto
|
||||
workspace("$workspace").Heartbeat | distinct Computer
|
||||
```
|
||||
|
||||
```kusto
|
||||
workspace("$workspace").Perf
|
||||
| where ObjectName == "$object"
|
||||
| distinct CounterName
|
||||
```
|
||||
|
||||
## Variable refresh options
|
||||
|
||||
Control when your variables refresh by setting the **Refresh** option:
|
||||
|
||||
| Option | Behavior |
|
||||
| ------------------------ | ----------------------------------------------------------------------------------------- |
|
||||
| **On dashboard load** | Variables refresh each time the dashboard loads. Best for data that changes infrequently. |
|
||||
| **On time range change** | Variables refresh when the dashboard time range changes. Use for time-sensitive queries. |
|
||||
|
||||
For dashboards with many variables or complex queries, use **On dashboard load** to improve performance.
|
||||
|
||||
## Use variables in queries
|
||||
|
||||
After you create template variables, you can use them in your Azure Monitor queries by referencing them with the `$` prefix.
|
||||
|
||||
### Metrics query example
|
||||
|
||||
In a Metrics query, select your variables in the resource picker fields:
|
||||
|
||||
- **Subscription**: `$subscription`
|
||||
- **Resource Group**: `$resourceGroup`
|
||||
- **Resource Name**: `$resource`
|
||||
|
||||
### Logs query example
|
||||
|
||||
Reference variables directly in your KQL queries:
|
||||
|
||||
```kusto
|
||||
Perf
|
||||
| where ObjectName == "$object" and CounterName == "$metric"
|
||||
| where TimeGenerated >= $__timeFrom() and TimeGenerated <= $__timeTo()
|
||||
| where $__contains(Computer, $computer)
|
||||
| where $__contains(Computer, $computer)
|
||||
| summarize avg(CounterValue) by bin(TimeGenerated, $__interval), Computer
|
||||
| order by TimeGenerated asc
|
||||
```
|
||||
|
||||
### Multi-value variables
|
||||
## Multi-value variables
|
||||
|
||||
It is possible to select multiple values for **Resource Groups** and **Resource Names** and use a single metrics query pointing to those values as long as they:
|
||||
You can enable **Multi-value** selection for **Resource Groups** and **Resource Names** variables. When using multi-value variables in a Metrics query, all selected resources must:
|
||||
|
||||
- Belong to the same subscription.
|
||||
- Are in the same region.
|
||||
- Are of the same type (namespace).
|
||||
- Belong to the same subscription
|
||||
- Be in the same Azure region
|
||||
- Be of the same resource type (namespace)
|
||||
|
||||
Also, note that if a template variable pointing to multiple resource groups or names is used in another template variable as a parameter (e.g. to retrieve metric names), only the first value will be used. This means that the combination of the first resource group and name selected should be valid.
|
||||
{{< admonition type="note" >}}
|
||||
When a multi-value variable is used as a parameter in another variable query (for example, to retrieve metric names), only the first selected value is used. Ensure the first resource group and resource name combination is valid.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Troubleshoot template variables
|
||||
|
||||
If you encounter issues with template variables, try the following solutions.
|
||||
|
||||
### Variable returns no values
|
||||
|
||||
- Verify the Azure Monitor data source is configured correctly and can connect to Azure.
|
||||
- Check that the credentials have appropriate permissions to list the requested resources.
|
||||
- For cascading variables, ensure parent variables have valid selections.
|
||||
|
||||
### Variable values are outdated
|
||||
|
||||
- Check the **Refresh** setting and adjust if needed.
|
||||
- Click the refresh icon next to the variable dropdown to manually refresh.
|
||||
|
||||
### Multi-value selection not working in queries
|
||||
|
||||
- Ensure the resources meet the requirements (same subscription, region, and type).
|
||||
- For Logs queries, use the `$__contains()` macro to handle multi-value variables properly.
|
||||
|
||||
320
docs/sources/datasources/azure-monitor/troubleshooting/index.md
Normal file
320
docs/sources/datasources/azure-monitor/troubleshooting/index.md
Normal file
@@ -0,0 +1,320 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/troubleshooting/
|
||||
description: Troubleshooting guide for the Azure Monitor data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- azure
|
||||
- monitor
|
||||
- troubleshooting
|
||||
- errors
|
||||
- authentication
|
||||
- query
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Troubleshoot
|
||||
title: Troubleshoot Azure Monitor data source issues
|
||||
weight: 500
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
---
|
||||
|
||||
# Troubleshoot Azure Monitor data source issues
|
||||
|
||||
This document provides solutions to common issues you may encounter when configuring or using the Azure Monitor data source.
|
||||
|
||||
## Configuration and authentication errors
|
||||
|
||||
These errors typically occur when setting up the data source or when authentication credentials are invalid.
|
||||
|
||||
### "Authorization failed" or "Access denied"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Save & test fails with "Authorization failed"
|
||||
- Queries return "Access denied" errors
|
||||
- Subscriptions don't load when clicking **Load Subscriptions**
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| -------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| App registration doesn't have required permissions | Assign the `Reader` role to the app registration on the subscription or resource group you want to monitor. Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current). |
|
||||
| Incorrect tenant ID, client ID, or client secret | Verify the credentials in the Azure Portal under **App registrations** > your app > **Overview** (for IDs) and **Certificates & secrets** (for secret). |
|
||||
| Client secret has expired | Create a new client secret in Azure and update the data source configuration. |
|
||||
| Managed Identity not enabled on the Azure resource | For VMs, enable managed identity in the Azure Portal under **Identity**. For App Service, enable it under **Identity** in the app settings. |
|
||||
| Managed Identity not assigned the Reader role | Assign the `Reader` role to the managed identity on the target subscription or resources. |
|
||||
|
||||
### "Invalid client secret" or "Client secret not found"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Authentication fails immediately after configuration
|
||||
- Error message references invalid credentials
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Ensure you copied the client secret **value**, not the secret ID. In Azure Portal under **Certificates & secrets**, the secret value is only shown once when created. The secret ID is a different identifier and won't work for authentication.
|
||||
2. Verify the client secret was copied correctly (no extra spaces or truncation).
|
||||
3. Check if the secret has expired in Azure Portal under **App registrations** > your app > **Certificates & secrets**.
|
||||
4. Create a new secret and update the data source configuration.
|
||||
|
||||
### "Tenant not found" or "Invalid tenant ID"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Data source test fails with tenant-related errors
|
||||
- Unable to authenticate
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the Directory (tenant) ID in Azure Portal under **Microsoft Entra ID** > **Overview**.
|
||||
2. Ensure you're using the correct Azure cloud setting (Azure, Azure Government, or Azure China).
|
||||
3. Check that the tenant ID is a valid GUID format.
|
||||
|
||||
### Managed Identity not working
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Managed Identity option is available but authentication fails
|
||||
- Error: "Managed identity authentication is not available"
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify `managed_identity_enabled = true` is set in the Grafana server configuration under `[azure]`.
|
||||
2. Confirm the Azure resource hosting Grafana has managed identity enabled.
|
||||
3. For user-assigned managed identity, ensure `managed_identity_client_id` is set correctly.
|
||||
4. Verify the managed identity has the `Reader` role on the target resources.
|
||||
5. Restart Grafana after changing server configuration.
|
||||
|
||||
### Workload Identity not working
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Workload Identity authentication fails in Kubernetes/AKS environment
|
||||
- Token file errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify `workload_identity_enabled = true` is set in the Grafana server configuration.
|
||||
2. Check that the service account is correctly annotated for workload identity.
|
||||
3. Verify the federated credential is configured in Azure.
|
||||
4. Ensure the token path is accessible to the Grafana pod.
|
||||
5. Check the workload identity webhook is running in the cluster.
|
||||
|
||||
## Query errors
|
||||
|
||||
These errors occur when executing queries against Azure Monitor services.
|
||||
|
||||
### "No data" or empty results
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query executes without error but returns no data
|
||||
- Charts show "No data" message
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Time range doesn't contain data | Expand the dashboard time range or verify data exists in Azure Portal. |
|
||||
| Wrong resource selected | Verify you've selected the correct subscription, resource group, and resource. |
|
||||
| Metric not available for resource | Not all metrics are available for all resources. Check available metrics in Azure Portal under the resource's **Metrics** blade. |
|
||||
| Metric has no values | Some metrics only populate under certain conditions (e.g., error counts when errors occur). |
|
||||
| Permissions issue | Verify the identity has read access to the specific resource. |
|
||||
|
||||
### "Bad request" or "Invalid query"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query fails with 400 error
|
||||
- Error message indicates query syntax issues
|
||||
|
||||
**Solutions for Logs queries:**
|
||||
|
||||
1. Validate your KQL syntax in the Azure Portal Log Analytics query editor.
|
||||
2. Check for typos in table names or column names.
|
||||
3. Ensure referenced tables exist in the selected workspace.
|
||||
4. Verify the time range is valid (not in the future, not too far in the past for data retention).
|
||||
|
||||
**Solutions for Metrics queries:**
|
||||
|
||||
1. Verify the metric name is valid for the selected resource type.
|
||||
2. Check that dimension filters use valid dimension names and values.
|
||||
3. Ensure the aggregation type is supported for the selected metric.
|
||||
|
||||
### "Resource not found"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query fails with 404 error
|
||||
- Resource picker shows resources that can't be queried
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the resource still exists in Azure (it may have been deleted or moved).
|
||||
2. Check that the subscription is correct.
|
||||
3. Refresh the resource picker by re-selecting the subscription.
|
||||
4. Verify the identity has access to the resource's resource group.
|
||||
|
||||
### Logs query timeout
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query runs for a long time then fails
|
||||
- Error mentions timeout or query limits
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Narrow the time range to reduce data volume.
|
||||
2. Add filters to reduce the result set.
|
||||
3. Use `summarize` to aggregate data instead of returning raw rows.
|
||||
4. Consider using Basic Logs for large datasets (if enabled).
|
||||
5. Break complex queries into smaller parts.
|
||||
|
||||
### "Metrics not available" for a resource
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Resource appears in picker but no metrics are listed
|
||||
- Metric dropdown is empty
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the resource type supports Azure Monitor metrics.
|
||||
2. Check if the resource is in a region that supports metrics.
|
||||
3. Some resources require diagnostic settings to emit metrics—configure these in Azure Portal.
|
||||
4. Try selecting a different namespace for the resource.
|
||||
|
||||
## Azure Resource Graph errors
|
||||
|
||||
These errors are specific to Azure Resource Graph (ARG) queries.
|
||||
|
||||
### "Query execution failed"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- ARG query fails with execution errors
|
||||
- Results don't match expected resources
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Validate query syntax in Azure Portal Resource Graph Explorer.
|
||||
2. Check that you have access to the subscriptions being queried.
|
||||
3. Verify table names are correct (e.g., `Resources`, `ResourceContainers`).
|
||||
4. Some ARG features require specific permissions, check [ARG documentation](https://docs.microsoft.com/en-us/azure/governance/resource-graph/).
|
||||
|
||||
### Query returns incomplete results
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Not all expected resources appear in results
|
||||
- Results seem truncated
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. ARG queries are paginated. The data source handles pagination automatically, but very large result sets may be limited.
|
||||
2. Add filters to reduce result set size.
|
||||
3. Verify you have access to all subscriptions containing the resources.
|
||||
|
||||
## Application Insights Traces errors
|
||||
|
||||
These errors are specific to the Traces query type.
|
||||
|
||||
### "No traces found"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Trace query returns empty results
|
||||
- Operation ID search finds nothing
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the Application Insights resource is collecting trace data.
|
||||
2. Check that the time range includes when the traces were generated.
|
||||
3. Ensure the Operation ID is correct (copy directly from another trace or log).
|
||||
4. Verify the identity has access to the Application Insights resource.
|
||||
|
||||
## Template variable errors
|
||||
|
||||
For detailed troubleshooting of template variables, refer to the [template variables troubleshooting section](ref:template-variables).
|
||||
|
||||
### Variables return no values
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the data source connection is working (test it in the data source settings).
|
||||
2. Check that parent variables (for cascading variables) have valid selections.
|
||||
3. Verify the identity has permissions to list the requested resources.
|
||||
4. For Logs variables, ensure the KQL query returns a single column.
|
||||
|
||||
### Variables are slow to load
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Set variable refresh to **On dashboard load** instead of **On time range change**.
|
||||
2. Reduce the scope of variable queries (e.g., filter by resource group instead of entire subscription).
|
||||
3. For Logs variables, optimize the KQL query to return results faster.
|
||||
|
||||
## Connection and network errors
|
||||
|
||||
These errors indicate problems with network connectivity between Grafana and Azure services.
|
||||
|
||||
### "Connection refused" or timeout errors
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Data source test fails with network errors
|
||||
- Queries timeout without returning results
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify network connectivity from Grafana to Azure endpoints.
|
||||
2. Check firewall rules allow outbound HTTPS (port 443) to Azure services.
|
||||
3. For private networks, ensure Private Link or VPN is configured correctly.
|
||||
4. For Grafana Cloud, configure [Private Data Source Connect](ref:configure-azure-monitor) if accessing private resources.
|
||||
|
||||
### SSL/TLS certificate errors
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Certificate validation failures
|
||||
- SSL handshake errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Ensure the system time is correct (certificate validation fails with incorrect time).
|
||||
2. Verify corporate proxy isn't intercepting HTTPS traffic.
|
||||
3. Check that required CA certificates are installed on the Grafana server.
|
||||
|
||||
## Get additional help
|
||||
|
||||
If you've tried the solutions above and still encounter issues:
|
||||
|
||||
1. Check the [Grafana community forums](https://community.grafana.com/) for similar issues.
|
||||
1. Review the [Azure Monitor data source GitHub issues](https://github.com/grafana/grafana/issues) for known bugs.
|
||||
1. Enable debug logging in Grafana to capture detailed error information.
|
||||
1. Contact Grafana Support if you're an Enterprise, Cloud Pro or Cloud Contracted user.
|
||||
1. When reporting issues, include:
|
||||
- Grafana version
|
||||
- Error messages (redact sensitive information)
|
||||
- Steps to reproduce
|
||||
- Relevant configuration (redact credentials)
|
||||
@@ -52,6 +52,7 @@ The following documents will help you get started with the InfluxDB data source
|
||||
- [Configure the InfluxDB data source](./configure-influxdb-data-source/)
|
||||
- [InfluxDB query editor](./query-editor/)
|
||||
- [InfluxDB templates and variables](./template-variables/)
|
||||
- [Troubleshoot issues with the InfluxDB data source](./troubleshooting/)
|
||||
|
||||
Once you have configured the data source you can:
|
||||
|
||||
|
||||
291
docs/sources/datasources/influxdb/troubleshooting/index.md
Normal file
291
docs/sources/datasources/influxdb/troubleshooting/index.md
Normal file
@@ -0,0 +1,291 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/influxdb/troubleshooting/
|
||||
description: Troubleshooting the InfluxDB data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- influxdb
|
||||
- troubleshooting
|
||||
- errors
|
||||
- flux
|
||||
- influxql
|
||||
- sql
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Troubleshooting
|
||||
title: Troubleshoot issues with the InfluxDB data source
|
||||
weight: 600
|
||||
---
|
||||
|
||||
# Troubleshoot issues with the InfluxDB data source
|
||||
|
||||
This document provides troubleshooting information for common errors you may encounter when using the InfluxDB data source in Grafana.
|
||||
|
||||
## Connection errors
|
||||
|
||||
The following errors occur when Grafana cannot establish or maintain a connection to InfluxDB.
|
||||
|
||||
### Failed to connect to InfluxDB
|
||||
|
||||
**Error message:** "error performing influxQL query" or "error performing flux query" or "error performing sql query"
|
||||
|
||||
**Cause:** Grafana cannot establish a network connection to the InfluxDB server.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify that the InfluxDB URL is correct in the data source configuration.
|
||||
1. Check that InfluxDB is running and accessible from the Grafana server.
|
||||
1. Ensure the URL includes the protocol (`http://` or `https://`).
|
||||
1. Verify the port is correct (the InfluxDB default API port is `8086`).
|
||||
1. Ensure there are no firewall rules blocking the connection.
|
||||
1. For Grafana Cloud, ensure you have configured [Private data source connect](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) if your InfluxDB instance is not publicly accessible.
|
||||
|
||||
### Request timed out
|
||||
|
||||
**Error message:** "context deadline exceeded" or "request timeout"
|
||||
|
||||
**Cause:** The connection to InfluxDB timed out before receiving a response.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check the network latency between Grafana and InfluxDB.
|
||||
1. Verify that InfluxDB is not overloaded or experiencing performance issues.
|
||||
1. Increase the timeout setting in the data source configuration under **Advanced HTTP Settings**.
|
||||
1. Reduce the time range or complexity of your query.
|
||||
|
||||
## Authentication errors
|
||||
|
||||
The following errors occur when there are issues with authentication credentials or permissions.
|
||||
|
||||
### Unauthorized (401)
|
||||
|
||||
**Error message:** "401 Unauthorized" or "authorization failed"
|
||||
|
||||
**Cause:** The authentication credentials are invalid or missing.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify that the token or password is correct in the data source configuration.
|
||||
1. For Flux and SQL, ensure the token has not expired.
|
||||
1. For InfluxQL with InfluxDB 2.x, verify the token is set as an `Authorization` header with the value `Token <your-token>`.
|
||||
1. For InfluxDB 1.x, verify the username and password are correct.
|
||||
1. Check that the token has the required permissions to access the specified bucket or database.
|
||||
|
||||
### Forbidden (403)
|
||||
|
||||
**Error message:** "403 Forbidden" or "access denied"
|
||||
|
||||
**Cause:** The authenticated user or token does not have permission to access the requested resource.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the token has read access to the specified bucket or database.
|
||||
1. Check the token's permissions in the InfluxDB UI under **API Tokens**.
|
||||
1. Ensure the organization ID is correct for Flux queries.
|
||||
1. For InfluxQL with InfluxDB 2.x, verify the DBRP mapping is configured correctly.
|
||||
|
||||
## Configuration errors
|
||||
|
||||
The following errors occur when the data source is not configured correctly.
|
||||
|
||||
### Unknown influx version
|
||||
|
||||
**Error message:** "unknown influx version"
|
||||
|
||||
**Cause:** The query language is not properly configured in the data source settings.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Open the data source configuration in Grafana.
|
||||
1. Verify that a valid query language is selected: **Flux**, **InfluxQL**, or **SQL**.
|
||||
1. Ensure the selected query language matches your InfluxDB version:
|
||||
- Flux: InfluxDB 1.8+ and 2.x
|
||||
- InfluxQL: InfluxDB 1.x and 2.x (with DBRP mapping)
|
||||
- SQL: InfluxDB 3.x only
|
||||
|
||||
### Invalid data source info received
|
||||
|
||||
**Error message:** "invalid data source info received"
|
||||
|
||||
**Cause:** The data source configuration is incomplete or corrupted.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Delete and recreate the data source.
|
||||
1. Ensure all required fields are populated based on your query language:
|
||||
- **Flux:** URL, Organization, Token, Default Bucket
|
||||
- **InfluxQL:** URL, Database, User, Password
|
||||
- **SQL:** URL, Database, Token
|
||||
|
||||
### DBRP mapping required
|
||||
|
||||
**Error message:** "database not found" or queries return no data with InfluxQL on InfluxDB 2.x
|
||||
|
||||
**Cause:** InfluxQL queries on InfluxDB 2.x require a Database and Retention Policy (DBRP) mapping.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Create a DBRP mapping in InfluxDB using the CLI or API.
|
||||
1. Refer to [Manage DBRP Mappings](https://docs.influxdata.com/influxdb/cloud/query-data/influxql/dbrp/) for guidance.
|
||||
1. Verify the database name in Grafana matches the DBRP mapping.
|
||||
|
||||
## Query errors
|
||||
|
||||
The following errors occur when there are issues with query syntax or execution.
|
||||
|
||||
### Query syntax error
|
||||
|
||||
**Error message:** "error parsing query: found THING" or "failed to parse query: found WERE, expected ; at line 1, char 38"
|
||||
|
||||
**Cause:** The query contains invalid syntax.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check your query syntax for typos or invalid keywords.
|
||||
1. For InfluxQL, verify the query follows the correct syntax:
|
||||
|
||||
```sql
|
||||
SELECT <field> FROM <measurement> WHERE <condition>
|
||||
```
|
||||
|
||||
1. For Flux, ensure proper pipe-forward syntax and function calls.
|
||||
1. Use the InfluxDB UI or CLI to test your query directly.
|
||||
|
||||
### Query timeout limit exceeded
|
||||
|
||||
**Error message:** "query-timeout limit exceeded"
|
||||
|
||||
**Cause:** The query took longer than the configured timeout limit in InfluxDB.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Reduce the time range of your query.
|
||||
1. Add more specific filters to limit the data scanned.
|
||||
1. Increase the query timeout setting in InfluxDB if you have admin access.
|
||||
1. Optimize your query to reduce complexity.
|
||||
|
||||
### Too many series or data points
|
||||
|
||||
**Error message:** "max-series-per-database limit exceeded" or "A query returned too many data points and the results have been truncated"
|
||||
|
||||
**Cause:** The query is returning more data than the configured limits allow.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Reduce the time range of your query.
|
||||
1. Add filters to limit the number of series returned.
|
||||
1. Increase the **Max series** setting in the data source configuration under **Advanced Database Settings**.
|
||||
1. Use aggregation functions to reduce the number of data points.
|
||||
1. For Flux, use `aggregateWindow()` to downsample data.
|
||||
|
||||
### No time column found
|
||||
|
||||
**Error message:** "no time column found"
|
||||
|
||||
**Cause:** The query result does not include a time column, which is required for time series visualization.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Ensure your query includes a time field.
|
||||
1. For Flux, verify the query includes `_time` in the output.
|
||||
1. For SQL, ensure the query returns a timestamp column.
|
||||
1. Check that the time field is not being filtered out or excluded.
|
||||
|
||||
## Health check errors
|
||||
|
||||
The following errors occur when testing the data source connection.
|
||||
|
||||
### Error getting flux query buckets
|
||||
|
||||
**Error message:** "error getting flux query buckets"
|
||||
|
||||
**Cause:** The health check query `buckets()` failed to return results.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the token has permission to list buckets.
|
||||
1. Check that the organization ID is correct.
|
||||
1. Ensure InfluxDB is running and accessible.
|
||||
|
||||
### Error connecting InfluxDB influxQL
|
||||
|
||||
**Error message:** "error connecting InfluxDB influxQL"
|
||||
|
||||
**Cause:** The health check query `SHOW MEASUREMENTS` failed.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the database name is correct.
|
||||
1. Check that the user has permission to run `SHOW MEASUREMENTS`.
|
||||
1. Ensure the database exists and contains measurements.
|
||||
1. For InfluxDB 2.x, verify DBRP mapping is configured.
|
||||
|
||||
### 0 measurements found
|
||||
|
||||
**Error message:** "data source is working. 0 measurements found"
|
||||
|
||||
**Cause:** The connection is successful, but the database contains no measurements.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify you are connecting to the correct database.
|
||||
1. Check that data has been written to the database.
|
||||
1. If the database is new, add some test data to verify the connection.
|
||||
|
||||
## Other common issues
|
||||
|
||||
The following issues don't produce specific error messages but are commonly encountered.
|
||||
|
||||
### Empty query results
|
||||
|
||||
**Cause:** The query returns no data.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the time range includes data in your database.
|
||||
1. Check that the measurement and field names are correct.
|
||||
1. Test the query directly in the InfluxDB UI or CLI.
|
||||
1. Ensure filters are not excluding all data.
|
||||
1. For InfluxQL, verify the retention policy contains data for the selected time range.
|
||||
|
||||
### Slow query performance
|
||||
|
||||
**Cause:** Queries take a long time to execute.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Reduce the time range of your query.
|
||||
1. Add more specific filters to limit the data scanned.
|
||||
1. Increase the **Min time interval** setting to reduce the number of data points.
|
||||
1. Check InfluxDB server performance and resource utilization.
|
||||
1. For Flux, use `aggregateWindow()` to downsample data before visualization.
|
||||
1. Consider using continuous queries or tasks to pre-aggregate data.
|
||||
|
||||
### Data appears delayed or missing recent points
|
||||
|
||||
**Cause:** The visualization doesn't show the most recent data.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check the dashboard time range and refresh settings.
|
||||
1. Verify the **Min time interval** is not set too high.
|
||||
1. Ensure InfluxDB has finished writing the data.
|
||||
1. Check for clock synchronization issues between Grafana and InfluxDB.
|
||||
|
||||
## Get additional help
|
||||
|
||||
If you continue to experience issues after following this troubleshooting guide:
|
||||
|
||||
1. Check the [InfluxDB documentation](https://docs.influxdata.com/) for API-specific guidance.
|
||||
1. Review the [Grafana community forums](https://community.grafana.com/) for similar issues.
|
||||
1. Contact Grafana Support if you're an Enterprise, Cloud Pro or Cloud Contracted user.
|
||||
1. When reporting issues, include:
|
||||
- Grafana version
|
||||
- InfluxDB version and product (OSS, Cloud, Enterprise)
|
||||
- Query language (Flux, InfluxQL, or SQL)
|
||||
- Error messages (redact sensitive information)
|
||||
- Steps to reproduce
|
||||
- Relevant configuration such as data source settings, HTTP method, and TLS settings (redact tokens, passwords, and other credentials)
|
||||
@@ -107,8 +107,8 @@ Here is an overview of version support through 2026:
|
||||
| 12.0.x | May 5, 2025 | February 5, 2026 | Patch Support |
|
||||
| 12.1.x | July 22, 2025 | April 22, 2026 | Patch Support |
|
||||
| 12.2.x | September 23, 2025 | June 23, 2026 | Patch Support |
|
||||
| 12.3.x | November 18, 2025 | August 18, 2026 | Yet to be released |
|
||||
| 12.4.x (Last minor of 12) | February 24, 2026 | November 24, 2026 | Yet to be released |
|
||||
| 12.3.x | November 19, 2025 | August 19, 2026 | Patch Support |
|
||||
| 12.4.x (Last minor of 12) | February 24, 2026 | May 24, 2027 | Yet to be released |
|
||||
| 13.0.0 | TBD | TBD | Yet to be released |
|
||||
|
||||
## How are these versions supported?
|
||||
|
||||
@@ -149,7 +149,10 @@ To add a new annotation query to a dashboard, follow these steps:
|
||||
You can also click **Open advanced data source picker** to see more options, including adding a data source (Admins only).
|
||||
|
||||
1. If you don't want to use the annotation query right away, clear the **Enabled** checkbox.
|
||||
1. If you don't want the annotation query toggle to be displayed in the dashboard, select the **Hidden** checkbox.
|
||||
1. Select one of the following options in the **Show annotation controls in** drop-down list to control where annotations are displayed:
|
||||
- **Above dashboard** - The annotation toggle is displayed above the dashboard. This is the default.
|
||||
- **Controls menu** - The annotation toggle is displayed in the dashboard controls menu instead of above the dashboard. The dashboard controls menu appears as a button in the dashboard toolbar.
|
||||
- **Hidden** - The annotation toggle is not displayed on the dashboard.
|
||||
1. Select a color for the event markers.
|
||||
1. In the **Show in** drop-down, choose one of the following options:
|
||||
- **All panels** - The annotations are displayed on all panels that support annotations.
|
||||
|
||||
@@ -245,11 +245,12 @@ To configure repeats, follow these steps:
|
||||
1. Click **Save**.
|
||||
1. Toggle off the edit mode switch.
|
||||
|
||||
### Repeating rows and the Dashboard special data source
|
||||
### Repeating rows and tabs and the Dashboard special data source
|
||||
|
||||
<!-- is this next section still true? -->
|
||||
|
||||
If a row includes panels using the special [Dashboard data source](ref:built-in-special-data-sources)—the data source that uses a result set from another panel in the same dashboard—then corresponding panels in repeated rows will reference the panel in the original row, not the ones in the repeated rows.
|
||||
The same behavior applies to tabs.
|
||||
|
||||
For example, in a dashboard:
|
||||
|
||||
|
||||
@@ -223,17 +223,25 @@ To export a dashboard in its current state as a PDF, follow these steps:
|
||||
|
||||
1. Click the **X** at the top-right corner to close the share drawer.
|
||||
|
||||
### Export a dashboard as JSON
|
||||
### Export a dashboard as code
|
||||
|
||||
Export a Grafana JSON file that contains everything you need, including layout, variables, styles, data sources, queries, and so on, so that you can later import the dashboard. To export a JSON file, follow these steps:
|
||||
|
||||
1. Click **Dashboards** in the main menu.
|
||||
1. Open the dashboard you want to export.
|
||||
1. Click the **Export** drop-down list in the top-right corner and select **Export as JSON**.
|
||||
1. Click the **Export** drop-down list in the top-right corner and select **Export as code**.
|
||||
|
||||
The **Export dashboard JSON** drawer opens.
|
||||
The **Export dashboard** drawer opens.
|
||||
|
||||
1. Select the dashboard JSON model that you to export:
|
||||
- **Classic** - Export dashboards created using the [current dashboard schema](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/visualizations/dashboards/build-dashboards/view-dashboard-json-model/).
|
||||
- **V1 Resource** - Export dashboards created using the [current dashboard schema](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/visualizations/dashboards/build-dashboards/view-dashboard-json-model/) wrapped in the `spec` property of the [V1 Kubernetes-style resource](https://play.grafana.org/swagger?api=dashboard.grafana.app-v2alpha1). Choose between **JSON** and **YAML** format.
|
||||
- **V2 Resource** - Export dashboards created using the [V2 Resource schema](https://play.grafana.org/swagger?api=dashboard.grafana.app-v2beta1). Choose between **JSON** and **YAML** format.
|
||||
|
||||
1. Do one of the following:
|
||||
- Toggle the **Export for sharing externally** switch to generate the JSON with a different data source UID.
|
||||
- Toggle the **Remove deployment details** switch to make the dashboard externally shareable.
|
||||
|
||||
1. Toggle the **Export the dashboard to use in another instance** switch to generate the JSON with a different data source UID.
|
||||
1. Click **Download file** or **Copy to clipboard**.
|
||||
1. Click the **X** at the top-right corner to close the share drawer.
|
||||
|
||||
|
||||
@@ -343,6 +343,33 @@ test.describe('Panels test: Table - Kitchen Sink', { tag: ['@panels', '@table']
|
||||
// TODO -- saving for another day.
|
||||
});
|
||||
|
||||
test('Tests nested table expansion', async ({ gotoDashboardPage, selectors, page }) => {
|
||||
const dashboardPage = await gotoDashboardPage({
|
||||
uid: DASHBOARD_UID,
|
||||
queryParams: new URLSearchParams({ editPanel: '4' }),
|
||||
});
|
||||
|
||||
await expect(
|
||||
dashboardPage.getByGrafanaSelector(selectors.components.Panels.Panel.title('Nested tables'))
|
||||
).toBeVisible();
|
||||
|
||||
await waitForTableLoad(page);
|
||||
|
||||
await expect(page.locator('[role="row"]')).toHaveCount(3); // header + 2 rows
|
||||
|
||||
const firstRowExpander = dashboardPage
|
||||
.getByGrafanaSelector(selectors.components.Panels.Visualization.TableNG.RowExpander)
|
||||
.first();
|
||||
|
||||
await firstRowExpander.click();
|
||||
await expect(page.locator('[role="row"]')).not.toHaveCount(3); // more rows are present now, it is dynamic tho.
|
||||
|
||||
// TODO: test sorting
|
||||
|
||||
await firstRowExpander.click();
|
||||
await expect(page.locator('[role="row"]')).toHaveCount(3); // back to original state
|
||||
});
|
||||
|
||||
test('Tests tooltip interactions', async ({ gotoDashboardPage, selectors }) => {
|
||||
const dashboardPage = await gotoDashboardPage({
|
||||
uid: DASHBOARD_UID,
|
||||
|
||||
@@ -804,11 +804,6 @@
|
||||
"count": 2
|
||||
}
|
||||
},
|
||||
"packages/grafana-ui/src/components/Table/TableNG/utils.ts": {
|
||||
"@typescript-eslint/consistent-type-assertions": {
|
||||
"count": 1
|
||||
}
|
||||
},
|
||||
"packages/grafana-ui/src/components/Table/TableRT/Filter.tsx": {
|
||||
"@typescript-eslint/no-explicit-any": {
|
||||
"count": 1
|
||||
@@ -1835,11 +1830,6 @@
|
||||
"count": 1
|
||||
}
|
||||
},
|
||||
"public/app/features/dashboard-scene/inspect/InspectJsonTab.tsx": {
|
||||
"no-restricted-syntax": {
|
||||
"count": 1
|
||||
}
|
||||
},
|
||||
"public/app/features/dashboard-scene/pages/DashboardScenePage.tsx": {
|
||||
"@typescript-eslint/consistent-type-assertions": {
|
||||
"count": 2
|
||||
|
||||
@@ -526,6 +526,8 @@ github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY=
|
||||
github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
|
||||
github.com/centrifugal/centrifuge v0.37.2/go.mod h1:aj4iRJGhzi3SlL8iUtVezxway1Xf8g+hmNQkLLO7sS8=
|
||||
github.com/centrifugal/protocol v0.16.2/go.mod h1:Q7OpS/8HMXDnL7f9DpNx24IhG96MP88WPpVTTCdrokI=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
|
||||
github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
|
||||
@@ -1369,6 +1371,7 @@ github.com/rabbitmq/amqp091-go v1.9.0 h1:qrQtyzB4H8BQgEuJwhmVQqVHB9O4+MNDJCCAcpc
|
||||
github.com/rabbitmq/amqp091-go v1.9.0/go.mod h1:+jPrT9iY2eLjRaMSRHUhc3z14E/l85kv/f+6luSD3pc=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/redis/rueidis v1.0.64/go.mod h1:Lkhr2QTgcoYBhxARU7kJRO8SyVlgUuEkcJO1Y8MCluA=
|
||||
github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU=
|
||||
github.com/relvacode/iso8601 v1.6.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
|
||||
github.com/richardartoul/molecule v1.0.0 h1:+LFA9cT7fn8KF39zy4dhOnwcOwRoqKiBkPqKqya+8+U=
|
||||
|
||||
@@ -658,10 +658,6 @@ const injectedRtkApi = api
|
||||
query: (queryArg) => ({ url: `/dashboards/db`, method: 'POST', body: queryArg.saveDashboardCommand }),
|
||||
invalidatesTags: ['dashboards'],
|
||||
}),
|
||||
getHomeDashboard: build.query<GetHomeDashboardApiResponse, GetHomeDashboardApiArg>({
|
||||
query: () => ({ url: `/dashboards/home` }),
|
||||
providesTags: ['dashboards'],
|
||||
}),
|
||||
importDashboard: build.mutation<ImportDashboardApiResponse, ImportDashboardApiArg>({
|
||||
query: (queryArg) => ({ url: `/dashboards/import`, method: 'POST', body: queryArg.importDashboardRequest }),
|
||||
invalidatesTags: ['dashboards'],
|
||||
@@ -2574,8 +2570,6 @@ export type PostDashboardApiResponse = /** status 200 (empty) */ {
|
||||
export type PostDashboardApiArg = {
|
||||
saveDashboardCommand: SaveDashboardCommand;
|
||||
};
|
||||
export type GetHomeDashboardApiResponse = /** status 200 (empty) */ GetHomeDashboardResponse;
|
||||
export type GetHomeDashboardApiArg = void;
|
||||
export type ImportDashboardApiResponse =
|
||||
/** status 200 (empty) */ ImportDashboardResponseResponseObjectReturnedWhenImportingADashboard;
|
||||
export type ImportDashboardApiArg = {
|
||||
@@ -4399,51 +4393,6 @@ export type SaveDashboardCommand = {
|
||||
overwrite?: boolean;
|
||||
userId?: number;
|
||||
};
|
||||
export type AnnotationActions = {
|
||||
canAdd?: boolean;
|
||||
canDelete?: boolean;
|
||||
canEdit?: boolean;
|
||||
};
|
||||
export type AnnotationPermission = {
|
||||
dashboard?: AnnotationActions;
|
||||
organization?: AnnotationActions;
|
||||
};
|
||||
export type DashboardMeta = {
|
||||
annotationsPermissions?: AnnotationPermission;
|
||||
apiVersion?: string;
|
||||
canAdmin?: boolean;
|
||||
canDelete?: boolean;
|
||||
canEdit?: boolean;
|
||||
canSave?: boolean;
|
||||
canStar?: boolean;
|
||||
created?: string;
|
||||
createdBy?: string;
|
||||
expires?: string;
|
||||
/** Deprecated: use FolderUID instead */
|
||||
folderId?: number;
|
||||
folderTitle?: string;
|
||||
folderUid?: string;
|
||||
folderUrl?: string;
|
||||
hasAcl?: boolean;
|
||||
isFolder?: boolean;
|
||||
isSnapshot?: boolean;
|
||||
isStarred?: boolean;
|
||||
provisioned?: boolean;
|
||||
provisionedExternalId?: string;
|
||||
publicDashboardEnabled?: boolean;
|
||||
slug?: string;
|
||||
type?: string;
|
||||
updated?: string;
|
||||
updatedBy?: string;
|
||||
url?: string;
|
||||
version?: number;
|
||||
};
|
||||
export type GetHomeDashboardResponse = {
|
||||
dashboard?: Json;
|
||||
meta?: DashboardMeta;
|
||||
} & {
|
||||
redirectUri?: string;
|
||||
};
|
||||
export type ImportDashboardResponseResponseObjectReturnedWhenImportingADashboard = {
|
||||
dashboardId?: number;
|
||||
description?: string;
|
||||
@@ -4535,6 +4484,45 @@ export type PublicDashboardDto = {
|
||||
timeSelectionEnabled?: boolean;
|
||||
uid?: string;
|
||||
};
|
||||
export type AnnotationActions = {
|
||||
canAdd?: boolean;
|
||||
canDelete?: boolean;
|
||||
canEdit?: boolean;
|
||||
};
|
||||
export type AnnotationPermission = {
|
||||
dashboard?: AnnotationActions;
|
||||
organization?: AnnotationActions;
|
||||
};
|
||||
export type DashboardMeta = {
|
||||
annotationsPermissions?: AnnotationPermission;
|
||||
apiVersion?: string;
|
||||
canAdmin?: boolean;
|
||||
canDelete?: boolean;
|
||||
canEdit?: boolean;
|
||||
canSave?: boolean;
|
||||
canStar?: boolean;
|
||||
created?: string;
|
||||
createdBy?: string;
|
||||
expires?: string;
|
||||
/** Deprecated: use FolderUID instead */
|
||||
folderId?: number;
|
||||
folderTitle?: string;
|
||||
folderUid?: string;
|
||||
folderUrl?: string;
|
||||
hasAcl?: boolean;
|
||||
isFolder?: boolean;
|
||||
isSnapshot?: boolean;
|
||||
isStarred?: boolean;
|
||||
provisioned?: boolean;
|
||||
provisionedExternalId?: string;
|
||||
publicDashboardEnabled?: boolean;
|
||||
slug?: string;
|
||||
type?: string;
|
||||
updated?: string;
|
||||
updatedBy?: string;
|
||||
url?: string;
|
||||
version?: number;
|
||||
};
|
||||
export type DashboardFullWithMeta = {
|
||||
dashboard?: Json;
|
||||
meta?: DashboardMeta;
|
||||
@@ -6619,8 +6607,6 @@ export const {
|
||||
useSearchDashboardSnapshotsQuery,
|
||||
useLazySearchDashboardSnapshotsQuery,
|
||||
usePostDashboardMutation,
|
||||
useGetHomeDashboardQuery,
|
||||
useLazyGetHomeDashboardQuery,
|
||||
useImportDashboardMutation,
|
||||
useInterpolateDashboardMutation,
|
||||
useListPublicDashboardsQuery,
|
||||
|
||||
@@ -499,6 +499,9 @@ export const versionedComponents = {
|
||||
},
|
||||
},
|
||||
TableNG: {
|
||||
RowExpander: {
|
||||
'12.4.0': 'data-testid tableng row expander',
|
||||
},
|
||||
Filters: {
|
||||
HeaderButton: {
|
||||
'12.1.0': 'data-testid tableng header filter',
|
||||
|
||||
@@ -16,17 +16,22 @@ interface Props {
|
||||
title?: string;
|
||||
offset?: number;
|
||||
dragClass?: string;
|
||||
onDragStart?: (event: React.PointerEvent<HTMLDivElement>) => void;
|
||||
onOpenMenu?: () => void;
|
||||
}
|
||||
|
||||
export function HoverWidget({ menu, title, dragClass, children, offset = -32, onOpenMenu }: Props) {
|
||||
export function HoverWidget({ menu, title, dragClass, children, offset = -32, onOpenMenu, onDragStart }: Props) {
|
||||
const styles = useStyles2(getStyles);
|
||||
const draggableRef = useRef<HTMLDivElement>(null);
|
||||
const selectors = e2eSelectors.components.Panels.Panel.HoverWidget;
|
||||
// Capture the pointer to keep the widget visible while dragging
|
||||
const onPointerDown = useCallback((e: React.PointerEvent<HTMLDivElement>) => {
|
||||
draggableRef.current?.setPointerCapture(e.pointerId);
|
||||
}, []);
|
||||
const onPointerDown = useCallback(
|
||||
(e: React.PointerEvent<HTMLDivElement>) => {
|
||||
draggableRef.current?.setPointerCapture(e.pointerId);
|
||||
onDragStart?.(e);
|
||||
},
|
||||
[onDragStart]
|
||||
);
|
||||
|
||||
const onPointerUp = useCallback((e: React.PointerEvent<HTMLDivElement>) => {
|
||||
draggableRef.current?.releasePointerCapture(e.pointerId);
|
||||
|
||||
@@ -384,6 +384,7 @@ export function PanelChrome({
|
||||
menu={menu}
|
||||
title={typeof title === 'string' ? title : undefined}
|
||||
dragClass={dragClass}
|
||||
onDragStart={onDragStart}
|
||||
offset={hoverHeaderOffset}
|
||||
onOpenMenu={onOpenMenu}
|
||||
>
|
||||
|
||||
@@ -119,7 +119,14 @@ describe('Get y range', () => {
|
||||
values: [2, 1.999999999999999, 2.000000000000001, 2, 2],
|
||||
type: FieldType.number,
|
||||
config: {},
|
||||
state: { range: { min: 1.999999999999999, max: 2.000000000000001, delta: 0 } },
|
||||
state: { range: { min: 1.9999999999999999999, max: 2.000000000000000001, delta: 0 } },
|
||||
};
|
||||
const decimalsNotCloseYField: Field = {
|
||||
name: 'y',
|
||||
values: [2, 0.0094, 0.0053, 0.0078, 0.0061],
|
||||
type: FieldType.number,
|
||||
config: {},
|
||||
state: { range: { min: 0.0053, max: 0.0094, delta: 0.0041 } },
|
||||
};
|
||||
const xField: Field = {
|
||||
name: 'x',
|
||||
@@ -183,6 +190,11 @@ describe('Get y range', () => {
|
||||
field: decimalsCloseYField,
|
||||
expected: [2, 4],
|
||||
},
|
||||
{
|
||||
description: 'decimal values which are not close to equal should not be rounded out',
|
||||
field: decimalsNotCloseYField,
|
||||
expected: [0.0053, 0.0094],
|
||||
},
|
||||
])(`should return correct range for $description`, ({ field, expected }) => {
|
||||
const actual = getYRange(getAlignedFrame(field));
|
||||
expect(actual).toEqual(expected);
|
||||
|
||||
@@ -8,6 +8,7 @@ import {
|
||||
FieldType,
|
||||
getFieldColorModeForField,
|
||||
GrafanaTheme2,
|
||||
guessDecimals,
|
||||
isLikelyAscendingVector,
|
||||
nullToValue,
|
||||
roundDecimals,
|
||||
@@ -76,8 +77,6 @@ export function getYRange(alignedFrame: DataFrame): Range.MinMax {
|
||||
min = Math.min(min!, field.config.min ?? Infinity);
|
||||
max = Math.max(max!, field.config.max ?? -Infinity);
|
||||
|
||||
// console.log({ min, max });
|
||||
|
||||
// if noValue is set, ensure that it is included in the range as well
|
||||
const noValue = +field.config?.noValue!;
|
||||
if (!Number.isNaN(noValue)) {
|
||||
@@ -85,9 +84,11 @@ export function getYRange(alignedFrame: DataFrame): Range.MinMax {
|
||||
max = Math.max(max, noValue);
|
||||
}
|
||||
|
||||
const decimals = field.config.decimals ?? Math.max(guessDecimals(min), guessDecimals(max));
|
||||
|
||||
// call roundDecimals to mirror what is going to eventually happen in uplot
|
||||
let roundedMin = roundDecimals(min, field.config.decimals ?? 0);
|
||||
let roundedMax = roundDecimals(max, field.config.decimals ?? 0);
|
||||
let roundedMin = roundDecimals(min, decimals);
|
||||
let roundedMax = roundDecimals(max, decimals);
|
||||
|
||||
// if the rounded min and max are different,
|
||||
// we can return the real min and max.
|
||||
@@ -102,11 +103,9 @@ export function getYRange(alignedFrame: DataFrame): Range.MinMax {
|
||||
roundedMax = 1;
|
||||
} else if (roundedMin < 0) {
|
||||
// both are negative
|
||||
// max = 0;
|
||||
roundedMin *= 2;
|
||||
} else {
|
||||
// both are positive
|
||||
// min = 0;
|
||||
roundedMax *= 2;
|
||||
}
|
||||
|
||||
|
||||
@@ -154,8 +154,18 @@ export function TableNG(props: TableNGProps) {
|
||||
|
||||
const resizeHandler = useColumnResize(onColumnResize);
|
||||
|
||||
const rows = useMemo(() => frameToRecords(data), [data]);
|
||||
const hasNestedFrames = useMemo(() => getIsNestedTable(data.fields), [data]);
|
||||
const nestedFramesFieldName = useMemo(() => {
|
||||
if (!hasNestedFrames) {
|
||||
return;
|
||||
}
|
||||
const firstNestedField = data.fields.find((f) => f.type === FieldType.nestedFrames);
|
||||
if (!firstNestedField) {
|
||||
return;
|
||||
}
|
||||
return getDisplayName(firstNestedField);
|
||||
}, [data, hasNestedFrames]);
|
||||
const rows = useMemo(() => frameToRecords(data, nestedFramesFieldName), [data, nestedFramesFieldName]);
|
||||
const getTextColorForBackground = useMemo(() => memoize(_getTextColorForBackground, { maxSize: 1000 }), []);
|
||||
|
||||
const {
|
||||
@@ -374,7 +384,11 @@ export function TableNG(props: TableNGProps) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const expandedRecords = applySort(frameToRecords(nestedData), nestedData.fields, sortColumns);
|
||||
const expandedRecords = applySort(
|
||||
frameToRecords(nestedData, nestedFramesFieldName),
|
||||
nestedData.fields,
|
||||
sortColumns
|
||||
);
|
||||
if (!expandedRecords.length) {
|
||||
return (
|
||||
<div className={styles.noDataNested}>
|
||||
@@ -398,7 +412,7 @@ export function TableNG(props: TableNGProps) {
|
||||
width: COLUMN.EXPANDER_WIDTH,
|
||||
minWidth: COLUMN.EXPANDER_WIDTH,
|
||||
}),
|
||||
[commonDataGridProps, data.fields.length, expandedRows, sortColumns, styles]
|
||||
[commonDataGridProps, data.fields.length, expandedRows, sortColumns, styles, nestedFramesFieldName]
|
||||
);
|
||||
|
||||
const fromFields = useCallback(
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { css } from '@emotion/css';
|
||||
|
||||
import { GrafanaTheme2 } from '@grafana/data';
|
||||
import { selectors } from '@grafana/e2e-selectors';
|
||||
import { t } from '@grafana/i18n';
|
||||
|
||||
import { useStyles2 } from '../../../../themes/ThemeContext';
|
||||
@@ -16,13 +17,21 @@ export function RowExpander({ onCellExpand, isExpanded }: RowExpanderNGProps) {
|
||||
}
|
||||
}
|
||||
return (
|
||||
<div role="button" tabIndex={0} className={styles.expanderCell} onClick={onCellExpand} onKeyDown={handleKeyDown}>
|
||||
<div
|
||||
role="button"
|
||||
tabIndex={0}
|
||||
className={styles.expanderCell}
|
||||
onClick={onCellExpand}
|
||||
onKeyDown={handleKeyDown}
|
||||
data-testid={selectors.components.Panels.Visualization.TableNG.RowExpander}
|
||||
>
|
||||
<Icon
|
||||
aria-label={
|
||||
isExpanded
|
||||
? t('grafana-ui.row-expander-ng.aria-label-collapse', 'Collapse row')
|
||||
: t('grafana-ui.row-expander.aria-label-expand', 'Expand row')
|
||||
}
|
||||
aria-expanded={isExpanded}
|
||||
name={isExpanded ? 'angle-down' : 'angle-right'}
|
||||
size="lg"
|
||||
/>
|
||||
|
||||
@@ -79,7 +79,6 @@ export interface TableRow {
|
||||
|
||||
// Nested table properties
|
||||
data?: DataFrame;
|
||||
__nestedFrames?: DataFrame[];
|
||||
__expanded?: boolean; // For row expansion state
|
||||
|
||||
// Generic typing for column values
|
||||
@@ -262,7 +261,7 @@ export type TableCellStyles = (theme: GrafanaTheme2, options: TableCellStyleOpti
|
||||
export type Comparator = (a: TableCellValue, b: TableCellValue) => number;
|
||||
|
||||
// Type for converting a DataFrame into an array of TableRows
|
||||
export type FrameToRowsConverter = (frame: DataFrame) => TableRow[];
|
||||
export type FrameToRowsConverter = (frame: DataFrame, nestedFramesFieldName?: string) => TableRow[];
|
||||
|
||||
// Type for mapping column names to their field types
|
||||
export type ColumnTypes = Record<string, FieldType>;
|
||||
|
||||
@@ -675,10 +675,12 @@ export function applySort(
|
||||
/**
|
||||
* @internal
|
||||
*/
|
||||
export const frameToRecords = (frame: DataFrame): TableRow[] => {
|
||||
export const frameToRecords = (frame: DataFrame, nestedFramesFieldName?: string): TableRow[] => {
|
||||
const fnBody = `
|
||||
const rows = Array(frame.length);
|
||||
const values = frame.fields.map(f => f.values);
|
||||
const hasNestedFrames = '${nestedFramesFieldName ?? ''}'.length > 0;
|
||||
|
||||
let rowCount = 0;
|
||||
for (let i = 0; i < frame.length; i++) {
|
||||
rows[rowCount] = {
|
||||
@@ -686,11 +688,14 @@ export const frameToRecords = (frame: DataFrame): TableRow[] => {
|
||||
__index: i,
|
||||
${frame.fields.map((field, fieldIdx) => `${JSON.stringify(getDisplayName(field))}: values[${fieldIdx}][i]`).join(',')}
|
||||
};
|
||||
rowCount += 1;
|
||||
if (rows[rowCount-1]['__nestedFrames']){
|
||||
const childFrame = rows[rowCount-1]['__nestedFrames'];
|
||||
rows[rowCount] = {__depth: 1, __index: i, data: childFrame[0]}
|
||||
rowCount += 1;
|
||||
rowCount++;
|
||||
|
||||
if (hasNestedFrames) {
|
||||
const childFrame = rows[rowCount-1][${JSON.stringify(nestedFramesFieldName)}];
|
||||
if (childFrame){
|
||||
rows[rowCount] = {__depth: 1, __index: i, data: childFrame[0]}
|
||||
rowCount++;
|
||||
}
|
||||
}
|
||||
}
|
||||
return rows;
|
||||
@@ -698,8 +703,9 @@ export const frameToRecords = (frame: DataFrame): TableRow[] => {
|
||||
|
||||
// Creates a function that converts a DataFrame into an array of TableRows
|
||||
// Uses new Function() for performance as it's faster than creating rows using loops
|
||||
const convert = new Function('frame', fnBody) as FrameToRowsConverter;
|
||||
return convert(frame);
|
||||
// eslint-disable-next-line @typescript-eslint/consistent-type-assertions
|
||||
const convert = new Function('frame', 'nestedFramesFieldName', fnBody) as FrameToRowsConverter;
|
||||
return convert(frame, nestedFramesFieldName);
|
||||
};
|
||||
|
||||
/* ----------------------------- Data grid comparator ---------------------------- */
|
||||
|
||||
@@ -493,7 +493,9 @@ func (hs *HTTPServer) postDashboard(c *contextmodel.ReqContext, cmd dashboards.S
|
||||
|
||||
// swagger:route GET /dashboards/home dashboards getHomeDashboard
|
||||
//
|
||||
// Get home dashboard.
|
||||
// NOTE: the home dashboard is configured in preferences. This API will be removed in G13
|
||||
//
|
||||
// Deprecated: true
|
||||
//
|
||||
// Responses:
|
||||
// 200: getHomeDashboardResponse
|
||||
|
||||
@@ -40,7 +40,7 @@ func NewResourcePermissionsAuthorizer(
|
||||
return &ResourcePermissionsAuthorizer{
|
||||
accessClient: accessClient,
|
||||
parentProvider: parentProvider,
|
||||
logger: log.New("iam.resource-permissions-authorizer"),
|
||||
logger: log.New("iam.authorizer.resource-permissions"),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -216,8 +216,7 @@ func (r *ResourcePermissionsAuthorizer) FilterList(ctx context.Context, list run
|
||||
// Skip item on error fetching parent
|
||||
r.logger.Warn("filter list: error fetching parent, skipping item",
|
||||
"error", err.Error(),
|
||||
"namespace",
|
||||
item.Namespace,
|
||||
"namespace", item.Namespace,
|
||||
"group", target.ApiGroup,
|
||||
"resource", target.Resource,
|
||||
"name", target.Name,
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"k8s.io/kube-openapi/pkg/spec3"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
|
||||
"github.com/grafana/authlib/authn"
|
||||
"github.com/grafana/authlib/types"
|
||||
|
||||
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
|
||||
@@ -142,6 +143,8 @@ func NewAPIService(
|
||||
features featuremgmt.FeatureToggles,
|
||||
zClient zanzana.Client,
|
||||
reg prometheus.Registerer,
|
||||
tokenExchanger authn.TokenExchanger,
|
||||
authorizerDialConfigs map[schema.GroupResource]iamauthorizer.DialConfig,
|
||||
) *IdentityAccessManagementAPIBuilder {
|
||||
store := legacy.NewLegacySQLStores(dbProvider)
|
||||
resourcePermissionsStorage := resourcepermission.ProvideStorageBackend(dbProvider)
|
||||
@@ -150,9 +153,8 @@ func NewAPIService(
|
||||
resourceAuthorizer := gfauthorizer.NewResourceAuthorizer(accessClient)
|
||||
coreRoleAuthorizer := iamauthorizer.NewCoreRoleAuthorizer(accessClient)
|
||||
|
||||
// TODO: in a follow up PR, make this configurable
|
||||
resourceParentProvider := iamauthorizer.NewApiParentProvider(
|
||||
iamauthorizer.NewRemoteConfigProvider(map[schema.GroupResource]iamauthorizer.DialConfig{}, nil),
|
||||
iamauthorizer.NewRemoteConfigProvider(authorizerDialConfigs, tokenExchanger),
|
||||
iamauthorizer.Versions,
|
||||
)
|
||||
|
||||
|
||||
@@ -105,7 +105,8 @@ func (c *filesConnector) Connect(ctx context.Context, name string, opts runtime.
|
||||
return
|
||||
}
|
||||
folders := resources.NewFolderManager(readWriter, folderClient, resources.NewEmptyFolderTree())
|
||||
dualReadWriter := resources.NewDualReadWriter(readWriter, parser, folders, c.access)
|
||||
authorizer := resources.NewRepositoryAuthorizer(repo.Config(), c.access)
|
||||
dualReadWriter := resources.NewDualReadWriter(readWriter, parser, folders, authorizer)
|
||||
query := r.URL.Query()
|
||||
opts := resources.DualWriteOptions{
|
||||
Ref: query.Get("ref"),
|
||||
|
||||
@@ -154,9 +154,12 @@ func TestJobProgressRecorderWarningStatus(t *testing.T) {
|
||||
// Verify the final status includes warnings
|
||||
require.NotNil(t, finalStatus.Warnings)
|
||||
assert.Len(t, finalStatus.Warnings, 3)
|
||||
assert.Contains(t, finalStatus.Warnings[0], "deprecated API used")
|
||||
assert.Contains(t, finalStatus.Warnings[1], "missing optional field")
|
||||
assert.Contains(t, finalStatus.Warnings[2], "validation warning")
|
||||
expectedWarnings := []string{
|
||||
"deprecated API used (file: dashboards/test.json, name: test-resource, action: updated)",
|
||||
"missing optional field (file: dashboards/test2.json, name: test-resource-2, action: created)",
|
||||
"validation warning (file: datasources/test.yaml, name: test-resource-3, action: created)",
|
||||
}
|
||||
assert.ElementsMatch(t, finalStatus.Warnings, expectedWarnings)
|
||||
|
||||
// Verify the state is set to Warning
|
||||
assert.Equal(t, provisioning.JobStateWarning, finalStatus.State)
|
||||
|
||||
@@ -3,12 +3,13 @@ package resources
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
authlib "github.com/grafana/authlib/types"
|
||||
"github.com/grafana/grafana-app-sdk/logging"
|
||||
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
|
||||
"github.com/grafana/grafana/apps/provisioning/pkg/repository"
|
||||
@@ -20,18 +21,11 @@ import (
|
||||
|
||||
// DualReadWriter is a wrapper around a repository that can read from and write resources
|
||||
// into both the Git repository as well as in Grafana. It isn't a dual writer in the sense of what unistore handling calls dual writing.
|
||||
|
||||
// Standard provisioning Authorizer has already run by the time DualReadWriter is called
|
||||
// for incoming requests from actors, external or internal. However, since it is the files
|
||||
// connector that redirects here, the external resources such as dashboards
|
||||
// end up requiring additional authorization checks which the DualReadWriter performs here.
|
||||
|
||||
// TODO: it does not support folders yet
|
||||
type DualReadWriter struct {
|
||||
repo repository.ReaderWriter
|
||||
parser Parser
|
||||
folders *FolderManager
|
||||
access authlib.AccessChecker
|
||||
repo repository.ReaderWriter
|
||||
parser Parser
|
||||
folders *FolderManager
|
||||
authorizer Authorizer
|
||||
}
|
||||
|
||||
type DualWriteOptions struct {
|
||||
@@ -47,8 +41,8 @@ type DualWriteOptions struct {
|
||||
Branch string // Configured default branch
|
||||
}
|
||||
|
||||
func NewDualReadWriter(repo repository.ReaderWriter, parser Parser, folders *FolderManager, access authlib.AccessChecker) *DualReadWriter {
|
||||
return &DualReadWriter{repo: repo, parser: parser, folders: folders, access: access}
|
||||
func NewDualReadWriter(repo repository.ReaderWriter, parser Parser, folders *FolderManager, authorizer Authorizer) *DualReadWriter {
|
||||
return &DualReadWriter{repo: repo, parser: parser, folders: folders, authorizer: authorizer}
|
||||
}
|
||||
|
||||
func (r *DualReadWriter) Read(ctx context.Context, path string, ref string) (*ParsedResource, error) {
|
||||
@@ -76,8 +70,7 @@ func (r *DualReadWriter) Read(ctx context.Context, path string, ref string) (*Pa
|
||||
return nil, fmt.Errorf("error running dryRun: %w", err)
|
||||
}
|
||||
|
||||
// Authorize based on the existing resource
|
||||
if err = r.authorize(ctx, parsed, utils.VerbGet); err != nil {
|
||||
if err = r.authorizer.AuthorizeResource(ctx, parsed, utils.VerbGet); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -85,7 +78,7 @@ func (r *DualReadWriter) Read(ctx context.Context, path string, ref string) (*Pa
|
||||
}
|
||||
|
||||
func (r *DualReadWriter) Delete(ctx context.Context, opts DualWriteOptions) (*ParsedResource, error) {
|
||||
if err := repository.IsWriteAllowed(r.repo.Config(), opts.Ref); err != nil {
|
||||
if err := r.authorizer.AuthorizeWrite(ctx, opts.Ref); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -111,7 +104,7 @@ func (r *DualReadWriter) Delete(ctx context.Context, opts DualWriteOptions) (*Pa
|
||||
return nil, fmt.Errorf("parse file: %w", err)
|
||||
}
|
||||
|
||||
if err = r.authorize(ctx, parsed, utils.VerbDelete); err != nil {
|
||||
if err = r.authorizer.AuthorizeResource(ctx, parsed, utils.VerbDelete); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -143,7 +136,7 @@ func (r *DualReadWriter) Delete(ctx context.Context, opts DualWriteOptions) (*Pa
|
||||
// CreateFolder creates a new folder in the repository
|
||||
// FIXME: fix signature to return ParsedResource
|
||||
func (r *DualReadWriter) CreateFolder(ctx context.Context, opts DualWriteOptions) (*provisioning.ResourceWrapper, error) {
|
||||
if err := repository.IsWriteAllowed(r.repo.Config(), opts.Ref); err != nil {
|
||||
if err := r.authorizer.AuthorizeWrite(ctx, opts.Ref); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -151,9 +144,12 @@ func (r *DualReadWriter) CreateFolder(ctx context.Context, opts DualWriteOptions
|
||||
return nil, fmt.Errorf("not a folder path")
|
||||
}
|
||||
|
||||
if err := r.authorizeCreateFolder(ctx, opts.Path); err != nil {
|
||||
// For create operations, use empty name to check parent folder permissions
|
||||
folderParsed := folderParsedResource(opts.Path, opts.Ref, r.repo.Config(), "")
|
||||
if err := r.authorizer.AuthorizeResource(ctx, folderParsed, utils.VerbCreate); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO: authorized to create folders under first existing ancestor folder
|
||||
|
||||
// Now actually create the folder
|
||||
if err := r.repo.Create(ctx, opts.Path, opts.Ref, nil, opts.Message); err != nil {
|
||||
@@ -201,17 +197,90 @@ func (r *DualReadWriter) CreateFolder(ctx context.Context, opts DualWriteOptions
|
||||
|
||||
// CreateResource creates a new resource in the repository
|
||||
func (r *DualReadWriter) CreateResource(ctx context.Context, opts DualWriteOptions) (*ParsedResource, error) {
|
||||
return r.createOrUpdate(ctx, true, opts)
|
||||
if err := r.authorizer.AuthorizeWrite(ctx, opts.Ref); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := &repository.FileInfo{
|
||||
Data: opts.Data,
|
||||
Path: opts.Path,
|
||||
Ref: opts.Ref,
|
||||
}
|
||||
|
||||
parsed, err := r.parser.Parse(ctx, info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO: check if the resource does not exist in the database.
|
||||
|
||||
// Make sure the value is valid
|
||||
if !opts.SkipDryRun {
|
||||
if err := parsed.DryRun(ctx); err != nil {
|
||||
logger := logging.FromContext(ctx).With("path", opts.Path, "name", parsed.Obj.GetName(), "ref", opts.Ref)
|
||||
logger.Warn("failed to dry run resource on create", "error", err)
|
||||
|
||||
return nil, fmt.Errorf("error running dryRun: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(parsed.Errors) > 0 {
|
||||
// Now returns BadRequest (400) for validation errors
|
||||
return nil, fmt.Errorf("errors while parsing file [%v]", parsed.Errors)
|
||||
}
|
||||
|
||||
// TODO: is this the right way?
|
||||
// Check if resource already exists - create should fail if it does
|
||||
if err = r.ensureExisting(ctx, parsed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if parsed.Existing != nil {
|
||||
return nil, apierrors.NewConflict(parsed.GVR.GroupResource(), parsed.Obj.GetName(),
|
||||
fmt.Errorf("resource already exists"))
|
||||
}
|
||||
|
||||
// Authorization check: Check if we can create the resource in the folder from the file
|
||||
if err = r.authorizer.AuthorizeResource(ctx, parsed, utils.VerbCreate); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO: authorized to create folders under first existing ancestor folder
|
||||
|
||||
data, err := parsed.ToSaveBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Always use the provisioning identity when writing
|
||||
ctx, _, err = identity.WithProvisioningIdentity(ctx, parsed.Obj.GetNamespace())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to use provisioning identity %w", err)
|
||||
}
|
||||
|
||||
// TODO: handle the error repository.ErrFileAlreadyExists
|
||||
err = r.repo.Create(ctx, opts.Path, opts.Ref, data, opts.Message)
|
||||
if err != nil {
|
||||
return nil, err // raw error is useful
|
||||
}
|
||||
|
||||
// Directly update the grafana database
|
||||
// Behaves the same running sync after writing
|
||||
// FIXME: to make sure if behaves in the same way as in sync, we should
|
||||
// we should refactor the code to use the same function.
|
||||
if r.shouldUpdateGrafanaDB(opts, parsed) {
|
||||
if _, err := r.folders.EnsureFolderPathExist(ctx, opts.Path); err != nil {
|
||||
return nil, fmt.Errorf("ensure folder path exists: %w", err)
|
||||
}
|
||||
|
||||
err = parsed.Run(ctx)
|
||||
}
|
||||
|
||||
return parsed, err
|
||||
}
|
||||
|
||||
// UpdateResource updates a resource in the repository
|
||||
func (r *DualReadWriter) UpdateResource(ctx context.Context, opts DualWriteOptions) (*ParsedResource, error) {
|
||||
return r.createOrUpdate(ctx, false, opts)
|
||||
}
|
||||
|
||||
// Create or updates a resource in the repository
|
||||
func (r *DualReadWriter) createOrUpdate(ctx context.Context, create bool, opts DualWriteOptions) (*ParsedResource, error) {
|
||||
if err := repository.IsWriteAllowed(r.repo.Config(), opts.Ref); err != nil {
|
||||
if err := r.authorizer.AuthorizeWrite(ctx, opts.Ref); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -230,7 +299,7 @@ func (r *DualReadWriter) createOrUpdate(ctx context.Context, create bool, opts D
|
||||
if !opts.SkipDryRun {
|
||||
if err := parsed.DryRun(ctx); err != nil {
|
||||
logger := logging.FromContext(ctx).With("path", opts.Path, "name", parsed.Obj.GetName(), "ref", opts.Ref)
|
||||
logger.Warn("failed to dry run resource on create", "error", err)
|
||||
logger.Warn("failed to dry run resource on update", "error", err)
|
||||
|
||||
return nil, fmt.Errorf("error running dryRun: %w", err)
|
||||
}
|
||||
@@ -241,12 +310,15 @@ func (r *DualReadWriter) createOrUpdate(ctx context.Context, create bool, opts D
|
||||
return nil, fmt.Errorf("errors while parsing file [%v]", parsed.Errors)
|
||||
}
|
||||
|
||||
// Verify that we can create (or update) the referenced resource
|
||||
verb := utils.VerbUpdate
|
||||
if parsed.Action == provisioning.ResourceActionCreate {
|
||||
verb = utils.VerbCreate
|
||||
// Populate existing resource to check permissions in the correct folder
|
||||
if err = r.ensureExisting(ctx, parsed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = r.authorize(ctx, parsed, verb); err != nil {
|
||||
|
||||
// TODO: what to do with a name or kind change?
|
||||
|
||||
// Authorization check: Check if we can update the existing resource in its current folder
|
||||
if err = r.authorizer.AuthorizeResource(ctx, parsed, utils.VerbUpdate); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -261,12 +333,7 @@ func (r *DualReadWriter) createOrUpdate(ctx context.Context, create bool, opts D
|
||||
return nil, fmt.Errorf("unable to use provisioning identity %w", err)
|
||||
}
|
||||
|
||||
// Create or update
|
||||
if create {
|
||||
err = r.repo.Create(ctx, opts.Path, opts.Ref, data, opts.Message)
|
||||
} else {
|
||||
err = r.repo.Update(ctx, opts.Path, opts.Ref, data, opts.Message)
|
||||
}
|
||||
err = r.repo.Update(ctx, opts.Path, opts.Ref, data, opts.Message)
|
||||
if err != nil {
|
||||
return nil, err // raw error is useful
|
||||
}
|
||||
@@ -288,7 +355,7 @@ func (r *DualReadWriter) createOrUpdate(ctx context.Context, create bool, opts D
|
||||
|
||||
// MoveResource moves a resource from one path to another in the repository
|
||||
func (r *DualReadWriter) MoveResource(ctx context.Context, opts DualWriteOptions) (*ParsedResource, error) {
|
||||
if err := repository.IsWriteAllowed(r.repo.Config(), opts.Ref); err != nil {
|
||||
if err := r.authorizer.AuthorizeWrite(ctx, opts.Ref); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -315,7 +382,32 @@ func (r *DualReadWriter) MoveResource(ctx context.Context, opts DualWriteOptions
|
||||
}
|
||||
|
||||
func (r *DualReadWriter) moveDirectory(ctx context.Context, opts DualWriteOptions) (*ParsedResource, error) {
|
||||
// For directory moves, we just perform the repository move without parsing
|
||||
// Reject directory move operations for configured branch - use bulk operations instead
|
||||
if r.isConfiguredBranch(opts) {
|
||||
return nil, &apierrors.StatusError{
|
||||
ErrStatus: metav1.Status{
|
||||
Status: metav1.StatusFailure,
|
||||
Code: http.StatusMethodNotAllowed,
|
||||
Reason: metav1.StatusReasonMethodNotAllowed,
|
||||
Message: "directory move operations are not available for configured branch. Use bulk move operations via the jobs API instead",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Check permissions to delete the original folder
|
||||
originalFolderID := ParseFolder(opts.OriginalPath, r.repo.Config().Name).ID
|
||||
originalFolderParsed := folderParsedResource(opts.OriginalPath, opts.Ref, r.repo.Config(), originalFolderID)
|
||||
if err := r.authorizer.AuthorizeResource(ctx, originalFolderParsed, utils.VerbDelete); err != nil {
|
||||
return nil, fmt.Errorf("not authorized to move from original folder: %w", err)
|
||||
}
|
||||
|
||||
// Check permissions to create at the new folder location (empty name for create)
|
||||
newFolderParsed := folderParsedResource(opts.Path, opts.Ref, r.repo.Config(), "")
|
||||
if err := r.authorizer.AuthorizeResource(ctx, newFolderParsed, utils.VerbCreate); err != nil {
|
||||
return nil, fmt.Errorf("not authorized to move to new folder: %w", err)
|
||||
}
|
||||
|
||||
// For branch operations, we just perform the repository move without updating Grafana DB
|
||||
// Always use the provisioning identity when writing
|
||||
ctx, _, err := identity.WithProvisioningIdentity(ctx, r.repo.Config().Namespace)
|
||||
if err != nil {
|
||||
@@ -349,35 +441,6 @@ func (r *DualReadWriter) moveDirectory(ctx context.Context, opts DualWriteOption
|
||||
},
|
||||
}
|
||||
|
||||
// Handle folder management for main branch
|
||||
if r.shouldUpdateGrafanaDB(opts, nil) {
|
||||
// Ensure destination folder path exists
|
||||
if _, err := r.folders.EnsureFolderPathExist(ctx, opts.Path); err != nil {
|
||||
return nil, fmt.Errorf("ensure destination folder path exists: %w", err)
|
||||
}
|
||||
|
||||
// Try to delete the old folder structure from grafana (if it exists)
|
||||
// This handles cleanup when folders are moved to new locations
|
||||
oldFolderName, err := r.folders.EnsureFolderPathExist(ctx, opts.OriginalPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ensure original folder path exists: %w", err)
|
||||
}
|
||||
|
||||
if oldFolderName != "" {
|
||||
oldFolder, err := r.folders.GetFolder(ctx, oldFolderName)
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return nil, fmt.Errorf("get old folder for cleanup: %w", err)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
err = r.folders.Client().Delete(ctx, oldFolder.GetName(), metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return nil, fmt.Errorf("delete old folder from storage: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return parsed, nil
|
||||
}
|
||||
|
||||
@@ -394,8 +457,13 @@ func (r *DualReadWriter) moveFile(ctx context.Context, opts DualWriteOptions) (*
|
||||
return nil, fmt.Errorf("parse original file: %w", err)
|
||||
}
|
||||
|
||||
// Authorize delete on the original path
|
||||
if err = r.authorize(ctx, parsed, utils.VerbDelete); err != nil {
|
||||
// Populate existing resource to check delete permission in the correct folder
|
||||
if err = r.ensureExisting(ctx, parsed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Authorize delete on the original path (checks existing resource's folder if it exists)
|
||||
if err = r.authorizer.AuthorizeResource(ctx, parsed, utils.VerbDelete); err != nil {
|
||||
return nil, fmt.Errorf("not authorized to delete original file: %w", err)
|
||||
}
|
||||
|
||||
@@ -433,13 +501,20 @@ func (r *DualReadWriter) moveFile(ctx context.Context, opts DualWriteOptions) (*
|
||||
return nil, fmt.Errorf("errors while parsing moved file [%v]", newParsed.Errors)
|
||||
}
|
||||
|
||||
// Authorize create on the new path
|
||||
verb := utils.VerbCreate
|
||||
if newParsed.Action == provisioning.ResourceActionUpdate {
|
||||
verb = utils.VerbUpdate
|
||||
// Populate existing resource at destination to check if we're overwriting something
|
||||
if err = r.ensureExisting(ctx, newParsed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = r.authorize(ctx, newParsed, verb); err != nil {
|
||||
return nil, fmt.Errorf("not authorized to create new file: %w", err)
|
||||
|
||||
// Authorize for the target resource
|
||||
// - If resource exists at destination: Check if we can update it in its folder
|
||||
// - If no resource at destination: Check if we can create in the new folder
|
||||
verb := utils.VerbUpdate
|
||||
if newParsed.Existing == nil {
|
||||
verb = utils.VerbCreate
|
||||
}
|
||||
if err = r.authorizer.AuthorizeResource(ctx, newParsed, verb); err != nil {
|
||||
return nil, fmt.Errorf("not authorized for destination: %w", err)
|
||||
}
|
||||
|
||||
data, err := newParsed.ToSaveBytes()
|
||||
@@ -497,95 +572,51 @@ func (r *DualReadWriter) moveFile(ctx context.Context, opts DualWriteOptions) (*
|
||||
return newParsed, nil
|
||||
}
|
||||
|
||||
func (r *DualReadWriter) authorize(ctx context.Context, parsed *ParsedResource, verb string) error {
|
||||
id, err := identity.GetRequester(ctx)
|
||||
// ensureExisting populates parsed.Existing if a resource with the given name exists in storage.
|
||||
// Returns nil if no resource exists, if Client is nil, or if Existing is already populated.
|
||||
// This is used before authorization checks to ensure we validate permissions against the actual
|
||||
// existing resource's folder, not just the folder specified in the file.
|
||||
func (r *DualReadWriter) ensureExisting(ctx context.Context, parsed *ParsedResource) error {
|
||||
if parsed.Client == nil || parsed.Existing != nil {
|
||||
return nil // Already populated or can't check
|
||||
}
|
||||
|
||||
existing, err := parsed.Client.Get(ctx, parsed.Obj.GetName(), metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return apierrors.NewUnauthorized(err.Error())
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil // No existing resource
|
||||
}
|
||||
return fmt.Errorf("failed to check for existing resource: %w", err)
|
||||
}
|
||||
|
||||
var name string
|
||||
if parsed.Existing != nil {
|
||||
name = parsed.Existing.GetName()
|
||||
} else {
|
||||
name = parsed.Obj.GetName()
|
||||
}
|
||||
|
||||
rsp, err := r.access.Check(ctx, id, authlib.CheckRequest{
|
||||
Group: parsed.GVR.Group,
|
||||
Resource: parsed.GVR.Resource,
|
||||
Namespace: id.GetNamespace(),
|
||||
Name: name,
|
||||
Verb: verb,
|
||||
}, parsed.Meta.GetFolder())
|
||||
if err != nil || !rsp.Allowed {
|
||||
return apierrors.NewForbidden(parsed.GVR.GroupResource(), parsed.Obj.GetName(),
|
||||
fmt.Errorf("no access to read the embedded file"))
|
||||
}
|
||||
|
||||
idType, _, err := authlib.ParseTypeID(id.GetID())
|
||||
if err != nil {
|
||||
return apierrors.NewForbidden(parsed.GVR.GroupResource(), parsed.Obj.GetName(), fmt.Errorf("could not determine identity type to check access"))
|
||||
}
|
||||
// only apply role based access if identity is not of type access policy
|
||||
if idType == authlib.TypeAccessPolicy || id.GetOrgRole().Includes(identity.RoleEditor) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return apierrors.NewForbidden(parsed.GVR.GroupResource(), parsed.Obj.GetName(),
|
||||
fmt.Errorf("must be admin or editor to access files from provisioning"))
|
||||
}
|
||||
|
||||
func (r *DualReadWriter) authorizeCreateFolder(ctx context.Context, _ string) error {
|
||||
id, err := identity.GetRequester(ctx)
|
||||
if err != nil {
|
||||
return apierrors.NewUnauthorized(err.Error())
|
||||
}
|
||||
|
||||
// Simple role based access for now
|
||||
if id.GetOrgRole().Includes(identity.RoleEditor) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return apierrors.NewForbidden(FolderResource.GroupResource(), "",
|
||||
fmt.Errorf("must be admin or editor to access folders with provisioning"))
|
||||
parsed.Existing = existing
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *DualReadWriter) deleteFolder(ctx context.Context, opts DualWriteOptions) (*ParsedResource, error) {
|
||||
// if the ref is set, it is not the active branch, so just delete the files from the branch
|
||||
// and do not delete the items from grafana itself
|
||||
if !r.shouldUpdateGrafanaDB(opts, nil) {
|
||||
err := r.repo.Delete(ctx, opts.Path, opts.Ref, opts.Message)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error deleting folder from repository: %w", err)
|
||||
// Reject directory delete operations for configured branch - use bulk operations instead
|
||||
if r.isConfiguredBranch(opts) {
|
||||
return nil, &apierrors.StatusError{
|
||||
ErrStatus: metav1.Status{
|
||||
Status: metav1.StatusFailure,
|
||||
Code: http.StatusMethodNotAllowed,
|
||||
Reason: metav1.StatusReasonMethodNotAllowed,
|
||||
Message: "directory delete operations are not available for configured branch. Use bulk delete operations via the jobs API instead",
|
||||
},
|
||||
}
|
||||
|
||||
return folderDeleteResponse(ctx, opts.Path, opts.Ref, r.repo)
|
||||
}
|
||||
|
||||
// before deleting from the repo, first get all children resources to delete from grafana afterwards
|
||||
treeEntries, err := r.repo.ReadTree(ctx, "")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read repository tree: %w", err)
|
||||
}
|
||||
// note: parsedFolders will include the folder itself
|
||||
parsedResources, parsedFolders, err := r.getChildren(ctx, opts.Path, treeEntries)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse resources in folder: %w", err)
|
||||
}
|
||||
|
||||
// delete from the repo
|
||||
err = r.repo.Delete(ctx, opts.Path, opts.Ref, opts.Message)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("delete folder from repository: %w", err)
|
||||
}
|
||||
|
||||
// delete from grafana
|
||||
ctx, _, err = identity.WithProvisioningIdentity(ctx, r.repo.Config().Namespace)
|
||||
if err != nil {
|
||||
// Check permissions to delete the folder
|
||||
folderID := ParseFolder(opts.Path, r.repo.Config().Name).ID
|
||||
folderParsed := folderParsedResource(opts.Path, opts.Ref, r.repo.Config(), folderID)
|
||||
if err := r.authorizer.AuthorizeResource(ctx, folderParsed, utils.VerbDelete); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := r.deleteChildren(ctx, parsedResources, parsedFolders); err != nil {
|
||||
return nil, fmt.Errorf("delete folder from grafana: %w", err)
|
||||
|
||||
// For branch operations, just delete from the repository without updating Grafana DB
|
||||
err := r.repo.Delete(ctx, opts.Path, opts.Ref, opts.Message)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error deleting folder from repository: %w", err)
|
||||
}
|
||||
|
||||
return folderDeleteResponse(ctx, opts.Path, opts.Ref, r.repo)
|
||||
@@ -610,6 +641,54 @@ func getPathType(isDir bool) string {
|
||||
return "file (no trailing '/')"
|
||||
}
|
||||
|
||||
// folderParsedResource creates a ParsedResource for a folder path.
|
||||
// This is used for authorization checks on folder operations.
|
||||
// For create operations, name should be empty string to check parent permissions.
|
||||
// For other operations, name should be the folder ID derived from the path.
|
||||
func folderParsedResource(path, ref string, repo *provisioning.Repository, name string) *ParsedResource {
|
||||
folderObj := &unstructured.Unstructured{}
|
||||
folderObj.SetName(name)
|
||||
folderObj.SetNamespace(repo.Namespace)
|
||||
|
||||
// TODO: which parent? top existing ancestor.
|
||||
|
||||
meta, _ := utils.MetaAccessor(folderObj)
|
||||
if meta != nil {
|
||||
// Set parent folder for folder operations
|
||||
parentFolder := ""
|
||||
if path != "" {
|
||||
parentPath := safepath.Dir(path)
|
||||
if parentPath != "" {
|
||||
parentFolder = ParseFolder(parentPath, repo.Name).ID
|
||||
} else {
|
||||
parentFolder = RootFolder(repo)
|
||||
}
|
||||
}
|
||||
meta.SetFolder(parentFolder)
|
||||
}
|
||||
|
||||
return &ParsedResource{
|
||||
Info: &repository.FileInfo{
|
||||
Path: path,
|
||||
Ref: ref,
|
||||
},
|
||||
Obj: folderObj,
|
||||
Meta: meta,
|
||||
GVK: schema.GroupVersionKind{
|
||||
Group: FolderResource.Group,
|
||||
Version: FolderResource.Version,
|
||||
Kind: "Folder",
|
||||
},
|
||||
GVR: FolderResource,
|
||||
Repo: provisioning.ResourceRepositoryInfo{
|
||||
Type: repo.Spec.Type,
|
||||
Namespace: repo.Namespace,
|
||||
Name: repo.Name,
|
||||
Title: repo.Spec.Title,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func folderDeleteResponse(ctx context.Context, path, ref string, repo repository.Repository) (*ParsedResource, error) {
|
||||
urls, err := getFolderURLs(ctx, path, ref, repo)
|
||||
if err != nil {
|
||||
@@ -640,60 +719,11 @@ func folderDeleteResponse(ctx context.Context, path, ref string, repo repository
|
||||
return parsed, nil
|
||||
}
|
||||
|
||||
func (r *DualReadWriter) getChildren(ctx context.Context, folderPath string, treeEntries []repository.FileTreeEntry) ([]*ParsedResource, []Folder, error) {
|
||||
var resourcesInFolder []repository.FileTreeEntry
|
||||
var foldersInFolder []Folder
|
||||
for _, entry := range treeEntries {
|
||||
// make sure the path is supported (i.e. not ignored by git sync) and that the path is the folder itself or a child of the folder
|
||||
if IsPathSupported(entry.Path) != nil || !safepath.InDir(entry.Path, folderPath) {
|
||||
continue
|
||||
}
|
||||
// folders cannot be parsed as resources, so handle them separately
|
||||
if entry.Blob {
|
||||
resourcesInFolder = append(resourcesInFolder, entry)
|
||||
} else {
|
||||
folder := ParseFolder(entry.Path, r.repo.Config().Name)
|
||||
foldersInFolder = append(foldersInFolder, folder)
|
||||
}
|
||||
}
|
||||
|
||||
parsedResources := make([]*ParsedResource, len(resourcesInFolder))
|
||||
for i, entry := range resourcesInFolder {
|
||||
fileInfo, err := r.repo.Read(ctx, entry.Path, "")
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return nil, nil, fmt.Errorf("could not find resource in repository: %w", err)
|
||||
}
|
||||
|
||||
parsed, err := r.parser.Parse(ctx, fileInfo)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("could not parse resource: %w", err)
|
||||
}
|
||||
|
||||
parsedResources[i] = parsed
|
||||
}
|
||||
|
||||
return parsedResources, foldersInFolder, nil
|
||||
}
|
||||
|
||||
func (r *DualReadWriter) deleteChildren(ctx context.Context, childrenResources []*ParsedResource, folders []Folder) error {
|
||||
for _, parsed := range childrenResources {
|
||||
err := parsed.Client.Delete(ctx, parsed.Obj.GetName(), metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return fmt.Errorf("failed to delete nested resource from grafana: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// we need to delete the folders furthest down in the tree first, as folder deletion will fail if there is anything inside of it
|
||||
safepath.SortByDepth(folders, func(f Folder) string { return f.Path }, false)
|
||||
|
||||
for _, f := range folders {
|
||||
err := r.folders.Client().Delete(ctx, f.ID, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete folder from grafana: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
// isConfiguredBranch returns true if the ref targets the configured branch
|
||||
// (empty ref means configured branch, or ref explicitly matches configured branch)
|
||||
func (r *DualReadWriter) isConfiguredBranch(opts DualWriteOptions) bool {
|
||||
configuredBranch := r.repo.Config().Branch()
|
||||
return opts.Ref == "" || opts.Ref == configuredBranch
|
||||
}
|
||||
|
||||
// shouldUpdateGrafanaDB returns true if we have an empty ref (targeting the configured branch)
|
||||
@@ -703,9 +733,5 @@ func (r *DualReadWriter) shouldUpdateGrafanaDB(opts DualWriteOptions, parsed *Pa
|
||||
return false
|
||||
}
|
||||
|
||||
if opts.Ref != "" && opts.Ref != opts.Branch {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
return r.isConfiguredBranch(opts)
|
||||
}
|
||||
|
||||
@@ -77,6 +77,10 @@ var (
|
||||
"user.sync.user-externalUID-mismatch",
|
||||
errutil.WithPublicMessage("User externalUID mismatch"),
|
||||
)
|
||||
errSCIMAuthModuleMismatch = errutil.Unauthorized(
|
||||
"user.sync.scim-auth-module-mismatch",
|
||||
errutil.WithPublicMessage("User was provisioned via SCIM and must login via SAML"),
|
||||
)
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -308,6 +312,21 @@ func (s *UserSync) SyncUserHook(ctx context.Context, id *authn.Identity, _ *auth
|
||||
// just try to fetch the user one more to make the other request work.
|
||||
if errors.Is(err, user.ErrUserAlreadyExists) {
|
||||
usr, _, err = s.getUser(ctx, id)
|
||||
|
||||
// Check if this is a SCIM-provisioned user trying to login via an auth module that is not SAML or GCOM
|
||||
if err == nil && usr != nil && usr.IsProvisioned && id.AuthenticatedBy != login.GrafanaComAuthModule {
|
||||
_, authErr := s.authInfoService.GetAuthInfo(ctx, &login.GetAuthInfoQuery{
|
||||
UserId: usr.ID,
|
||||
AuthModule: id.AuthenticatedBy,
|
||||
})
|
||||
if errors.Is(authErr, user.ErrUserNotFound) {
|
||||
s.log.FromContext(ctx).Error("SCIM-provisioned user attempted login via non-SAML auth module",
|
||||
"user_id", usr.ID,
|
||||
"attempted_module", id.AuthenticatedBy,
|
||||
)
|
||||
return errSCIMAuthModuleMismatch.Errorf("user was provisioned via SCIM but attempted login via %s", id.AuthenticatedBy)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
||||
@@ -1926,3 +1926,100 @@ func TestUserSync_SCIMLoginUsageStatSet(t *testing.T) {
|
||||
finalCount := finalStats["stats.features.scim.has_successful_login.count"].(int)
|
||||
require.Equal(t, int(1), finalCount)
|
||||
}
|
||||
|
||||
func TestUserSync_SyncUserHook_SCIMAuthModuleMismatch(t *testing.T) {
|
||||
userSrv := usertest.NewMockService(t)
|
||||
authInfoSrv := authinfotest.NewMockAuthInfoService(t)
|
||||
|
||||
userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(nil, user.ErrUserNotFound).Once()
|
||||
|
||||
userSrv.On("Create", mock.Anything, mock.Anything).Return(nil, user.ErrUserAlreadyExists).Once()
|
||||
|
||||
userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(&user.User{
|
||||
ID: 1,
|
||||
Email: "test@test.com",
|
||||
IsProvisioned: true,
|
||||
}, nil).Once()
|
||||
|
||||
authInfoSrv.On("GetAuthInfo", mock.Anything, mock.MatchedBy(func(q *login.GetAuthInfoQuery) bool {
|
||||
return q.AuthModule == "oauth_azuread"
|
||||
})).Return(nil, user.ErrUserNotFound).Once()
|
||||
|
||||
s := ProvideUserSync(
|
||||
userSrv,
|
||||
authinfoimpl.ProvideOSSUserProtectionService(),
|
||||
authInfoSrv,
|
||||
"atest.FakeQuotaService{},
|
||||
tracing.NewNoopTracerService(),
|
||||
featuremgmt.WithFeatures(),
|
||||
setting.NewCfg(),
|
||||
nil,
|
||||
)
|
||||
|
||||
email := "test@test.com"
|
||||
|
||||
err := s.SyncUserHook(context.Background(), &authn.Identity{
|
||||
AuthenticatedBy: "oauth_azuread",
|
||||
ClientParams: authn.ClientParams{
|
||||
SyncUser: true,
|
||||
AllowSignUp: true,
|
||||
LookUpParams: login.UserLookupParams{
|
||||
Email: &email,
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
|
||||
require.Error(t, err)
|
||||
assert.ErrorIs(t, err, errSCIMAuthModuleMismatch)
|
||||
assert.Contains(t, err.Error(), "SCIM")
|
||||
assert.Contains(t, err.Error(), "oauth_azuread")
|
||||
}
|
||||
|
||||
func TestUserSync_SyncUserHook_SCIMUserAllowsGCOMLogin(t *testing.T) {
|
||||
userSrv := usertest.NewMockService(t)
|
||||
authInfoSrv := authinfotest.NewMockAuthInfoService(t)
|
||||
|
||||
authInfoSrv.On("GetAuthInfo", mock.Anything, mock.MatchedBy(func(q *login.GetAuthInfoQuery) bool {
|
||||
return q.AuthModule == login.GrafanaComAuthModule && q.AuthId == "gcom-user-123"
|
||||
})).Return(nil, user.ErrUserNotFound).Once()
|
||||
|
||||
userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(nil, user.ErrUserNotFound).Once()
|
||||
userSrv.On("Create", mock.Anything, mock.Anything).Return(nil, user.ErrUserAlreadyExists).Once()
|
||||
|
||||
authInfoSrv.On("GetAuthInfo", mock.Anything, mock.MatchedBy(func(q *login.GetAuthInfoQuery) bool {
|
||||
return q.AuthModule == login.GrafanaComAuthModule && q.AuthId == "gcom-user-123"
|
||||
})).Return(nil, user.ErrUserNotFound).Once()
|
||||
|
||||
userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(&user.User{
|
||||
ID: 1,
|
||||
Email: "test@test.com",
|
||||
IsProvisioned: true,
|
||||
}, nil).Once()
|
||||
|
||||
s := ProvideUserSync(
|
||||
userSrv,
|
||||
authinfoimpl.ProvideOSSUserProtectionService(),
|
||||
authInfoSrv,
|
||||
"atest.FakeQuotaService{},
|
||||
tracing.NewNoopTracerService(),
|
||||
featuremgmt.WithFeatures(),
|
||||
setting.NewCfg(),
|
||||
nil,
|
||||
)
|
||||
|
||||
email := "test@test.com"
|
||||
|
||||
err := s.SyncUserHook(context.Background(), &authn.Identity{
|
||||
AuthenticatedBy: login.GrafanaComAuthModule,
|
||||
AuthID: "gcom-user-123",
|
||||
ClientParams: authn.ClientParams{
|
||||
SyncUser: true,
|
||||
AllowSignUp: true,
|
||||
LookUpParams: login.UserLookupParams{
|
||||
Email: &email,
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -4,8 +4,12 @@ import (
|
||||
"google.golang.org/protobuf/types/known/structpb"
|
||||
|
||||
authzv1 "github.com/grafana/authlib/authz/proto/v1"
|
||||
|
||||
dashboardV1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1"
|
||||
folders "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1"
|
||||
iamv0alpha1 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/services/accesscontrol"
|
||||
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
)
|
||||
|
||||
@@ -44,7 +48,8 @@ func getTypeInfo(group, resource string) (typeInfo, bool) {
|
||||
|
||||
func NewResourceInfoFromCheck(r *authzv1.CheckRequest) ResourceInfo {
|
||||
typ, relations := getTypeAndRelations(r.GetGroup(), r.GetResource())
|
||||
return newResource(
|
||||
|
||||
resource := newResource(
|
||||
typ,
|
||||
r.GetGroup(),
|
||||
r.GetResource(),
|
||||
@@ -53,6 +58,19 @@ func NewResourceInfoFromCheck(r *authzv1.CheckRequest) ResourceInfo {
|
||||
r.GetSubresource(),
|
||||
relations,
|
||||
)
|
||||
|
||||
// Special case for creating folders and resources in the root folder
|
||||
if r.GetVerb() == utils.VerbCreate {
|
||||
if resource.IsFolderResource() && resource.name == "" {
|
||||
resource.name = accesscontrol.GeneralFolderUID
|
||||
} else if resource.HasFolderSupport() && resource.folder == "" {
|
||||
resource.folder = accesscontrol.GeneralFolderUID
|
||||
}
|
||||
|
||||
return resource
|
||||
}
|
||||
|
||||
return resource
|
||||
}
|
||||
|
||||
func NewResourceInfoFromBatchItem(i *authzextv1.BatchCheckItem) ResourceInfo {
|
||||
@@ -164,3 +182,15 @@ func (r ResourceInfo) IsValidRelation(relation string) bool {
|
||||
func (r ResourceInfo) HasSubresource() bool {
|
||||
return r.subresource != ""
|
||||
}
|
||||
|
||||
var resourcesWithFolderSupport = map[string]bool{
|
||||
dashboardV1.DashboardResourceInfo.GroupResource().Group: true,
|
||||
}
|
||||
|
||||
func (r ResourceInfo) HasFolderSupport() bool {
|
||||
return resourcesWithFolderSupport[r.group]
|
||||
}
|
||||
|
||||
func (r ResourceInfo) IsFolderResource() bool {
|
||||
return r.group == folders.FolderResourceInfo.GroupResource().Group
|
||||
}
|
||||
|
||||
@@ -228,6 +228,9 @@ func TranslateToResourceTuple(subject string, action, kind, name string) (*openf
|
||||
}
|
||||
|
||||
if name == "*" {
|
||||
if m.group != "" && m.resource != "" {
|
||||
return NewGroupResourceTuple(subject, m.relation, m.group, m.resource, m.subresource), true
|
||||
}
|
||||
return NewGroupResourceTuple(subject, m.relation, translation.group, translation.resource, m.subresource), true
|
||||
}
|
||||
|
||||
|
||||
89
pkg/services/authz/zanzana/common/tuple_test.go
Normal file
89
pkg/services/authz/zanzana/common/tuple_test.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
openfgav1 "github.com/openfga/api/proto/openfga/v1"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/types/known/structpb"
|
||||
)
|
||||
|
||||
type translationTestCase struct {
|
||||
testName string
|
||||
subject string
|
||||
action string
|
||||
kind string
|
||||
name string
|
||||
expected *openfgav1.TupleKey
|
||||
}
|
||||
|
||||
func TestTranslateToResourceTuple(t *testing.T) {
|
||||
tests := []translationTestCase{
|
||||
{
|
||||
testName: "dashboards:read in folders",
|
||||
subject: "user:1",
|
||||
action: "dashboards:read",
|
||||
kind: "folders",
|
||||
name: "*",
|
||||
expected: &openfgav1.TupleKey{
|
||||
User: "user:1",
|
||||
Relation: "get",
|
||||
Object: "group_resource:dashboard.grafana.app/dashboards",
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "dashboards:read for all dashboards",
|
||||
subject: "user:1",
|
||||
action: "dashboards:read",
|
||||
kind: "dashboards",
|
||||
name: "*",
|
||||
expected: &openfgav1.TupleKey{
|
||||
User: "user:1",
|
||||
Relation: "get",
|
||||
Object: "group_resource:dashboard.grafana.app/dashboards",
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "dashboards:read for general folder",
|
||||
subject: "user:1",
|
||||
action: "dashboards:read",
|
||||
kind: "folders",
|
||||
name: "general",
|
||||
expected: &openfgav1.TupleKey{
|
||||
User: "user:1",
|
||||
Relation: "resource_get",
|
||||
Object: "folder:general",
|
||||
Condition: &openfgav1.RelationshipCondition{
|
||||
Name: "subresource_filter",
|
||||
Context: &structpb.Struct{
|
||||
Fields: map[string]*structpb.Value{
|
||||
"subresources": structpb.NewListValue(&structpb.ListValue{
|
||||
Values: []*structpb.Value{structpb.NewStringValue("dashboard.grafana.app/dashboards")},
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "folders:read",
|
||||
subject: "user:1",
|
||||
action: "folders:read",
|
||||
kind: "folders",
|
||||
name: "*",
|
||||
expected: &openfgav1.TupleKey{
|
||||
User: "user:1",
|
||||
Relation: "get",
|
||||
Object: "group_resource:folder.grafana.app/folders",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.testName, func(t *testing.T) {
|
||||
tuple, ok := TranslateToResourceTuple(test.subject, test.action, test.kind, test.name)
|
||||
require.True(t, ok)
|
||||
require.EqualExportedValues(t, test.expected, tuple)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -212,4 +212,16 @@ func testCheck(t *testing.T, server *Server) {
|
||||
require.NoError(t, err)
|
||||
assert.True(t, res.GetAllowed(), "user should be able to view dashboards in folder 6")
|
||||
})
|
||||
|
||||
t.Run("user:18 should be able to create folder in root folder", func(t *testing.T) {
|
||||
res, err := server.Check(newContextWithNamespace(), newReq("user:18", utils.VerbCreate, folderGroup, folderResource, "", "", ""))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, res.GetAllowed())
|
||||
})
|
||||
|
||||
t.Run("user:18 should be able to create dashboard in root folder", func(t *testing.T) {
|
||||
res, err := server.Check(newContextWithNamespace(), newReq("user:18", utils.VerbCreate, dashboardGroup, dashboardResource, "", "", ""))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, res.GetAllowed())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -71,6 +71,8 @@ func setup(t *testing.T, srv *Server) *Server {
|
||||
common.NewTypedResourceTuple("user:15", common.RelationGet, common.TypeUser, userGroup, userResource, statusSubresource, "1"),
|
||||
common.NewTypedResourceTuple("user:16", common.RelationGet, common.TypeServiceAccount, serviceAccountGroup, serviceAccountResource, statusSubresource, "1"),
|
||||
common.NewFolderTuple("user:17", common.RelationSetView, "4"),
|
||||
common.NewFolderTuple("user:18", common.RelationCreate, "general"),
|
||||
common.NewFolderResourceTuple("user:18", common.RelationCreate, dashboardGroup, dashboardResource, "", "general"),
|
||||
}
|
||||
|
||||
return setupOpenFGADatabase(t, srv, tuples)
|
||||
|
||||
@@ -304,8 +304,15 @@ type DeleteDashboardCommand struct {
|
||||
RemovePermissions bool
|
||||
}
|
||||
|
||||
type ProvisioningConfig struct {
|
||||
Name string
|
||||
OrgID int64
|
||||
Folder string
|
||||
AllowUIUpdates bool
|
||||
}
|
||||
|
||||
type DeleteOrphanedProvisionedDashboardsCommand struct {
|
||||
ReaderNames []string
|
||||
Config []ProvisioningConfig
|
||||
}
|
||||
|
||||
type DashboardProvisioningSearchResults struct {
|
||||
@@ -405,6 +412,8 @@ type DashboardSearchProjection struct {
|
||||
FolderTitle string
|
||||
SortMeta int64
|
||||
Tags []string
|
||||
ManagedBy utils.ManagerKind
|
||||
ManagerId string
|
||||
Deleted *time.Time
|
||||
}
|
||||
|
||||
|
||||
@@ -877,24 +877,32 @@ func (dr *DashboardServiceImpl) waitForSearchQuery(ctx context.Context, query *d
|
||||
}
|
||||
|
||||
func (dr *DashboardServiceImpl) DeleteOrphanedProvisionedDashboards(ctx context.Context, cmd *dashboards.DeleteOrphanedProvisionedDashboardsCommand) error {
|
||||
// cleanup duplicate provisioned dashboards first (this will have the same name and external_id)
|
||||
// note: only works in modes 1-3
|
||||
if err := dr.DeleteDuplicateProvisionedDashboards(ctx); err != nil {
|
||||
dr.log.Error("Failed to delete duplicate provisioned dashboards", "error", err)
|
||||
}
|
||||
|
||||
// check each org for orphaned provisioned dashboards
|
||||
orgs, err := dr.orgService.Search(ctx, &org.SearchOrgsQuery{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
orgIDs := make([]int64, 0, len(orgs))
|
||||
for _, org := range orgs {
|
||||
orgIDs = append(orgIDs, org.ID)
|
||||
}
|
||||
|
||||
if err := dr.DeleteDuplicateProvisionedDashboards(ctx, orgIDs, cmd.Config); err != nil {
|
||||
dr.log.Error("Failed to delete duplicate provisioned dashboards", "error", err)
|
||||
}
|
||||
|
||||
currentNames := make([]string, 0, len(cmd.Config))
|
||||
for _, cfg := range cmd.Config {
|
||||
currentNames = append(currentNames, cfg.Name)
|
||||
}
|
||||
|
||||
for _, org := range orgs {
|
||||
ctx, _ := identity.WithServiceIdentity(ctx, org.ID)
|
||||
// find all dashboards in the org that have a file repo set that is not in the given readers list
|
||||
foundDashs, err := dr.searchProvisionedDashboardsThroughK8s(ctx, &dashboards.FindPersistedDashboardsQuery{
|
||||
ManagedBy: utils.ManagerKindClassicFP, //nolint:staticcheck
|
||||
ManagerIdentityNotIn: cmd.ReaderNames,
|
||||
ManagerIdentityNotIn: currentNames,
|
||||
OrgId: org.ID,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -921,7 +929,129 @@ func (dr *DashboardServiceImpl) DeleteOrphanedProvisionedDashboards(ctx context.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dr *DashboardServiceImpl) DeleteDuplicateProvisionedDashboards(ctx context.Context) error {
|
||||
// searchExistingProvisionedData fetches provisioned data for the purposes of
|
||||
// duplication cleanup. Returns the set of folder UIDs for folders with the
|
||||
// given title, and the set of resources contained in those folders.
|
||||
func (dr *DashboardServiceImpl) searchExistingProvisionedData(
|
||||
ctx context.Context, orgID int64, folderTitle string,
|
||||
) ([]string, []dashboards.DashboardSearchProjection, error) {
|
||||
ctx, user := identity.WithServiceIdentity(ctx, orgID)
|
||||
cmd := folder.SearchFoldersQuery{
|
||||
OrgID: orgID,
|
||||
SignedInUser: user,
|
||||
Title: folderTitle,
|
||||
TitleExactMatch: true,
|
||||
}
|
||||
|
||||
searchResults, err := dr.folderService.SearchFolders(ctx, cmd)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("checking if provisioning reset is required: %w", err)
|
||||
}
|
||||
|
||||
var matchingFolders []string //nolint:prealloc
|
||||
for _, result := range searchResults {
|
||||
f, err := dr.folderService.Get(ctx, &folder.GetFolderQuery{
|
||||
OrgID: orgID,
|
||||
UID: &result.UID,
|
||||
SignedInUser: user,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// We are only interested in folders at the top-level of the folder hierarchy.
|
||||
// Cleanup is not performed for provisioned folders that were moved to
|
||||
// a different location.
|
||||
if f.ParentUID != "" {
|
||||
continue
|
||||
}
|
||||
|
||||
matchingFolders = append(matchingFolders, f.UID)
|
||||
}
|
||||
|
||||
if len(matchingFolders) == 0 {
|
||||
// If there are no folders with the same title as the provisioned folder we
|
||||
// are looking for, there is nothing to be cleaned up.
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
resources, err := dr.FindDashboards(ctx, &dashboards.FindPersistedDashboardsQuery{
|
||||
OrgId: orgID,
|
||||
SignedInUser: user,
|
||||
FolderUIDs: matchingFolders,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return matchingFolders, resources, nil
|
||||
}
|
||||
|
||||
// maybeResetProvisioning will check for duplicated provisioned dashboards in the database. These duplications
|
||||
// happen when multiple provisioned dashboards of the same title are found, or multiple provisioned
|
||||
// folders are found. In this case, provisioned resources are deleted, allowing the provisioning
|
||||
// process to start from scratch after this function returns.
|
||||
func (dr *DashboardServiceImpl) maybeResetProvisioning(ctx context.Context, orgs []int64, configs []dashboards.ProvisioningConfig) {
|
||||
if skipReason := canBeAutomaticallyCleanedUp(configs); skipReason != "" {
|
||||
dr.log.Info("not eligible for automated cleanup", "reason", skipReason)
|
||||
return
|
||||
}
|
||||
|
||||
folderTitle := configs[0].Folder
|
||||
provisionedNames := map[string]bool{}
|
||||
for _, c := range configs {
|
||||
provisionedNames[c.Name] = true
|
||||
}
|
||||
|
||||
for _, orgID := range orgs {
|
||||
ctx, user := identity.WithServiceIdentity(ctx, orgID)
|
||||
provFolders, resources, err := dr.searchExistingProvisionedData(ctx, orgID, folderTitle)
|
||||
if err != nil {
|
||||
dr.log.Error("failed to search for provisioned data for cleanup", "org", orgID, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
steps, err := cleanupSteps(provFolders, resources, provisionedNames)
|
||||
if err != nil {
|
||||
dr.log.Warn("not possible to perform automated duplicate cleanup", "org", orgID, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, step := range steps {
|
||||
var err error
|
||||
|
||||
switch step.Type {
|
||||
case searchstore.TypeDashboard:
|
||||
err = dr.deleteDashboard(ctx, 0, step.UID, orgID, false)
|
||||
case searchstore.TypeFolder:
|
||||
err = dr.folderService.Delete(ctx, &folder.DeleteFolderCommand{
|
||||
OrgID: orgID,
|
||||
SignedInUser: user,
|
||||
UID: step.UID,
|
||||
})
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
dr.log.Info("deleted duplicated provisioned resource",
|
||||
"type", step.Type, "uid", step.UID,
|
||||
)
|
||||
} else {
|
||||
dr.log.Error("failed to delete duplicated provisioned resource",
|
||||
"type", step.Type, "uid", step.UID, "error", err,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (dr *DashboardServiceImpl) DeleteDuplicateProvisionedDashboards(ctx context.Context, orgs []int64, configs []dashboards.ProvisioningConfig) error {
|
||||
// Start from scratch if duplications that cannot be fixed by the logic
|
||||
// below are found in the database.
|
||||
dr.maybeResetProvisioning(ctx, orgs, configs)
|
||||
|
||||
// cleanup duplicate provisioned dashboards (i.e., with the same name and external_id).
|
||||
// Note: only works in modes 1-3. This logic can be removed once mode5 is
|
||||
// enabled everywhere.
|
||||
duplicates, err := dr.dashboardStore.GetDuplicateProvisionedDashboards(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1511,6 +1641,8 @@ func (dr *DashboardServiceImpl) FindDashboards(ctx context.Context, query *dashb
|
||||
FolderTitle: folderTitle,
|
||||
FolderID: folderID,
|
||||
FolderSlug: slugify.Slugify(folderTitle),
|
||||
ManagedBy: hit.ManagedBy.Kind,
|
||||
ManagerId: hit.ManagedBy.ID,
|
||||
Tags: hit.Tags,
|
||||
}
|
||||
|
||||
|
||||
@@ -779,7 +779,7 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
|
||||
}, nil).Twice()
|
||||
|
||||
err := service.DeleteOrphanedProvisionedDashboards(context.Background(), &dashboards.DeleteOrphanedProvisionedDashboardsCommand{
|
||||
ReaderNames: []string{"test"},
|
||||
Config: []dashboards.ProvisioningConfig{{Name: "test"}},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
k8sCliMock.AssertExpectations(t)
|
||||
@@ -874,7 +874,7 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
|
||||
}, nil).Once()
|
||||
|
||||
err := singleOrgService.DeleteOrphanedProvisionedDashboards(ctx, &dashboards.DeleteOrphanedProvisionedDashboardsCommand{
|
||||
ReaderNames: []string{"test"},
|
||||
Config: []dashboards.ProvisioningConfig{{Name: "test"}},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
k8sCliMock.AssertExpectations(t)
|
||||
@@ -906,7 +906,7 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
|
||||
}, nil)
|
||||
|
||||
err := singleOrgService.DeleteOrphanedProvisionedDashboards(ctx, &dashboards.DeleteOrphanedProvisionedDashboardsCommand{
|
||||
ReaderNames: []string{"test"},
|
||||
Config: []dashboards.ProvisioningConfig{{Name: "test"}},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
k8sCliMock.AssertExpectations(t)
|
||||
|
||||
107
pkg/services/dashboards/service/provisioning_cleanup.go
Normal file
107
pkg/services/dashboards/service/provisioning_cleanup.go
Normal file
@@ -0,0 +1,107 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/services/dashboards"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/searchstore"
|
||||
)
|
||||
|
||||
// canBeAutomaticallyCleanedUp determines whether this instance can be automatically cleaned up
|
||||
// if duplicated provisioned resources are found. To ensure the process does not delete
|
||||
// resources it shouldn't, automatic cleanups only happen if all provisioned dashboards
|
||||
// are stored in the same folder (by title), and no dashboards allow UI updates.
|
||||
func canBeAutomaticallyCleanedUp(configs []dashboards.ProvisioningConfig) string {
|
||||
if len(configs) == 0 {
|
||||
return "no provisioned dashboards"
|
||||
}
|
||||
|
||||
folderTitle := configs[0].Folder
|
||||
if len(folderTitle) == 0 {
|
||||
return fmt.Sprintf("dashboard has no folder: %s", configs[0].Name)
|
||||
}
|
||||
|
||||
for _, cfg := range configs {
|
||||
if cfg.AllowUIUpdates {
|
||||
return "contains dashboards with allowUiUpdates"
|
||||
}
|
||||
|
||||
if cfg.Folder != folderTitle {
|
||||
return "dashboards provisioned across multiple folders"
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
type deleteProvisionedResource struct {
|
||||
Type string
|
||||
UID string
|
||||
}
|
||||
|
||||
// cleanupSteps computes the sequence of steps to be performed in order to cleanup the
|
||||
// provisioning resources and allow the process to start from scratch when duplication
|
||||
// is detected. The sequence of steps will dictate the order in which dashboards and folders
|
||||
// are to be deleted.
|
||||
func cleanupSteps(provFolders []string, resources []dashboards.DashboardSearchProjection, configDashboards map[string]bool) ([]deleteProvisionedResource, error) {
|
||||
var hasDuplicatedProvisionedDashboard bool
|
||||
var hasUserCreatedResource bool
|
||||
var uniqueNames = map[string]struct{}{}
|
||||
var deleteProvisionedDashboards []deleteProvisionedResource //nolint:prealloc
|
||||
|
||||
for _, r := range resources {
|
||||
// nolint:staticcheck
|
||||
if r.IsFolder || r.ManagedBy != utils.ManagerKindClassicFP {
|
||||
hasUserCreatedResource = true
|
||||
continue
|
||||
}
|
||||
|
||||
// Only delete dashboards if they are included in the provisioning configuration
|
||||
// for this instance.
|
||||
if !configDashboards[r.ManagerId] {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, exists := uniqueNames[r.ManagerId]; exists {
|
||||
hasDuplicatedProvisionedDashboard = true
|
||||
}
|
||||
|
||||
uniqueNames[r.ManagerId] = struct{}{}
|
||||
deleteProvisionedDashboards = append(deleteProvisionedDashboards, deleteProvisionedResource{
|
||||
Type: searchstore.TypeDashboard,
|
||||
UID: r.UID,
|
||||
})
|
||||
}
|
||||
|
||||
if len(provFolders) == 0 {
|
||||
// When there are no provisioned folders, there is nothing to do.
|
||||
return nil, nil
|
||||
} else if len(provFolders) == 1 {
|
||||
// If only one folder was found, keep it and delete the provisioned dashboards if
|
||||
// duplication was found.
|
||||
if hasDuplicatedProvisionedDashboard {
|
||||
return deleteProvisionedDashboards, nil
|
||||
}
|
||||
} else {
|
||||
// If multiple folders were found *and* a user-created resource exists in
|
||||
// one of them, bail, as we wouldn't be able to delete one of the duplicated folders.
|
||||
if hasUserCreatedResource {
|
||||
return nil, errors.New("multiple provisioning folders exist with at least one user-created resource")
|
||||
}
|
||||
|
||||
// Delete provisioned dashboards first, and then the folders.
|
||||
steps := deleteProvisionedDashboards
|
||||
for _, uid := range provFolders {
|
||||
steps = append(steps, deleteProvisionedResource{
|
||||
Type: searchstore.TypeFolder,
|
||||
UID: uid,
|
||||
})
|
||||
}
|
||||
|
||||
return steps, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
279
pkg/services/dashboards/service/provisioning_cleanup_test.go
Normal file
279
pkg/services/dashboards/service/provisioning_cleanup_test.go
Normal file
@@ -0,0 +1,279 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/services/dashboards"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/searchstore"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func Test_canBeAutomaticallyCleanedUp(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
configs []dashboards.ProvisioningConfig
|
||||
expectedSkip string
|
||||
}{
|
||||
{
|
||||
name: "no dashboards defined in the configuration",
|
||||
configs: []dashboards.ProvisioningConfig{},
|
||||
expectedSkip: "no provisioned dashboards",
|
||||
},
|
||||
{
|
||||
name: "first defined dashboard has no folder defined",
|
||||
configs: []dashboards.ProvisioningConfig{
|
||||
{Name: "1", Folder: ""},
|
||||
{Folder: "f1"},
|
||||
},
|
||||
expectedSkip: "dashboard has no folder: 1",
|
||||
},
|
||||
{
|
||||
name: "one of the provisioned dashboards has no folder defined",
|
||||
configs: []dashboards.ProvisioningConfig{
|
||||
{Name: "1", Folder: "f1"},
|
||||
{Name: "2", Folder: "f1"},
|
||||
{Name: "3", Folder: ""},
|
||||
{Name: "4", Folder: "f1"},
|
||||
},
|
||||
expectedSkip: "dashboards provisioned across multiple folders",
|
||||
},
|
||||
{
|
||||
name: "one of the provisioned dashboards allows UI updates",
|
||||
configs: []dashboards.ProvisioningConfig{
|
||||
{Name: "1", Folder: "f1"},
|
||||
{Name: "2", Folder: "f1", AllowUIUpdates: true},
|
||||
{Name: "3", Folder: "f1"},
|
||||
{Name: "4", Folder: "f1"},
|
||||
},
|
||||
expectedSkip: "contains dashboards with allowUiUpdates",
|
||||
},
|
||||
{
|
||||
name: "one of the provisioned dashboards is in a different folder",
|
||||
configs: []dashboards.ProvisioningConfig{
|
||||
{Name: "1", Folder: "f1"},
|
||||
{Name: "2", Folder: "f1"},
|
||||
{Name: "3", Folder: "f1"},
|
||||
{Name: "4", Folder: "different"},
|
||||
},
|
||||
expectedSkip: "dashboards provisioned across multiple folders",
|
||||
},
|
||||
{
|
||||
name: "can be skipped when all conditions are met",
|
||||
configs: []dashboards.ProvisioningConfig{
|
||||
{Name: "1", Folder: "f1"},
|
||||
{Name: "2", Folder: "f1"},
|
||||
{Name: "3", Folder: "f1"},
|
||||
{Name: "4", Folder: "f1"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
require.Equal(t, tc.expectedSkip, canBeAutomaticallyCleanedUp(tc.configs))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_cleanupSteps(t *testing.T) {
|
||||
isDashboard, isFolder := false, true
|
||||
|
||||
fromUser := func(uid, name string, isFolder bool) dashboards.DashboardSearchProjection {
|
||||
return dashboards.DashboardSearchProjection{
|
||||
UID: uid,
|
||||
ManagerId: name,
|
||||
IsFolder: isFolder,
|
||||
}
|
||||
}
|
||||
|
||||
provisioned := func(uid, name string, isFolder bool) dashboards.DashboardSearchProjection {
|
||||
dashboard := fromUser(uid, name, isFolder)
|
||||
dashboard.ManagedBy = utils.ManagerKindClassicFP //nolint:staticcheck
|
||||
return dashboard
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
provisionedFolders []string
|
||||
provisionedResources []dashboards.DashboardSearchProjection
|
||||
configDashboards []string
|
||||
expectedSteps []deleteProvisionedResource
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
name: "no provisioned folders, nothing to do",
|
||||
provisionedFolders: []string{},
|
||||
configDashboards: []string{"Provisioned1", "Provisioned2", "Provisioned3"},
|
||||
provisionedResources: []dashboards.DashboardSearchProjection{
|
||||
provisioned("d1", "Provisioned1", isDashboard),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple folders, a user-created dashboard in one of them",
|
||||
provisionedFolders: []string{"folder1", "folder2"},
|
||||
configDashboards: []string{"Provisioned1", "Provisioned2", "Provisioned3"},
|
||||
provisionedResources: []dashboards.DashboardSearchProjection{
|
||||
provisioned("d1", "Provisioned1", isDashboard),
|
||||
provisioned("d2", "Provisioned2", isDashboard),
|
||||
fromUser("d3", "User1", isDashboard),
|
||||
provisioned("d4", "Provisioned3", isDashboard),
|
||||
},
|
||||
expectedErr: "multiple provisioning folders exist with at least one user-created resource",
|
||||
},
|
||||
{
|
||||
name: "multiple folders, a user-created folder in one of them",
|
||||
provisionedFolders: []string{"folder1", "folder2"},
|
||||
configDashboards: []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
|
||||
provisionedResources: []dashboards.DashboardSearchProjection{
|
||||
provisioned("d1", "Provisioned1", isDashboard),
|
||||
provisioned("d2", "Provisioned2", isDashboard),
|
||||
provisioned("d3", "Provisioned3", isDashboard),
|
||||
fromUser("f1", "UserFolder1", isFolder),
|
||||
},
|
||||
expectedErr: "multiple provisioning folders exist with at least one user-created resource",
|
||||
},
|
||||
{
|
||||
name: "single folder, some dashboards duplicated",
|
||||
provisionedFolders: []string{"folder1"},
|
||||
configDashboards: []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
|
||||
provisionedResources: []dashboards.DashboardSearchProjection{
|
||||
// Provisioned1 is duplicated.
|
||||
provisioned("d1", "Provisioned1", isDashboard),
|
||||
provisioned("d2", "Provisioned2", isDashboard),
|
||||
provisioned("d3", "Provisioned1", isDashboard),
|
||||
provisioned("d4", "Provisioned3", isDashboard),
|
||||
},
|
||||
expectedSteps: []deleteProvisionedResource{
|
||||
{Type: searchstore.TypeDashboard, UID: "d1"},
|
||||
{Type: searchstore.TypeDashboard, UID: "d2"},
|
||||
{Type: searchstore.TypeDashboard, UID: "d3"},
|
||||
{Type: searchstore.TypeDashboard, UID: "d4"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "single folder, duplicated dashboards, user-created dashboards are ignored",
|
||||
provisionedFolders: []string{"folder1"},
|
||||
configDashboards: []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
|
||||
provisionedResources: []dashboards.DashboardSearchProjection{
|
||||
// Provisioned1 is duplicated.
|
||||
provisioned("d1", "Provisioned1", isDashboard),
|
||||
provisioned("d2", "Provisioned2", isDashboard),
|
||||
fromUser("d3", "User1", isDashboard),
|
||||
provisioned("d4", "Provisioned3", isDashboard),
|
||||
provisioned("d5", "Provisioned1", isDashboard),
|
||||
},
|
||||
// User dashboard (d3) is not deleted.
|
||||
expectedSteps: []deleteProvisionedResource{
|
||||
{Type: searchstore.TypeDashboard, UID: "d1"},
|
||||
{Type: searchstore.TypeDashboard, UID: "d2"},
|
||||
{Type: searchstore.TypeDashboard, UID: "d4"},
|
||||
{Type: searchstore.TypeDashboard, UID: "d5"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "single folder, duplicated dashboards, user-created folders are ignored",
|
||||
provisionedFolders: []string{"folder1"},
|
||||
configDashboards: []string{"Provisioned1", "Provisioned2", "Provisioned3"},
|
||||
provisionedResources: []dashboards.DashboardSearchProjection{
|
||||
// Provisioned1 is duplicated.
|
||||
provisioned("d1", "Provisioned1", isDashboard),
|
||||
provisioned("d2", "Provisioned2", isDashboard),
|
||||
provisioned("d3", "Provisioned3", isDashboard),
|
||||
provisioned("d4", "Provisioned1", isDashboard),
|
||||
fromUser("f1", "UserFolder1", isFolder),
|
||||
},
|
||||
// User folder (f1) is not deleted.
|
||||
expectedSteps: []deleteProvisionedResource{
|
||||
{Type: searchstore.TypeDashboard, UID: "d1"},
|
||||
{Type: searchstore.TypeDashboard, UID: "d2"},
|
||||
{Type: searchstore.TypeDashboard, UID: "d3"},
|
||||
{Type: searchstore.TypeDashboard, UID: "d4"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple folders, only provisioned dashboards",
|
||||
provisionedFolders: []string{"folder1", "folder2"},
|
||||
configDashboards: []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
|
||||
provisionedResources: []dashboards.DashboardSearchProjection{
|
||||
provisioned("d1", "Provisioned1", isDashboard),
|
||||
provisioned("d2", "Provisioned2", isDashboard),
|
||||
provisioned("d3", "Provisioned3", isDashboard),
|
||||
provisioned("d4", "Provisioned4", isDashboard),
|
||||
},
|
||||
// Delete all dashboards, then all folders.
|
||||
expectedSteps: []deleteProvisionedResource{
|
||||
{Type: searchstore.TypeDashboard, UID: "d1"},
|
||||
{Type: searchstore.TypeDashboard, UID: "d2"},
|
||||
{Type: searchstore.TypeDashboard, UID: "d3"},
|
||||
{Type: searchstore.TypeDashboard, UID: "d4"},
|
||||
{Type: searchstore.TypeFolder, UID: "folder1"},
|
||||
{Type: searchstore.TypeFolder, UID: "folder2"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "single folder, only deletes dashboards defined in the config file",
|
||||
provisionedFolders: []string{"folder1"},
|
||||
configDashboards: []string{"Provisioned1", "Provisioned2"},
|
||||
provisionedResources: []dashboards.DashboardSearchProjection{
|
||||
provisioned("d1", "Provisioned1", isDashboard),
|
||||
provisioned("d2", "Provisioned2", isDashboard),
|
||||
provisioned("d3", "Provisioned1", isDashboard),
|
||||
provisioned("d4", "Provisioned4", isDashboard),
|
||||
provisioned("d5", "Provisioned4", isDashboard),
|
||||
},
|
||||
// Delete duplicated dashboards, but keep Provisioned4, since it's not in the config file.
|
||||
expectedSteps: []deleteProvisionedResource{
|
||||
{Type: searchstore.TypeDashboard, UID: "d1"},
|
||||
{Type: searchstore.TypeDashboard, UID: "d2"},
|
||||
{Type: searchstore.TypeDashboard, UID: "d3"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "single folder, no duplicated dashboards",
|
||||
provisionedFolders: []string{"folder1"},
|
||||
configDashboards: []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
|
||||
provisionedResources: []dashboards.DashboardSearchProjection{
|
||||
provisioned("d1", "Provisioned1", isDashboard),
|
||||
provisioned("d2", "Provisioned2", isDashboard),
|
||||
provisioned("d3", "Provisioned3", isDashboard),
|
||||
provisioned("d4", "Provisioned4", isDashboard),
|
||||
},
|
||||
expectedSteps: nil, // no duplicates, nothing to do
|
||||
},
|
||||
{
|
||||
name: "single folder, no duplicated dashboards, multiple user-created resources",
|
||||
provisionedFolders: []string{"folder1"},
|
||||
configDashboards: []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
|
||||
provisionedResources: []dashboards.DashboardSearchProjection{
|
||||
provisioned("d1", "Provisioned1", isDashboard),
|
||||
provisioned("d2", "Provisioned2", isDashboard),
|
||||
fromUser("f1", "UserFolder1", isFolder),
|
||||
provisioned("d3", "Provisioned3", isDashboard),
|
||||
fromUser("d4", "User1", isDashboard),
|
||||
provisioned("d5", "Provisioned4", isDashboard),
|
||||
fromUser("d6", "User2", isDashboard),
|
||||
fromUser("f2", "UserFolder2", isFolder),
|
||||
},
|
||||
expectedSteps: nil, // no duplicates in the provisioned set, nothing to do
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
provisionedSet := make(map[string]bool)
|
||||
for _, name := range tc.configDashboards {
|
||||
provisionedSet[name] = true
|
||||
}
|
||||
|
||||
steps, err := cleanupSteps(tc.provisionedFolders, tc.provisionedResources, provisionedSet)
|
||||
if tc.expectedErr == "" {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expectedSteps, steps)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
require.Equal(t, tc.expectedErr, err.Error())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -274,6 +274,11 @@ func (s *Service) listDashboardVersionsThroughK8s(
|
||||
continueToken = tempOut.GetContinue()
|
||||
}
|
||||
|
||||
// Update the continue token on the response to reflect the actual position after all fetched items.
|
||||
// Without this, the response would return the token from the first fetch, causing duplicate items
|
||||
// on subsequent pages when multiple fetches were needed to fill the requested limit.
|
||||
out.SetContinue(continueToken)
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -268,6 +268,58 @@ func TestListDashboardVersions(t *testing.T) {
|
||||
}}}, res)
|
||||
})
|
||||
|
||||
t.Run("List returns continue token when first fetch satisfies limit with more pages", func(t *testing.T) {
|
||||
dashboardService := dashboards.NewFakeDashboardService(t)
|
||||
dashboardVersionService := Service{dashSvc: dashboardService, features: featuremgmt.WithFeatures()}
|
||||
mockCli := new(client.MockK8sHandler)
|
||||
dashboardVersionService.k8sclient = mockCli
|
||||
dashboardVersionService.features = featuremgmt.WithFeatures()
|
||||
|
||||
dashboardService.On("GetDashboardUIDByID", mock.Anything,
|
||||
mock.AnythingOfType("*dashboards.GetDashboardRefByIDQuery")).
|
||||
Return(&dashboards.DashboardRef{UID: "uid"}, nil)
|
||||
query := dashver.ListDashboardVersionsQuery{DashboardID: 42, Limit: 2}
|
||||
mockCli.On("GetUsersFromMeta", mock.Anything, mock.Anything).Return(map[string]*user.User{}, nil)
|
||||
|
||||
firstPage := &unstructured.UnstructuredList{
|
||||
Items: []unstructured.Unstructured{
|
||||
{Object: map[string]any{
|
||||
"metadata": map[string]any{
|
||||
"name": "uid",
|
||||
"resourceVersion": "11",
|
||||
"generation": int64(4),
|
||||
"labels": map[string]any{
|
||||
utils.LabelKeyDeprecatedInternalID: "42", // nolint:staticcheck
|
||||
},
|
||||
},
|
||||
"spec": map[string]any{},
|
||||
}},
|
||||
{Object: map[string]any{
|
||||
"metadata": map[string]any{
|
||||
"name": "uid",
|
||||
"resourceVersion": "12",
|
||||
"generation": int64(5),
|
||||
"labels": map[string]any{
|
||||
utils.LabelKeyDeprecatedInternalID: "42", // nolint:staticcheck
|
||||
},
|
||||
},
|
||||
"spec": map[string]any{},
|
||||
}},
|
||||
},
|
||||
}
|
||||
firstMeta, err := meta.ListAccessor(firstPage)
|
||||
require.NoError(t, err)
|
||||
firstMeta.SetContinue("t1") // More pages exist
|
||||
|
||||
mockCli.On("List", mock.Anything, mock.Anything, mock.Anything).Return(firstPage, nil).Once()
|
||||
|
||||
res, err := dashboardVersionService.List(context.Background(), &query)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 2, len(res.Versions))
|
||||
require.Equal(t, "t1", res.ContinueToken) // Token from first fetch when limit is satisfied
|
||||
mockCli.AssertNumberOfCalls(t, "List", 1) // Only one fetch needed
|
||||
})
|
||||
|
||||
t.Run("List returns correct continue token across multiple pages", func(t *testing.T) {
|
||||
dashboardService := dashboards.NewFakeDashboardService(t)
|
||||
dashboardVersionService := Service{dashSvc: dashboardService, features: featuremgmt.WithFeatures()}
|
||||
@@ -333,7 +385,79 @@ func TestListDashboardVersions(t *testing.T) {
|
||||
res, err := dashboardVersionService.List(context.Background(), &query)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 3, len(res.Versions))
|
||||
require.Equal(t, "t1", res.ContinueToken) // Implementation returns continue token from first page
|
||||
require.Equal(t, "", res.ContinueToken) // Should return token from last fetch (empty = no more pages)
|
||||
mockCli.AssertNumberOfCalls(t, "List", 2)
|
||||
})
|
||||
|
||||
t.Run("List returns continue token from last fetch when more pages exist", func(t *testing.T) {
|
||||
dashboardService := dashboards.NewFakeDashboardService(t)
|
||||
dashboardVersionService := Service{dashSvc: dashboardService, features: featuremgmt.WithFeatures()}
|
||||
mockCli := new(client.MockK8sHandler)
|
||||
dashboardVersionService.k8sclient = mockCli
|
||||
dashboardVersionService.features = featuremgmt.WithFeatures()
|
||||
|
||||
dashboardService.On("GetDashboardUIDByID", mock.Anything,
|
||||
mock.AnythingOfType("*dashboards.GetDashboardRefByIDQuery")).
|
||||
Return(&dashboards.DashboardRef{UID: "uid"}, nil)
|
||||
query := dashver.ListDashboardVersionsQuery{DashboardID: 42, Limit: 3}
|
||||
mockCli.On("GetUsersFromMeta", mock.Anything, mock.Anything).Return(map[string]*user.User{}, nil)
|
||||
|
||||
firstPage := &unstructured.UnstructuredList{
|
||||
Items: []unstructured.Unstructured{
|
||||
{Object: map[string]any{
|
||||
"metadata": map[string]any{
|
||||
"name": "uid",
|
||||
"resourceVersion": "11",
|
||||
"generation": int64(4),
|
||||
"labels": map[string]any{
|
||||
utils.LabelKeyDeprecatedInternalID: "42", // nolint:staticcheck
|
||||
},
|
||||
},
|
||||
"spec": map[string]any{},
|
||||
}},
|
||||
{Object: map[string]any{
|
||||
"metadata": map[string]any{
|
||||
"name": "uid",
|
||||
"resourceVersion": "12",
|
||||
"generation": int64(5),
|
||||
"labels": map[string]any{
|
||||
utils.LabelKeyDeprecatedInternalID: "42", // nolint:staticcheck
|
||||
},
|
||||
},
|
||||
"spec": map[string]any{},
|
||||
}},
|
||||
},
|
||||
}
|
||||
firstMeta, err := meta.ListAccessor(firstPage)
|
||||
require.NoError(t, err)
|
||||
firstMeta.SetContinue("t1")
|
||||
|
||||
secondPage := &unstructured.UnstructuredList{
|
||||
Items: []unstructured.Unstructured{
|
||||
{Object: map[string]any{
|
||||
"metadata": map[string]any{
|
||||
"name": "uid",
|
||||
"resourceVersion": "13",
|
||||
"generation": int64(6),
|
||||
"labels": map[string]any{
|
||||
utils.LabelKeyDeprecatedInternalID: "42", // nolint:staticcheck
|
||||
},
|
||||
},
|
||||
"spec": map[string]any{},
|
||||
}},
|
||||
},
|
||||
}
|
||||
secondMeta, err := meta.ListAccessor(secondPage)
|
||||
require.NoError(t, err)
|
||||
secondMeta.SetContinue("t2") // More pages exist
|
||||
|
||||
mockCli.On("List", mock.Anything, mock.Anything, mock.Anything).Return(firstPage, nil).Once()
|
||||
mockCli.On("List", mock.Anything, mock.Anything, mock.Anything).Return(secondPage, nil).Once()
|
||||
|
||||
res, err := dashboardVersionService.List(context.Background(), &query)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 3, len(res.Versions))
|
||||
require.Equal(t, "t2", res.ContinueToken) // Must return token from LAST fetch, not first
|
||||
mockCli.AssertNumberOfCalls(t, "List", 2)
|
||||
})
|
||||
|
||||
|
||||
@@ -202,6 +202,11 @@ func (s *Service) searchFoldersFromApiServer(ctx context.Context, query folder.S
|
||||
if query.Title != "" {
|
||||
// allow wildcard search
|
||||
request.Query = "*" + strings.ToLower(query.Title) + "*"
|
||||
// or perform exact match if requested
|
||||
if query.TitleExactMatch {
|
||||
request.Query = query.Title
|
||||
}
|
||||
|
||||
// if using query, you need to specify the fields you want
|
||||
request.Fields = dashboardsearch.IncludeFields
|
||||
}
|
||||
|
||||
@@ -224,12 +224,13 @@ type GetFoldersQuery struct {
|
||||
}
|
||||
|
||||
type SearchFoldersQuery struct {
|
||||
OrgID int64
|
||||
UIDs []string
|
||||
IDs []int64
|
||||
Title string
|
||||
Limit int64
|
||||
SignedInUser identity.Requester `json:"-"`
|
||||
OrgID int64
|
||||
UIDs []string
|
||||
IDs []int64
|
||||
Title string
|
||||
TitleExactMatch bool
|
||||
Limit int64
|
||||
SignedInUser identity.Requester `json:"-"`
|
||||
}
|
||||
|
||||
// GetParentsQuery captures the information required by the folder service to
|
||||
|
||||
@@ -153,13 +153,20 @@ func (provider *Provisioner) Provision(ctx context.Context) error {
|
||||
|
||||
// CleanUpOrphanedDashboards deletes provisioned dashboards missing a linked reader.
|
||||
func (provider *Provisioner) CleanUpOrphanedDashboards(ctx context.Context) {
|
||||
currentReaders := make([]string, len(provider.fileReaders))
|
||||
configs := make([]dashboards.ProvisioningConfig, len(provider.fileReaders))
|
||||
|
||||
for index, reader := range provider.fileReaders {
|
||||
currentReaders[index] = reader.Cfg.Name
|
||||
configs[index] = dashboards.ProvisioningConfig{
|
||||
Name: reader.Cfg.Name,
|
||||
OrgID: reader.Cfg.OrgID,
|
||||
Folder: reader.Cfg.Folder,
|
||||
AllowUIUpdates: reader.Cfg.AllowUIUpdates,
|
||||
}
|
||||
}
|
||||
|
||||
if err := provider.provisioner.DeleteOrphanedProvisionedDashboards(ctx, &dashboards.DeleteOrphanedProvisionedDashboardsCommand{ReaderNames: currentReaders}); err != nil {
|
||||
if err := provider.provisioner.DeleteOrphanedProvisionedDashboards(
|
||||
ctx, &dashboards.DeleteOrphanedProvisionedDashboardsCommand{Config: configs},
|
||||
); err != nil {
|
||||
provider.log.Warn("Failed to delete orphaned provisioned dashboards", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -618,6 +618,7 @@ type Cfg struct {
|
||||
EnableSearch bool
|
||||
OverridesFilePath string
|
||||
OverridesReloadInterval time.Duration
|
||||
EnableSQLKVBackend bool
|
||||
|
||||
// Secrets Management
|
||||
SecretsManagement SecretsManagerSettings
|
||||
|
||||
@@ -100,6 +100,9 @@ func (cfg *Cfg) setUnifiedStorageConfig() {
|
||||
cfg.OverridesFilePath = section.Key("overrides_path").String()
|
||||
cfg.OverridesReloadInterval = section.Key("overrides_reload_period").MustDuration(30 * time.Second)
|
||||
|
||||
// use sqlkv (resource/sqlkv) instead of the sql backend (sql/backend) as the StorageServer
|
||||
cfg.EnableSQLKVBackend = section.Key("enable_sqlkv_backend").MustBool(false)
|
||||
|
||||
cfg.MaxFileIndexAge = section.Key("max_file_index_age").MustDuration(0)
|
||||
cfg.MinFileIndexBuildVersion = section.Key("min_file_index_build_version").MustString("")
|
||||
}
|
||||
|
||||
@@ -9,6 +9,9 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/bwmarrin/snowflake"
|
||||
"github.com/grafana/grafana/pkg/infra/db"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/sql/db/dbimpl"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -24,6 +27,16 @@ func TestNewDataStore(t *testing.T) {
|
||||
require.NotNil(t, ds)
|
||||
}
|
||||
|
||||
// nolint:unused
|
||||
func setupTestDataStoreSqlKv(t *testing.T) *dataStore {
|
||||
dbstore := db.InitTestDB(t)
|
||||
eDB, err := dbimpl.ProvideResourceDB(dbstore, setting.NewCfg(), nil)
|
||||
require.NoError(t, err)
|
||||
kv, err := NewSQLKV(eDB)
|
||||
require.NoError(t, err)
|
||||
return newDataStore(kv)
|
||||
}
|
||||
|
||||
func TestDataKey_String(t *testing.T) {
|
||||
rv := int64(1934555792099250176)
|
||||
tests := []struct {
|
||||
@@ -679,10 +692,21 @@ func TestParseKey(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataStore_Save_And_Get(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
func runDataStoreTestWith(t *testing.T, storeName string, newStoreFn func(*testing.T) *dataStore, testFn func(*testing.T, context.Context, *dataStore)) {
|
||||
t.Run(storeName, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := newStoreFn(t)
|
||||
testFn(t, ctx, store)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDataStore_Save_And_Get(t *testing.T) {
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreSaveAndGet)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreSaveAndGet)
|
||||
}
|
||||
|
||||
func testDataStoreSaveAndGet(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
rv := node.Generate()
|
||||
|
||||
testKey := DataKey{
|
||||
@@ -744,9 +768,12 @@ func TestDataStore_Save_And_Get(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_Delete(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreDelete)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreDelete)
|
||||
}
|
||||
|
||||
func testDataStoreDelete(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
rv := node.Generate()
|
||||
|
||||
testKey := DataKey{
|
||||
@@ -795,9 +822,12 @@ func TestDataStore_Delete(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_List(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreList)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreList)
|
||||
}
|
||||
|
||||
func testDataStoreList(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
resourceKey := ListRequestKey{
|
||||
Namespace: "test-namespace",
|
||||
Group: "test-group",
|
||||
@@ -919,9 +949,12 @@ func TestDataStore_List(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_Integration(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreIntegration)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreIntegration)
|
||||
}
|
||||
|
||||
func testDataStoreIntegration(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
t.Run("full lifecycle test", func(t *testing.T) {
|
||||
resourceKey := ListRequestKey{
|
||||
Namespace: "integration-ns",
|
||||
@@ -1007,9 +1040,12 @@ func TestDataStore_Integration(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_Keys(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreKeys)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreKeys)
|
||||
}
|
||||
|
||||
func testDataStoreKeys(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
resourceKey := ListRequestKey{
|
||||
Namespace: "test-namespace",
|
||||
Group: "test-group",
|
||||
@@ -1154,9 +1190,12 @@ func TestDataStore_Keys(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_ValidationEnforced(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreValidationEnforced)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreValidationEnforced)
|
||||
}
|
||||
|
||||
func testDataStoreValidationEnforced(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
// Create an invalid key
|
||||
invalidKey := DataKey{
|
||||
Namespace: "Invalid-Namespace-$$$",
|
||||
@@ -1483,9 +1522,12 @@ func TestListRequestKey_Prefix(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_LastResourceVersion(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreLastResourceVersion)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreLastResourceVersion)
|
||||
}
|
||||
|
||||
func testDataStoreLastResourceVersion(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
t.Run("returns last resource version for existing data", func(t *testing.T) {
|
||||
resourceKey := ListRequestKey{
|
||||
Namespace: "test-namespace",
|
||||
@@ -1585,9 +1627,12 @@ func TestDataStore_LastResourceVersion(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_GetLatestResourceKey(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreGetLatestResourceKey)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreGetLatestResourceKey)
|
||||
}
|
||||
|
||||
func testDataStoreGetLatestResourceKey(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
key := GetRequestKey{
|
||||
Group: "apps",
|
||||
Resource: "resources",
|
||||
@@ -1648,9 +1693,12 @@ func TestDataStore_GetLatestResourceKey(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_GetLatestResourceKey_Deleted(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreGetLatestResourceKeyDeleted)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreGetLatestResourceKeyDeleted)
|
||||
}
|
||||
|
||||
func testDataStoreGetLatestResourceKeyDeleted(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
key := GetRequestKey{
|
||||
Group: "apps",
|
||||
Resource: "resources",
|
||||
@@ -1676,9 +1724,12 @@ func TestDataStore_GetLatestResourceKey_Deleted(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_GetLatestResourceKey_NotFound(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreGetLatestResourceKeyNotFound)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreGetLatestResourceKeyNotFound)
|
||||
}
|
||||
|
||||
func testDataStoreGetLatestResourceKeyNotFound(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
key := GetRequestKey{
|
||||
Group: "apps",
|
||||
Resource: "resources",
|
||||
@@ -1691,9 +1742,12 @@ func TestDataStore_GetLatestResourceKey_NotFound(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_GetResourceKeyAtRevision(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreGetResourceKeyAtRevision)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreGetResourceKeyAtRevision)
|
||||
}
|
||||
|
||||
func testDataStoreGetResourceKeyAtRevision(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
key := GetRequestKey{
|
||||
Group: "apps",
|
||||
Resource: "resources",
|
||||
@@ -1766,9 +1820,12 @@ func TestDataStore_GetResourceKeyAtRevision(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_ListLatestResourceKeys(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreListLatestResourceKeys)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreListLatestResourceKeys)
|
||||
}
|
||||
|
||||
func testDataStoreListLatestResourceKeys(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
listKey := ListRequestKey{
|
||||
Group: "apps",
|
||||
Resource: "resources",
|
||||
@@ -1819,9 +1876,12 @@ func TestDataStore_ListLatestResourceKeys(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_ListLatestResourceKeys_Deleted(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreListLatestResourceKeysDeleted)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreListLatestResourceKeysDeleted)
|
||||
}
|
||||
|
||||
func testDataStoreListLatestResourceKeysDeleted(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
listKey := ListRequestKey{
|
||||
Group: "apps",
|
||||
Resource: "resources",
|
||||
@@ -1869,9 +1929,12 @@ func TestDataStore_ListLatestResourceKeys_Deleted(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_ListLatestResourceKeys_Multiple(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreListLatestResourceKeysMultiple)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreListLatestResourceKeysMultiple)
|
||||
}
|
||||
|
||||
func testDataStoreListLatestResourceKeysMultiple(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
listKey := ListRequestKey{
|
||||
Group: "apps",
|
||||
Resource: "resources",
|
||||
@@ -1940,9 +2003,12 @@ func TestDataStore_ListLatestResourceKeys_Multiple(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_ListResourceKeysAtRevision(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreListResourceKeysAtRevision)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreListResourceKeysAtRevision)
|
||||
}
|
||||
|
||||
func testDataStoreListResourceKeysAtRevision(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
// Create multiple resources with different versions
|
||||
rv1 := node.Generate().Int64()
|
||||
rv2 := node.Generate().Int64()
|
||||
@@ -2152,9 +2218,12 @@ func TestDataStore_ListResourceKeysAtRevision(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_ListResourceKeysAtRevision_ValidationErrors(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreListResourceKeysAtRevisionValidationErrors)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreListResourceKeysAtRevisionValidationErrors)
|
||||
}
|
||||
|
||||
func testDataStoreListResourceKeysAtRevisionValidationErrors(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
tests := []struct {
|
||||
name string
|
||||
key ListRequestKey
|
||||
@@ -2194,9 +2263,12 @@ func TestDataStore_ListResourceKeysAtRevision_ValidationErrors(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_ListResourceKeysAtRevision_EmptyResults(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreListResourceKeysAtRevisionEmptyResults)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreListResourceKeysAtRevisionEmptyResults)
|
||||
}
|
||||
|
||||
func testDataStoreListResourceKeysAtRevisionEmptyResults(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
listKey := ListRequestKey{
|
||||
Group: "apps",
|
||||
Resource: "resources",
|
||||
@@ -2213,9 +2285,12 @@ func TestDataStore_ListResourceKeysAtRevision_EmptyResults(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_ListResourceKeysAtRevision_ResourcesNewerThanRevision(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreListResourceKeysAtRevisionResourcesNewerThanRevision)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreListResourceKeysAtRevisionResourcesNewerThanRevision)
|
||||
}
|
||||
|
||||
func testDataStoreListResourceKeysAtRevisionResourcesNewerThanRevision(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
// Create a resource with a high resource version
|
||||
rv := node.Generate().Int64()
|
||||
key := DataKey{
|
||||
@@ -2681,9 +2756,12 @@ func TestGetRequestKey_Prefix(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_GetResourceStats_Comprehensive(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreGetResourceStatsComprehensive)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreGetResourceStatsComprehensive)
|
||||
}
|
||||
|
||||
func testDataStoreGetResourceStatsComprehensive(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
// Test setup: 3 namespaces × 3 groups × 3 resources × 3 names × 3 versions = 243 total entries
|
||||
// But each name will have only 1 latest version that counts, so 3 × 3 × 3 × 3 = 81 non-deleted resources
|
||||
namespaces := []string{"ns1", "ns2", "ns3"}
|
||||
@@ -2888,9 +2966,12 @@ func TestDataStore_GetResourceStats_Comprehensive(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_getGroupResources(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreGetGroupResources)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreGetGroupResources)
|
||||
}
|
||||
|
||||
func testDataStoreGetGroupResources(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
// Create test data with multiple group/resource combinations
|
||||
testData := []struct {
|
||||
group string
|
||||
@@ -2951,9 +3032,12 @@ func TestDataStore_getGroupResources(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_BatchDelete(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreBatchDelete)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreBatchDelete)
|
||||
}
|
||||
|
||||
func testDataStoreBatchDelete(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
keys := make([]DataKey, 95)
|
||||
for i := 0; i < 95; i++ {
|
||||
rv := node.Generate().Int64()
|
||||
@@ -2987,9 +3071,12 @@ func TestDataStore_BatchDelete(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_BatchGet(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreBatchGet)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreBatchGet)
|
||||
}
|
||||
|
||||
func testDataStoreBatchGet(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
t.Run("batch get multiple existing keys", func(t *testing.T) {
|
||||
// Create test data
|
||||
keys := make([]DataKey, 5)
|
||||
@@ -3132,9 +3219,12 @@ func TestDataStore_BatchGet(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataStore_GetLatestAndPredecessor(t *testing.T) {
|
||||
ds := setupTestDataStore(t)
|
||||
ctx := context.Background()
|
||||
runDataStoreTestWith(t, "badger", setupTestDataStore, testDataStoreGetLatestAndPredecessor)
|
||||
// enable this when sqlkv is ready
|
||||
// runDataStoreTestWith(t, "sqlkv", setupTestDataStoreSqlKv, testDataStoreGetLatestAndPredecessor)
|
||||
}
|
||||
|
||||
func testDataStoreGetLatestAndPredecessor(t *testing.T, ctx context.Context, ds *dataStore) {
|
||||
resourceKey := ListRequestKey{
|
||||
Namespace: "test-namespace",
|
||||
Group: "test-group",
|
||||
|
||||
@@ -7,6 +7,10 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/bwmarrin/snowflake"
|
||||
"github.com/grafana/grafana/pkg/infra/db"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/sql/db/dbimpl"
|
||||
"github.com/grafana/grafana/pkg/tests/testsuite"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@@ -21,6 +25,20 @@ func setupTestEventStore(t *testing.T) *eventStore {
|
||||
return newEventStore(kv)
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
testsuite.Run(m)
|
||||
}
|
||||
|
||||
// nolint:unused
|
||||
func setupTestEventStoreSqlKv(t *testing.T) *eventStore {
|
||||
dbstore := db.InitTestDB(t)
|
||||
eDB, err := dbimpl.ProvideResourceDB(dbstore, setting.NewCfg(), nil)
|
||||
require.NoError(t, err)
|
||||
kv, err := NewSQLKV(eDB)
|
||||
require.NoError(t, err)
|
||||
return newEventStore(kv)
|
||||
}
|
||||
|
||||
func TestNewEventStore(t *testing.T) {
|
||||
store := setupTestEventStore(t)
|
||||
assert.NotNil(t, store.kv)
|
||||
@@ -180,10 +198,21 @@ func TestEventStore_ParseEventKey(t *testing.T) {
|
||||
assert.Equal(t, originalKey, parsedKey)
|
||||
}
|
||||
|
||||
func TestEventStore_Save_Get(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := setupTestEventStore(t)
|
||||
func runEventStoreTestWith(t *testing.T, storeName string, newStoreFn func(*testing.T) *eventStore, testFn func(*testing.T, context.Context, *eventStore)) {
|
||||
t.Run(storeName, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := newStoreFn(t)
|
||||
testFn(t, ctx, store)
|
||||
})
|
||||
}
|
||||
|
||||
func TestEventStore_Save_Get(t *testing.T) {
|
||||
runEventStoreTestWith(t, "badger", setupTestEventStore, testEventStoreSaveGet)
|
||||
// enable this when sqlkv is ready
|
||||
// runEventStoreTestWith(t, "sqlkv", setupTestEventStoreSqlKv, testEventStoreSaveGet)
|
||||
}
|
||||
|
||||
func testEventStoreSaveGet(t *testing.T, ctx context.Context, store *eventStore) {
|
||||
event := Event{
|
||||
Namespace: "default",
|
||||
Group: "apps",
|
||||
@@ -216,9 +245,12 @@ func TestEventStore_Save_Get(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEventStore_Get_NotFound(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := setupTestEventStore(t)
|
||||
runEventStoreTestWith(t, "badger", setupTestEventStore, testEventStoreGetNotFound)
|
||||
// enable this when sqlkv is ready
|
||||
// runEventStoreTestWith(t, "sqlkv", setupTestEventStoreSqlKv, testEventStoreGetNotFound)
|
||||
}
|
||||
|
||||
func testEventStoreGetNotFound(t *testing.T, ctx context.Context, store *eventStore) {
|
||||
nonExistentKey := EventKey{
|
||||
Namespace: "default",
|
||||
Group: "apps",
|
||||
@@ -233,9 +265,12 @@ func TestEventStore_Get_NotFound(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEventStore_LastEventKey(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := setupTestEventStore(t)
|
||||
runEventStoreTestWith(t, "badger", setupTestEventStore, testEventStoreLastEventKey)
|
||||
// enable this when sqlkv is ready
|
||||
// runEventStoreTestWith(t, "sqlkv", setupTestEventStoreSqlKv, testEventStoreLastEventKey)
|
||||
}
|
||||
|
||||
func testEventStoreLastEventKey(t *testing.T, ctx context.Context, store *eventStore) {
|
||||
// Test when no events exist
|
||||
_, err := store.LastEventKey(ctx)
|
||||
assert.Error(t, err)
|
||||
@@ -292,9 +327,12 @@ func TestEventStore_LastEventKey(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEventStore_ListKeysSince(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := setupTestEventStore(t)
|
||||
runEventStoreTestWith(t, "badger", setupTestEventStore, testEventStoreListKeysSince)
|
||||
// enable this when sqlkv is ready
|
||||
// runEventStoreTestWith(t, "sqlkv", setupTestEventStoreSqlKv, testEventStoreListKeysSince)
|
||||
}
|
||||
|
||||
func testEventStoreListKeysSince(t *testing.T, ctx context.Context, store *eventStore) {
|
||||
// Add events with different resource versions
|
||||
events := []Event{
|
||||
{
|
||||
@@ -349,9 +387,12 @@ func TestEventStore_ListKeysSince(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEventStore_ListSince(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := setupTestEventStore(t)
|
||||
runEventStoreTestWith(t, "badger", setupTestEventStore, testEventStoreListSince)
|
||||
// enable this when sqlkv is ready
|
||||
// runEventStoreTestWith(t, "sqlkv", setupTestEventStoreSqlKv, testEventStoreListSince)
|
||||
}
|
||||
|
||||
func testEventStoreListSince(t *testing.T, ctx context.Context, store *eventStore) {
|
||||
// Add events with different resource versions
|
||||
events := []Event{
|
||||
{
|
||||
@@ -404,9 +445,12 @@ func TestEventStore_ListSince(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEventStore_ListSince_Empty(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := setupTestEventStore(t)
|
||||
runEventStoreTestWith(t, "badger", setupTestEventStore, testEventStoreListSinceEmpty)
|
||||
// enable this when sqlkv is ready
|
||||
// runEventStoreTestWith(t, "sqlkv", setupTestEventStoreSqlKv, testEventStoreListSinceEmpty)
|
||||
}
|
||||
|
||||
func testEventStoreListSinceEmpty(t *testing.T, ctx context.Context, store *eventStore) {
|
||||
// List events when store is empty
|
||||
retrievedEvents := make([]Event, 0)
|
||||
for event, err := range store.ListSince(ctx, 0) {
|
||||
@@ -459,9 +503,12 @@ func TestEventKey_Struct(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEventStore_Save_InvalidJSON(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := setupTestEventStore(t)
|
||||
runEventStoreTestWith(t, "badger", setupTestEventStore, testEventStoreSaveInvalidJSON)
|
||||
// enable this when sqlkv is ready
|
||||
// runEventStoreTestWith(t, "sqlkv", setupTestEventStoreSqlKv, testEventStoreSaveInvalidJSON)
|
||||
}
|
||||
|
||||
func testEventStoreSaveInvalidJSON(t *testing.T, ctx context.Context, store *eventStore) {
|
||||
// This should work fine as the Event struct should be serializable
|
||||
event := Event{
|
||||
Namespace: "default",
|
||||
@@ -477,9 +524,12 @@ func TestEventStore_Save_InvalidJSON(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEventStore_CleanupOldEvents(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := setupTestEventStore(t)
|
||||
runEventStoreTestWith(t, "badger", setupTestEventStore, testEventStoreCleanupOldEvents)
|
||||
// enable this when sqlkv is ready
|
||||
// runEventStoreTestWith(t, "sqlkv", setupTestEventStoreSqlKv, testEventStoreCleanupOldEvents)
|
||||
}
|
||||
|
||||
func testEventStoreCleanupOldEvents(t *testing.T, ctx context.Context, store *eventStore) {
|
||||
now := time.Now()
|
||||
oldRV := snowflakeFromTime(now.Add(-48 * time.Hour)) // 48 hours ago
|
||||
recentRV := snowflakeFromTime(now.Add(-1 * time.Hour)) // 1 hour ago
|
||||
@@ -565,9 +615,12 @@ func TestEventStore_CleanupOldEvents(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEventStore_CleanupOldEvents_NoOldEvents(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := setupTestEventStore(t)
|
||||
runEventStoreTestWith(t, "badger", setupTestEventStore, testEventStoreCleanupOldEventsNoOldEvents)
|
||||
// enable this when sqlkv is ready
|
||||
// runEventStoreTestWith(t, "sqlkv", setupTestEventStoreSqlKv, testEventStoreCleanupOldEventsNoOldEvents)
|
||||
}
|
||||
|
||||
func testEventStoreCleanupOldEventsNoOldEvents(t *testing.T, ctx context.Context, store *eventStore) {
|
||||
// Create an event 1 hour old
|
||||
rv := snowflakeFromTime(time.Now().Add(-1 * time.Hour))
|
||||
event := Event{
|
||||
@@ -603,9 +656,12 @@ func TestEventStore_CleanupOldEvents_NoOldEvents(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEventStore_CleanupOldEvents_EmptyStore(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := setupTestEventStore(t)
|
||||
runEventStoreTestWith(t, "badger", setupTestEventStore, testEventStoreCleanupOldEventsEmptyStore)
|
||||
// enable this when sqlkv is ready
|
||||
// runEventStoreTestWith(t, "sqlkv", setupTestEventStoreSqlKv, testEventStoreCleanupOldEventsEmptyStore)
|
||||
}
|
||||
|
||||
func testEventStoreCleanupOldEventsEmptyStore(t *testing.T, ctx context.Context, store *eventStore) {
|
||||
// Clean up events from empty store
|
||||
deletedCount, err := store.CleanupOldEvents(ctx, time.Now().Add(-24*time.Hour))
|
||||
require.NoError(t, err)
|
||||
@@ -613,9 +669,12 @@ func TestEventStore_CleanupOldEvents_EmptyStore(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEventStore_BatchDelete(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := setupTestEventStore(t)
|
||||
runEventStoreTestWith(t, "badger", setupTestEventStore, testEventStoreBatchDelete)
|
||||
// enable this when sqlkv is ready
|
||||
// runEventStoreTestWith(t, "sqlkv", setupTestEventStoreSqlKv, testEventStoreBatchDelete)
|
||||
}
|
||||
|
||||
func testEventStoreBatchDelete(t *testing.T, ctx context.Context, store *eventStore) {
|
||||
// Create multiple events (more than batch size to test batching)
|
||||
eventKeys := make([]string, 75)
|
||||
for i := 0; i < 75; i++ {
|
||||
@@ -722,9 +781,12 @@ func TestSnowflakeFromTime(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestListKeysSince_WithSnowflakeTime(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
store := setupTestEventStore(t)
|
||||
runEventStoreTestWith(t, "badger", setupTestEventStore, testListKeysSinceWithSnowflakeTime)
|
||||
// enable this when sqlkv is ready
|
||||
// runEventStoreTestWith(t, "sqlkv", setupTestEventStoreSqlKv, testListKeysSinceWithSnowflakeTime)
|
||||
}
|
||||
|
||||
func testListKeysSinceWithSnowflakeTime(t *testing.T, ctx context.Context, store *eventStore) {
|
||||
// Create events with snowflake-based resource versions at different times
|
||||
now := time.Now()
|
||||
events := []Event{
|
||||
|
||||
@@ -6,6 +6,9 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana-app-sdk/logging"
|
||||
"github.com/grafana/grafana/pkg/infra/db"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/sql/db/dbimpl"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@@ -22,6 +25,18 @@ func setupTestNotifier(t *testing.T) (*notifier, *eventStore) {
|
||||
return notifier, eventStore
|
||||
}
|
||||
|
||||
// nolint:unused
|
||||
func setupTestNotifierSqlKv(t *testing.T) (*notifier, *eventStore) {
|
||||
dbstore := db.InitTestDB(t)
|
||||
eDB, err := dbimpl.ProvideResourceDB(dbstore, setting.NewCfg(), nil)
|
||||
require.NoError(t, err)
|
||||
kv, err := NewSQLKV(eDB)
|
||||
require.NoError(t, err)
|
||||
eventStore := newEventStore(kv)
|
||||
notifier := newNotifier(eventStore, notifierOptions{log: &logging.NoOpLogger{}})
|
||||
return notifier, eventStore
|
||||
}
|
||||
|
||||
func TestNewNotifier(t *testing.T) {
|
||||
notifier, _ := setupTestNotifier(t)
|
||||
|
||||
@@ -35,10 +50,21 @@ func TestDefaultWatchOptions(t *testing.T) {
|
||||
assert.Equal(t, defaultBufferSize, opts.BufferSize)
|
||||
}
|
||||
|
||||
func TestNotifier_lastEventResourceVersion(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
notifier, eventStore := setupTestNotifier(t)
|
||||
func runNotifierTestWith(t *testing.T, storeName string, newStoreFn func(*testing.T) (*notifier, *eventStore), testFn func(*testing.T, context.Context, *notifier, *eventStore)) {
|
||||
t.Run(storeName, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
notifier, eventStore := newStoreFn(t)
|
||||
testFn(t, ctx, notifier, eventStore)
|
||||
})
|
||||
}
|
||||
|
||||
func TestNotifier_lastEventResourceVersion(t *testing.T) {
|
||||
runNotifierTestWith(t, "badger", setupTestNotifier, testNotifierLastEventResourceVersion)
|
||||
// enable this when sqlkv is ready
|
||||
// runNotifierTestWith(t, "sqlkv", setupTestNotifierSqlKv, testNotifierLastEventResourceVersion)
|
||||
}
|
||||
|
||||
func testNotifierLastEventResourceVersion(t *testing.T, ctx context.Context, notifier *notifier, eventStore *eventStore) {
|
||||
// Test with no events
|
||||
rv, err := notifier.lastEventResourceVersion(ctx)
|
||||
assert.Error(t, err)
|
||||
@@ -85,8 +111,12 @@ func TestNotifier_lastEventResourceVersion(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNotifier_cachekey(t *testing.T) {
|
||||
notifier, _ := setupTestNotifier(t)
|
||||
runNotifierTestWith(t, "badger", setupTestNotifier, testNotifierCachekey)
|
||||
// enable this when sqlkv is ready
|
||||
// runNotifierTestWith(t, "sqlkv", setupTestNotifierSqlKv, testNotifierCachekey)
|
||||
}
|
||||
|
||||
func testNotifierCachekey(t *testing.T, ctx context.Context, notifier *notifier, eventStore *eventStore) {
|
||||
tests := []struct {
|
||||
name string
|
||||
event Event
|
||||
@@ -136,10 +166,14 @@ func TestNotifier_cachekey(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNotifier_Watch_NoEvents(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
|
||||
defer cancel()
|
||||
runNotifierTestWith(t, "badger", setupTestNotifier, testNotifierWatchNoEvents)
|
||||
// enable this when sqlkv is ready
|
||||
// runNotifierTestWith(t, "sqlkv", setupTestNotifierSqlKv, testNotifierWatchNoEvents)
|
||||
}
|
||||
|
||||
notifier, eventStore := setupTestNotifier(t)
|
||||
func testNotifierWatchNoEvents(t *testing.T, ctx context.Context, notifier *notifier, eventStore *eventStore) {
|
||||
ctx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
// Add at least one event so that lastEventResourceVersion doesn't return ErrNotFound
|
||||
initialEvent := Event{
|
||||
@@ -174,10 +208,14 @@ func TestNotifier_Watch_NoEvents(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNotifier_Watch_WithExistingEvents(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
runNotifierTestWith(t, "badger", setupTestNotifier, testNotifierWatchWithExistingEvents)
|
||||
// enable this when sqlkv is ready
|
||||
// runNotifierTestWith(t, "sqlkv", setupTestNotifierSqlKv, testNotifierWatchWithExistingEvents)
|
||||
}
|
||||
|
||||
notifier, eventStore := setupTestNotifier(t)
|
||||
func testNotifierWatchWithExistingEvents(t *testing.T, ctx context.Context, notifier *notifier, eventStore *eventStore) {
|
||||
ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Save some initial events
|
||||
initialEvents := []Event{
|
||||
@@ -245,10 +283,14 @@ func TestNotifier_Watch_WithExistingEvents(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNotifier_Watch_EventDeduplication(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
runNotifierTestWith(t, "badger", setupTestNotifier, testNotifierWatchEventDeduplication)
|
||||
// enable this when sqlkv is ready
|
||||
// runNotifierTestWith(t, "sqlkv", setupTestNotifierSqlKv, testNotifierWatchEventDeduplication)
|
||||
}
|
||||
|
||||
notifier, eventStore := setupTestNotifier(t)
|
||||
func testNotifierWatchEventDeduplication(t *testing.T, ctx context.Context, notifier *notifier, eventStore *eventStore) {
|
||||
ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Add an initial event so that lastEventResourceVersion doesn't return ErrNotFound
|
||||
initialEvent := Event{
|
||||
@@ -308,9 +350,13 @@ func TestNotifier_Watch_EventDeduplication(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNotifier_Watch_ContextCancellation(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
runNotifierTestWith(t, "badger", setupTestNotifier, testNotifierWatchContextCancellation)
|
||||
// enable this when sqlkv is ready
|
||||
// runNotifierTestWith(t, "sqlkv", setupTestNotifierSqlKv, testNotifierWatchContextCancellation)
|
||||
}
|
||||
|
||||
notifier, eventStore := setupTestNotifier(t)
|
||||
func testNotifierWatchContextCancellation(t *testing.T, ctx context.Context, notifier *notifier, eventStore *eventStore) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
// Add an initial event so that lastEventResourceVersion doesn't return ErrNotFound
|
||||
initialEvent := Event{
|
||||
@@ -351,10 +397,14 @@ func TestNotifier_Watch_ContextCancellation(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNotifier_Watch_MultipleEvents(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
runNotifierTestWith(t, "badger", setupTestNotifier, testNotifierWatchMultipleEvents)
|
||||
// enable this when sqlkv is ready
|
||||
// runNotifierTestWith(t, "sqlkv", setupTestNotifierSqlKv, testNotifierWatchMultipleEvents)
|
||||
}
|
||||
|
||||
notifier, eventStore := setupTestNotifier(t)
|
||||
func testNotifierWatchMultipleEvents(t *testing.T, ctx context.Context, notifier *notifier, eventStore *eventStore) {
|
||||
ctx, cancel := context.WithTimeout(ctx, 3*time.Second)
|
||||
defer cancel()
|
||||
rv := time.Now().UnixNano()
|
||||
// Add an initial event so that lastEventResourceVersion doesn't return ErrNotFound
|
||||
initialEvent := Event{
|
||||
|
||||
70
pkg/storage/unified/resource/sqlkv.go
Normal file
70
pkg/storage/unified/resource/sqlkv.go
Normal file
@@ -0,0 +1,70 @@
|
||||
package resource
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"iter"
|
||||
|
||||
"github.com/grafana/grafana/pkg/storage/unified/sql/db"
|
||||
)
|
||||
|
||||
var _ KV = &sqlKV{}
|
||||
|
||||
type sqlKV struct {
|
||||
dbProvider db.DBProvider
|
||||
db db.DB
|
||||
}
|
||||
|
||||
func NewSQLKV(dbProvider db.DBProvider) (KV, error) {
|
||||
if dbProvider == nil {
|
||||
return nil, fmt.Errorf("dbProvider is required")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
dbConn, err := dbProvider.Init(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing DB: %w", err)
|
||||
}
|
||||
|
||||
return &sqlKV{
|
||||
dbProvider: dbProvider,
|
||||
db: dbConn,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (k *sqlKV) Ping(ctx context.Context) error {
|
||||
return k.db.PingContext(ctx)
|
||||
}
|
||||
|
||||
func (k *sqlKV) Keys(ctx context.Context, section string, opt ListOptions) iter.Seq2[string, error] {
|
||||
return func(yield func(string, error) bool) {
|
||||
panic("not implemented!")
|
||||
}
|
||||
}
|
||||
|
||||
func (k *sqlKV) Get(ctx context.Context, section string, key string) (io.ReadCloser, error) {
|
||||
panic("not implemented!")
|
||||
}
|
||||
|
||||
func (k *sqlKV) BatchGet(ctx context.Context, section string, keys []string) iter.Seq2[KeyValue, error] {
|
||||
return func(yield func(KeyValue, error) bool) {
|
||||
panic("not implemented!")
|
||||
}
|
||||
}
|
||||
|
||||
func (k *sqlKV) Save(ctx context.Context, section string, key string) (io.WriteCloser, error) {
|
||||
panic("not implemented!")
|
||||
}
|
||||
|
||||
func (k *sqlKV) Delete(ctx context.Context, section string, key string) error {
|
||||
panic("not implemented!")
|
||||
}
|
||||
|
||||
func (k *sqlKV) BatchDelete(ctx context.Context, section string, keys []string) error {
|
||||
panic("not implemented!")
|
||||
}
|
||||
|
||||
func (k *sqlKV) UnixTimestamp(ctx context.Context) (int64, error) {
|
||||
panic("not implemented!")
|
||||
}
|
||||
@@ -70,7 +70,12 @@ type kvStorageBackend struct {
|
||||
//reg prometheus.Registerer
|
||||
}
|
||||
|
||||
var _ StorageBackend = &kvStorageBackend{}
|
||||
var _ KVBackend = &kvStorageBackend{}
|
||||
|
||||
type KVBackend interface {
|
||||
StorageBackend
|
||||
resourcepb.DiagnosticsServer
|
||||
}
|
||||
|
||||
type KVBackendOptions struct {
|
||||
KvStore KV
|
||||
@@ -82,7 +87,7 @@ type KVBackendOptions struct {
|
||||
Reg prometheus.Registerer // TODO add metrics
|
||||
}
|
||||
|
||||
func NewKVStorageBackend(opts KVBackendOptions) (StorageBackend, error) {
|
||||
func NewKVStorageBackend(opts KVBackendOptions) (KVBackend, error) {
|
||||
ctx := context.Background()
|
||||
kv := opts.KvStore
|
||||
|
||||
@@ -126,6 +131,18 @@ func NewKVStorageBackend(opts KVBackendOptions) (StorageBackend, error) {
|
||||
return backend, nil
|
||||
}
|
||||
|
||||
func (k *kvStorageBackend) IsHealthy(ctx context.Context, _ *resourcepb.HealthCheckRequest) (*resourcepb.HealthCheckResponse, error) {
|
||||
type pinger interface {
|
||||
Ping(context.Context) error
|
||||
}
|
||||
if p, ok := k.kv.(pinger); ok {
|
||||
if err := p.Ping(ctx); err != nil {
|
||||
return &resourcepb.HealthCheckResponse{Status: resourcepb.HealthCheckResponse_NOT_SERVING}, fmt.Errorf("KV store health check failed: %w", err)
|
||||
}
|
||||
}
|
||||
return &resourcepb.HealthCheckResponse{Status: resourcepb.HealthCheckResponse_SERVING}, nil
|
||||
}
|
||||
|
||||
// runCleanupOldEvents starts a background goroutine that periodically cleans up old events
|
||||
func (k *kvStorageBackend) runCleanupOldEvents(ctx context.Context) {
|
||||
// Run cleanup every hour
|
||||
|
||||
@@ -2,11 +2,8 @@ package migrations
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/bwmarrin/snowflake"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
|
||||
"github.com/grafana/grafana/pkg/util/xorm"
|
||||
)
|
||||
|
||||
func initResourceTables(mg *migrator.Migrator) string {
|
||||
@@ -207,142 +204,5 @@ func initResourceTables(mg *migrator.Migrator) string {
|
||||
Name: "IDX_resource_history_key_path",
|
||||
}))
|
||||
|
||||
mg.AddMigration("resource_history key_path backfill", &ResourceHistoryKeyPathBackfillMigration{})
|
||||
|
||||
return marker
|
||||
}
|
||||
|
||||
type ResourceHistoryKeyPathBackfillMigration struct {
|
||||
migrator.MigrationBase
|
||||
}
|
||||
|
||||
func (m *ResourceHistoryKeyPathBackfillMigration) SQL(_ migrator.Dialect) string {
|
||||
return "resource_history key_path backfill code migration"
|
||||
}
|
||||
|
||||
func (m *ResourceHistoryKeyPathBackfillMigration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
|
||||
rows, err := getResourceHistoryRows(sess, mg, resourceHistoryRow{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for len(rows) > 0 {
|
||||
if err := updateResourceHistoryKeyPath(sess, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rows, err = getResourceHistoryRows(sess, mg, rows[len(rows)-1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func updateResourceHistoryKeyPath(sess *xorm.Session, rows []resourceHistoryRow) error {
|
||||
if len(rows) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
updates := []resourceHistoryRow{}
|
||||
|
||||
for _, row := range rows {
|
||||
if row.KeyPath == "" {
|
||||
row.KeyPath = parseKeyPath(row)
|
||||
updates = append(updates, row)
|
||||
}
|
||||
}
|
||||
|
||||
if len(updates) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
guids := ""
|
||||
setCases := "CASE"
|
||||
for _, row := range updates {
|
||||
guids += fmt.Sprintf("'%s',", row.GUID)
|
||||
setCases += fmt.Sprintf(" WHEN guid = '%s' THEN '%s'", row.GUID, row.KeyPath)
|
||||
}
|
||||
|
||||
guids = strings.TrimRight(guids, ",")
|
||||
setCases += " ELSE key_path END "
|
||||
|
||||
// the query will look like this
|
||||
// UPDATE resource_history
|
||||
// SET key_path = CASE
|
||||
// WHEN guid = '1402de51-669b-4206-8a6c-005a00eee6e3' then 'unified/data/folder.grafana.app/folders/default/cf6lylpvls000c/1998492888241012800~created~'
|
||||
// WHEN guid = '8842cc56-f22b-45e1-82b1-99759cd443b3' then 'unified/data/dashboard.grafana.app/dashboards/default/adzvfhp/1998492902577144677~created~cf6lylpvls000c'
|
||||
// ELSE key_path END
|
||||
// WHERE guid IN ('1402de51-669b-4206-8a6c-005a00eee6e3', '8842cc56-f22b-45e1-82b1-99759cd443b3')
|
||||
// AND key_path = '';
|
||||
sql := fmt.Sprintf(`
|
||||
UPDATE resource_history
|
||||
SET key_path = %s
|
||||
WHERE guid IN (%s)
|
||||
AND key_path = '';
|
||||
`, setCases, guids)
|
||||
|
||||
if _, err := sess.Exec(sql); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseKeyPath(row resourceHistoryRow) string {
|
||||
var action string
|
||||
switch row.Action {
|
||||
case 1:
|
||||
action = "created"
|
||||
case 2:
|
||||
action = "updated"
|
||||
case 3:
|
||||
action = "deleted"
|
||||
}
|
||||
return fmt.Sprintf("unified/data/%s/%s/%s/%s/%d~%s~%s", row.Group, row.Resource, row.Namespace, row.Name, snowflakeFromRv(row.ResourceVersion), action, row.Folder)
|
||||
}
|
||||
|
||||
func snowflakeFromRv(rv int64) int64 {
|
||||
return (((rv / 1000) - snowflake.Epoch) << (snowflake.NodeBits + snowflake.StepBits)) + (rv % 1000)
|
||||
}
|
||||
|
||||
type resourceHistoryRow struct {
|
||||
GUID string `xorm:"guid"`
|
||||
Group string `xorm:"group"`
|
||||
Resource string `xorm:"resource"`
|
||||
Namespace string `xorm:"namespace"`
|
||||
Name string `xorm:"name"`
|
||||
ResourceVersion int64 `xorm:"resource_version"`
|
||||
Action int64 `xorm:"action"`
|
||||
Folder string `xorm:"folder"`
|
||||
KeyPath string `xorm:"key_path"`
|
||||
}
|
||||
|
||||
func getResourceHistoryRows(sess *xorm.Session, mg *migrator.Migrator, continueRow resourceHistoryRow) ([]resourceHistoryRow, error) {
|
||||
var rows []resourceHistoryRow
|
||||
cols := fmt.Sprintf(
|
||||
"%s, %s, %s, %s, %s, %s, %s, %s, %s",
|
||||
mg.Dialect.Quote("guid"),
|
||||
mg.Dialect.Quote("group"),
|
||||
mg.Dialect.Quote("resource"),
|
||||
mg.Dialect.Quote("namespace"),
|
||||
mg.Dialect.Quote("name"),
|
||||
mg.Dialect.Quote("resource_version"),
|
||||
mg.Dialect.Quote("action"),
|
||||
mg.Dialect.Quote("folder"),
|
||||
mg.Dialect.Quote("key_path"))
|
||||
sql := fmt.Sprintf(`
|
||||
SELECT %s
|
||||
FROM resource_history
|
||||
WHERE (resource_version > %d OR (resource_version = %d AND guid > '%s'))
|
||||
AND key_path = ''
|
||||
ORDER BY resource_version ASC, guid ASC
|
||||
LIMIT 1000;
|
||||
`, cols, continueRow.ResourceVersion, continueRow.ResourceVersion, continueRow.GUID)
|
||||
if err := sess.SQL(sql).Find(&rows); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return rows, nil
|
||||
}
|
||||
|
||||
@@ -97,22 +97,41 @@ func NewResourceServer(opts ServerOptions) (resource.ResourceServer, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
isHA := isHighAvailabilityEnabled(opts.Cfg.SectionWithEnvOverrides("database"),
|
||||
opts.Cfg.SectionWithEnvOverrides("resource_api"))
|
||||
if opts.Cfg.EnableSQLKVBackend {
|
||||
sqlkv, err := resource.NewSQLKV(eDB)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating sqlkv: %s", err)
|
||||
}
|
||||
|
||||
backend, err := NewBackend(BackendOptions{
|
||||
DBProvider: eDB,
|
||||
Reg: opts.Reg,
|
||||
IsHA: isHA,
|
||||
storageMetrics: opts.StorageMetrics,
|
||||
LastImportTimeMaxAge: opts.SearchOptions.MaxIndexAge, // No need to keep last_import_times older than max index age.
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
kvBackend, err := resource.NewKVStorageBackend(resource.KVBackendOptions{
|
||||
KvStore: sqlkv,
|
||||
Tracer: opts.Tracer,
|
||||
Reg: opts.Reg,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating kv backend: %s", err)
|
||||
}
|
||||
|
||||
serverOptions.Backend = kvBackend
|
||||
serverOptions.Diagnostics = kvBackend
|
||||
} else {
|
||||
isHA := isHighAvailabilityEnabled(opts.Cfg.SectionWithEnvOverrides("database"),
|
||||
opts.Cfg.SectionWithEnvOverrides("resource_api"))
|
||||
|
||||
backend, err := NewBackend(BackendOptions{
|
||||
DBProvider: eDB,
|
||||
Reg: opts.Reg,
|
||||
IsHA: isHA,
|
||||
storageMetrics: opts.StorageMetrics,
|
||||
LastImportTimeMaxAge: opts.SearchOptions.MaxIndexAge, // No need to keep last_import_times older than max index age.
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serverOptions.Backend = backend
|
||||
serverOptions.Diagnostics = backend
|
||||
serverOptions.Lifecycle = backend
|
||||
}
|
||||
serverOptions.Backend = backend
|
||||
serverOptions.Diagnostics = backend
|
||||
serverOptions.Lifecycle = backend
|
||||
}
|
||||
|
||||
serverOptions.Search = opts.SearchOptions
|
||||
|
||||
@@ -35,7 +35,8 @@ type NewKVFunc func(ctx context.Context) resource.KV
|
||||
|
||||
// KVTestOptions configures which tests to run
|
||||
type KVTestOptions struct {
|
||||
NSPrefix string // namespace prefix for isolation
|
||||
SkipTests map[string]bool
|
||||
NSPrefix string // namespace prefix for isolation
|
||||
}
|
||||
|
||||
// GenerateRandomKVPrefix creates a random namespace prefix for test isolation
|
||||
@@ -72,6 +73,11 @@ func RunKVTest(t *testing.T, newKV NewKVFunc, opts *KVTestOptions) {
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
if shouldSkip := opts.SkipTests[tc.name]; shouldSkip {
|
||||
t.Logf("Skipping test: %s", tc.name)
|
||||
continue
|
||||
}
|
||||
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
tc.fn(t, newKV(context.Background()), opts.NSPrefix)
|
||||
})
|
||||
|
||||
@@ -7,7 +7,11 @@ import (
|
||||
badger "github.com/dgraph-io/badger/v4"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/grafana/grafana/pkg/infra/db"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resource"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/sql/db/dbimpl"
|
||||
"github.com/grafana/grafana/pkg/tests/testsuite"
|
||||
)
|
||||
|
||||
func TestBadgerKV(t *testing.T) {
|
||||
@@ -26,3 +30,33 @@ func TestBadgerKV(t *testing.T) {
|
||||
NSPrefix: "badger-kv-test",
|
||||
})
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
testsuite.Run(m)
|
||||
}
|
||||
|
||||
func TestSQLKV(t *testing.T) {
|
||||
RunKVTest(t, func(ctx context.Context) resource.KV {
|
||||
dbstore := db.InitTestDB(t)
|
||||
eDB, err := dbimpl.ProvideResourceDB(dbstore, setting.NewCfg(), nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
kv, err := resource.NewSQLKV(eDB)
|
||||
require.NoError(t, err)
|
||||
return kv
|
||||
}, &KVTestOptions{
|
||||
NSPrefix: "sql-kv-test",
|
||||
SkipTests: map[string]bool{
|
||||
TestKVGet: true,
|
||||
TestKVSave: true,
|
||||
TestKVDelete: true,
|
||||
TestKVKeys: true,
|
||||
TestKVKeysWithLimits: true,
|
||||
TestKVKeysWithSort: true,
|
||||
TestKVConcurrent: true,
|
||||
TestKVUnixTimestamp: true,
|
||||
TestKVBatchGet: true,
|
||||
TestKVBatchDelete: true,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -7,7 +7,11 @@ import (
|
||||
badger "github.com/dgraph-io/badger/v4"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/grafana/grafana/pkg/infra/db"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resource"
|
||||
sqldb "github.com/grafana/grafana/pkg/storage/unified/sql/db"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/sql/db/dbimpl"
|
||||
)
|
||||
|
||||
func TestBadgerKVStorageBackend(t *testing.T) {
|
||||
@@ -25,7 +29,7 @@ func TestBadgerKVStorageBackend(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return backend
|
||||
}, &TestOptions{
|
||||
NSPrefix: "kvstorage-test",
|
||||
NSPrefix: "badgerkvstorage-test",
|
||||
SkipTests: map[string]bool{
|
||||
// TODO: fix these tests and remove this skip
|
||||
TestBlobSupport: true,
|
||||
@@ -35,3 +39,50 @@ func TestBadgerKVStorageBackend(t *testing.T) {
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestSQLKVStorageBackend(t *testing.T) {
|
||||
newBackendFunc := func(ctx context.Context) (resource.StorageBackend, sqldb.DB) {
|
||||
dbstore := db.InitTestDB(t)
|
||||
eDB, err := dbimpl.ProvideResourceDB(dbstore, setting.NewCfg(), nil)
|
||||
require.NoError(t, err)
|
||||
kv, err := resource.NewSQLKV(eDB)
|
||||
require.NoError(t, err)
|
||||
kvOpts := resource.KVBackendOptions{
|
||||
KvStore: kv,
|
||||
}
|
||||
backend, err := resource.NewKVStorageBackend(kvOpts)
|
||||
require.NoError(t, err)
|
||||
db, err := eDB.Init(ctx)
|
||||
require.NoError(t, err)
|
||||
return backend, db
|
||||
}
|
||||
|
||||
RunStorageBackendTest(t, func(ctx context.Context) resource.StorageBackend {
|
||||
backend, _ := newBackendFunc(ctx)
|
||||
return backend
|
||||
}, &TestOptions{
|
||||
NSPrefix: "sqlkvstorage-test",
|
||||
SkipTests: map[string]bool{
|
||||
TestHappyPath: true,
|
||||
TestWatchWriteEvents: true,
|
||||
TestList: true,
|
||||
TestBlobSupport: true,
|
||||
TestGetResourceStats: true,
|
||||
TestListHistory: true,
|
||||
TestListHistoryErrorReporting: true,
|
||||
TestListModifiedSince: true,
|
||||
TestListTrash: true,
|
||||
TestCreateNewResource: true,
|
||||
TestGetResourceLastImportTime: true,
|
||||
TestOptimisticLocking: true,
|
||||
TestKeyPathGeneration: true,
|
||||
},
|
||||
})
|
||||
|
||||
RunSQLStorageBackendCompatibilityTest(t, newBackendFunc, &TestOptions{
|
||||
NSPrefix: "sqlkvstorage-compatibility-test",
|
||||
SkipTests: map[string]bool{
|
||||
TestKeyPathGeneration: true,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/util/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -68,22 +69,45 @@ func TestIntegrationProvisioning_DeleteResources(t *testing.T) {
|
||||
|
||||
helper.validateManagedDashboardsFolderMetadata(t, ctx, repo, dashboards.Items)
|
||||
|
||||
t.Run("delete individual dashboard file, should delete from repo and grafana", func(t *testing.T) {
|
||||
t.Run("delete individual dashboard file on configured branch should succeed", func(t *testing.T) {
|
||||
result := helper.AdminREST.Delete().
|
||||
Namespace("default").
|
||||
Resource("repositories").
|
||||
Name(repo).
|
||||
SubResource("files", "dashboard1.json").
|
||||
Do(ctx)
|
||||
require.NoError(t, result.Error())
|
||||
_, err = helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "dashboard1.json")
|
||||
require.Error(t, err)
|
||||
dashboards, err = helper.DashboardsV1.Resource.List(ctx, metav1.ListOptions{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(dashboards.Items))
|
||||
require.NoError(t, result.Error(), "delete file on configured branch should succeed")
|
||||
|
||||
// Verify the dashboard is removed from Grafana
|
||||
const allPanelsUID = "n1jR8vnnz" // UID from all-panels.json
|
||||
_, err := helper.DashboardsV1.Resource.Get(ctx, allPanelsUID, metav1.GetOptions{})
|
||||
require.Error(t, err, "dashboard should be deleted from Grafana")
|
||||
require.True(t, apierrors.IsNotFound(err), "should return NotFound for deleted dashboard")
|
||||
})
|
||||
|
||||
t.Run("delete folder, should delete from repo and grafana all nested resources too", func(t *testing.T) {
|
||||
t.Run("delete individual dashboard file on branch should succeed", func(t *testing.T) {
|
||||
// Create a branch first by creating a file on a branch
|
||||
branchRef := "test-branch-delete"
|
||||
helper.CopyToProvisioningPath(t, "testdata/text-options.json", "branch-test-delete.json")
|
||||
|
||||
// Delete on branch should work
|
||||
result := helper.AdminREST.Delete().
|
||||
Namespace("default").
|
||||
Resource("repositories").
|
||||
Name(repo).
|
||||
SubResource("files", "branch-test-delete.json").
|
||||
Param("ref", branchRef).
|
||||
Do(ctx)
|
||||
// Note: This might fail if branch doesn't exist, but the important thing is it doesn't return MethodNotAllowed
|
||||
if result.Error() != nil {
|
||||
var statusErr *apierrors.StatusError
|
||||
if errors.As(result.Error(), &statusErr) {
|
||||
require.NotEqual(t, int32(http.StatusMethodNotAllowed), statusErr.ErrStatus.Code, "should not return MethodNotAllowed for branch delete")
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("delete folder on configured branch should return MethodNotAllowed", func(t *testing.T) {
|
||||
// need to delete directly through the url, because the k8s client doesn't support `/` in a subresource
|
||||
// but that is needed by gitsync to know that it is a folder
|
||||
addr := helper.GetEnv().Server.HTTPServer.Listener.Addr().String()
|
||||
@@ -94,27 +118,11 @@ func TestIntegrationProvisioning_DeleteResources(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
// nolint:errcheck
|
||||
defer resp.Body.Close()
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||
require.Equal(t, http.StatusMethodNotAllowed, resp.StatusCode, "should return MethodNotAllowed for configured branch folder delete")
|
||||
|
||||
// should be deleted from the repo
|
||||
_, err = helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "folder")
|
||||
require.Error(t, err)
|
||||
// Verify a file inside the folder still exists (operation was rejected)
|
||||
_, err = helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "folder", "dashboard2.json")
|
||||
require.Error(t, err)
|
||||
_, err = helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "folder", "nested")
|
||||
require.Error(t, err)
|
||||
_, err = helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "folder", "nested", "dashboard3.json")
|
||||
require.Error(t, err)
|
||||
|
||||
// all should be deleted from grafana
|
||||
for _, d := range dashboards.Items {
|
||||
_, err = helper.DashboardsV1.Resource.Get(ctx, d.GetName(), metav1.GetOptions{})
|
||||
require.Error(t, err)
|
||||
}
|
||||
for _, f := range folders.Items {
|
||||
_, err = helper.Folders.Resource.Get(ctx, f.GetName(), metav1.GetOptions{})
|
||||
require.Error(t, err)
|
||||
}
|
||||
require.NoError(t, err, "file inside folder should still exist after rejected delete")
|
||||
})
|
||||
|
||||
t.Run("deleting a non-existent file should fail", func(t *testing.T) {
|
||||
@@ -158,10 +166,10 @@ func TestIntegrationProvisioning_MoveResources(t *testing.T) {
|
||||
require.NoError(t, err, "original dashboard should exist in Grafana")
|
||||
require.Equal(t, repo, obj.GetAnnotations()[utils.AnnoKeyManagerIdentity])
|
||||
|
||||
t.Run("move file without content change", func(t *testing.T) {
|
||||
t.Run("move file without content change on configured branch should succeed", func(t *testing.T) {
|
||||
const targetPath = "moved/simple-move.json"
|
||||
|
||||
// Perform the move operation using helper function
|
||||
// Perform the move operation using helper function (no ref = configured branch)
|
||||
resp := helper.postFilesRequest(t, repo, filesPostOptions{
|
||||
targetPath: targetPath,
|
||||
originalPath: "all-panels.json",
|
||||
@@ -169,32 +177,52 @@ func TestIntegrationProvisioning_MoveResources(t *testing.T) {
|
||||
})
|
||||
// nolint:errcheck
|
||||
defer resp.Body.Close()
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode, "move operation should succeed")
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode, "move operation on configured branch should succeed")
|
||||
|
||||
// Verify the file moved in the repository
|
||||
movedObj, err := helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "moved", "simple-move.json")
|
||||
require.NoError(t, err, "moved file should exist in repository")
|
||||
// Verify file was moved - read from new location
|
||||
_, err = helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "moved", "simple-move.json")
|
||||
require.NoError(t, err, "file should exist at new location")
|
||||
|
||||
// Check the content is preserved (verify it's still the all-panels dashboard)
|
||||
resource, _, err := unstructured.NestedMap(movedObj.Object, "resource")
|
||||
require.NoError(t, err)
|
||||
dryRun, _, err := unstructured.NestedMap(resource, "dryRun")
|
||||
require.NoError(t, err)
|
||||
title, _, err := unstructured.NestedString(dryRun, "spec", "title")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "Panel tests - All panels", title, "content should be preserved")
|
||||
|
||||
// Verify original file no longer exists
|
||||
// Verify file no longer exists at old location
|
||||
_, err = helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "all-panels.json")
|
||||
require.Error(t, err, "original file should no longer exist")
|
||||
|
||||
// Verify dashboard still exists in Grafana with same content but may have updated path references
|
||||
helper.SyncAndWait(t, repo, nil)
|
||||
_, err = helper.DashboardsV1.Resource.Get(ctx, allPanelsUID, metav1.GetOptions{})
|
||||
require.NoError(t, err, "dashboard should still exist in Grafana after move")
|
||||
require.Error(t, err, "file should not exist at old location")
|
||||
})
|
||||
|
||||
t.Run("move file to nested path without ref", func(t *testing.T) {
|
||||
t.Run("move file without content change on branch should succeed", func(t *testing.T) {
|
||||
const targetPath = "moved/simple-move-branch.json"
|
||||
branchRef := "test-branch-move"
|
||||
|
||||
// Perform the move operation using helper function with ref parameter
|
||||
resp := helper.postFilesRequest(t, repo, filesPostOptions{
|
||||
targetPath: targetPath,
|
||||
originalPath: "all-panels.json",
|
||||
message: "move file without content change",
|
||||
ref: branchRef,
|
||||
})
|
||||
// nolint:errcheck
|
||||
defer resp.Body.Close()
|
||||
// Note: This might fail if branch doesn't exist, but the important thing is it doesn't return MethodNotAllowed
|
||||
if resp.StatusCode == http.StatusMethodNotAllowed {
|
||||
t.Fatal("should not return MethodNotAllowed for branch move")
|
||||
}
|
||||
|
||||
// If move succeeded (not MethodNotAllowed), verify the file moved in the repository
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
movedObj, err := helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "moved", "simple-move-branch.json")
|
||||
require.NoError(t, err, "moved file should exist in repository")
|
||||
|
||||
// Check the content is preserved (verify it's still the all-panels dashboard)
|
||||
resource, _, err := unstructured.NestedMap(movedObj.Object, "resource")
|
||||
require.NoError(t, err)
|
||||
dryRun, _, err := unstructured.NestedMap(resource, "dryRun")
|
||||
require.NoError(t, err)
|
||||
title, _, err := unstructured.NestedString(dryRun, "spec", "title")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "Panel tests - All panels", title, "content should be preserved")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("move file to nested path on configured branch should succeed", func(t *testing.T) {
|
||||
// Test a different scenario: Move a file that was never synced to Grafana
|
||||
// This might reveal the issue if dashboard creation fails during move
|
||||
const sourceFile = "never-synced.json"
|
||||
@@ -203,7 +231,7 @@ func TestIntegrationProvisioning_MoveResources(t *testing.T) {
|
||||
// DO NOT sync - move the file immediately without it ever being in Grafana
|
||||
const targetPath = "deep/nested/timeline.json"
|
||||
|
||||
// Perform the move operation without the file ever being synced to Grafana
|
||||
// Perform the move operation without the file ever being synced to Grafana (no ref = configured branch)
|
||||
resp := helper.postFilesRequest(t, repo, filesPostOptions{
|
||||
targetPath: targetPath,
|
||||
originalPath: sourceFile,
|
||||
@@ -211,70 +239,25 @@ func TestIntegrationProvisioning_MoveResources(t *testing.T) {
|
||||
})
|
||||
// nolint:errcheck
|
||||
defer resp.Body.Close()
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode, "move operation should succeed")
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode, "move operation on configured branch should succeed")
|
||||
|
||||
// Check folders were created and validate hierarchy
|
||||
folderList, err := helper.Folders.Resource.List(ctx, metav1.ListOptions{})
|
||||
require.NoError(t, err, "should be able to list folders")
|
||||
// File should exist at new location
|
||||
_, err := helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "deep", "nested", "timeline.json")
|
||||
require.NoError(t, err, "file should exist at new nested location")
|
||||
|
||||
// Build a map of folder names to their objects for easier lookup
|
||||
folders := make(map[string]*unstructured.Unstructured)
|
||||
for _, folder := range folderList.Items {
|
||||
title, _, _ := unstructured.NestedString(folder.Object, "spec", "title")
|
||||
folders[title] = &folder
|
||||
parent, _, _ := unstructured.NestedString(folder.Object, "metadata", "annotations", "grafana.app/folder")
|
||||
t.Logf(" - %s: %s (parent: %s)", folder.GetName(), title, parent)
|
||||
}
|
||||
|
||||
// Validate expected folders exist with proper hierarchy
|
||||
// Expected structure: deep -> deep/nested
|
||||
deepFolderTitle := "deep"
|
||||
nestedFolderTitle := "nested"
|
||||
|
||||
// Validate "deep" folder exists and has no parent (is top-level)
|
||||
require.Contains(t, folders, deepFolderTitle, "deep folder should exist")
|
||||
f := folders[deepFolderTitle]
|
||||
deepFolderName := f.GetName()
|
||||
title, _, _ := unstructured.NestedString(f.Object, "spec", "title")
|
||||
require.Equal(t, deepFolderTitle, title, "deep folder should have correct title")
|
||||
parent, found, _ := unstructured.NestedString(f.Object, "metadata", "annotations", "grafana.app/folder")
|
||||
require.True(t, !found || parent == "", "deep folder should be top-level (no parent)")
|
||||
|
||||
// Validate "deep/nested" folder exists and has "deep" as parent
|
||||
require.Contains(t, folders, nestedFolderTitle, "nested folder should exist")
|
||||
f = folders[nestedFolderTitle]
|
||||
nestedFolderName := f.GetName()
|
||||
title, _, _ = unstructured.NestedString(f.Object, "spec", "title")
|
||||
require.Equal(t, nestedFolderTitle, title, "nested folder should have correct title")
|
||||
parent, _, _ = unstructured.NestedString(f.Object, "metadata", "annotations", "grafana.app/folder")
|
||||
require.Equal(t, deepFolderName, parent, "nested folder should have deep folder as parent")
|
||||
|
||||
// The key test: Check if dashboard was created in Grafana during move
|
||||
const timelineUID = "mIJjFy8Kz"
|
||||
dashboard, err := helper.DashboardsV1.Resource.Get(ctx, timelineUID, metav1.GetOptions{})
|
||||
require.NoError(t, err, "dashboard should exist in Grafana after moving never-synced file")
|
||||
dashboardFolder, _, _ := unstructured.NestedString(dashboard.Object, "metadata", "annotations", "grafana.app/folder")
|
||||
|
||||
// Validate dashboard is in the correct nested folder
|
||||
require.Equal(t, nestedFolderName, dashboardFolder, "dashboard should be in the nested folder")
|
||||
|
||||
// Verify the file moved in the repository
|
||||
_, err = helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "deep", "nested", "timeline.json")
|
||||
require.NoError(t, err, "moved file should exist in nested repository path")
|
||||
|
||||
// Verify the original file no longer exists in the repository
|
||||
// File should not exist at original location
|
||||
_, err = helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", sourceFile)
|
||||
require.Error(t, err, "original file should no longer exist in repository")
|
||||
require.Error(t, err, "file should not exist at original location after move")
|
||||
})
|
||||
|
||||
t.Run("move file with content update", func(t *testing.T) {
|
||||
const sourcePath = "moved/simple-move.json" // Use the file from previous test
|
||||
t.Run("move file with content update on configured branch should succeed", func(t *testing.T) {
|
||||
const sourcePath = "moved/simple-move.json" // Use the file we moved earlier
|
||||
const targetPath = "updated/content-updated.json"
|
||||
|
||||
// Use text-options.json content for the update
|
||||
updatedContent := helper.LoadFile("testdata/text-options.json")
|
||||
|
||||
// Perform move with content update using helper function
|
||||
// Perform move with content update using helper function (no ref = configured branch)
|
||||
resp := helper.postFilesRequest(t, repo, filesPostOptions{
|
||||
targetPath: targetPath,
|
||||
originalPath: sourcePath,
|
||||
@@ -283,51 +266,27 @@ func TestIntegrationProvisioning_MoveResources(t *testing.T) {
|
||||
})
|
||||
// nolint:errcheck
|
||||
defer resp.Body.Close()
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode, "move with content update should succeed")
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode, "move with content update on configured branch should succeed")
|
||||
|
||||
// Verify the moved file has updated content (should now be text-options dashboard)
|
||||
// File should exist at new location with updated content
|
||||
movedObj, err := helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "updated", "content-updated.json")
|
||||
require.NoError(t, err, "moved file should exist in repository")
|
||||
require.NoError(t, err, "file should exist at new location")
|
||||
|
||||
// Verify content was updated (should be text-options dashboard now)
|
||||
resource, _, err := unstructured.NestedMap(movedObj.Object, "resource")
|
||||
require.NoError(t, err)
|
||||
dryRun, _, err := unstructured.NestedMap(resource, "dryRun")
|
||||
require.NoError(t, err)
|
||||
title, _, err := unstructured.NestedString(dryRun, "spec", "title")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "Text options", title, "content should be updated to text-options dashboard")
|
||||
require.Equal(t, "Text options", title, "content should be updated")
|
||||
|
||||
// Check it has the expected UID from text-options.json
|
||||
name, _, err := unstructured.NestedString(dryRun, "metadata", "name")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "WZ7AhQiVz", name, "should have the UID from text-options.json")
|
||||
|
||||
// Verify source file no longer exists
|
||||
_, err = helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "moved", "simple-move.json")
|
||||
require.Error(t, err, "source file should no longer exist")
|
||||
|
||||
// Sync and verify the updated dashboard exists in Grafana
|
||||
helper.SyncAndWait(t, repo, nil)
|
||||
const textOptionsUID = "WZ7AhQiVz" // UID from text-options.json
|
||||
updatedDashboard, err := helper.DashboardsV1.Resource.Get(ctx, textOptionsUID, metav1.GetOptions{})
|
||||
require.NoError(t, err, "updated dashboard should exist in Grafana")
|
||||
|
||||
// Verify the original dashboard was deleted from Grafana
|
||||
_, err = helper.DashboardsV1.Resource.Get(ctx, allPanelsUID, metav1.GetOptions{})
|
||||
require.Error(t, err, "original dashboard should be deleted from Grafana")
|
||||
require.True(t, apierrors.IsNotFound(err))
|
||||
|
||||
// Verify the new dashboard has the updated content
|
||||
updatedTitle, _, err := unstructured.NestedString(updatedDashboard.Object, "spec", "title")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "Text options", updatedTitle)
|
||||
// Source file should not exist anymore
|
||||
_, err = helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", sourcePath)
|
||||
require.Error(t, err, "source file should not exist after move")
|
||||
})
|
||||
|
||||
t.Run("move directory", func(t *testing.T) {
|
||||
t.Skip("Skip as implementation is broken and leaves dashboards behind in the move")
|
||||
// FIXME: https://github.com/grafana/git-ui-sync-project/issues/379
|
||||
// The current implementation of moving directories is flawed.
|
||||
// It will be deprecated in favor of queuing a move job
|
||||
t.Run("move directory on configured branch should return MethodNotAllowed", func(t *testing.T) {
|
||||
// Create some files in a directory first using existing testdata files
|
||||
helper.CopyToProvisioningPath(t, "testdata/timeline-demo.json", "source-dir/timeline-demo.json")
|
||||
helper.CopyToProvisioningPath(t, "testdata/text-options.json", "source-dir/text-options.json")
|
||||
@@ -338,7 +297,7 @@ func TestIntegrationProvisioning_MoveResources(t *testing.T) {
|
||||
const sourceDir = "source-dir/"
|
||||
const targetDir = "moved-dir/"
|
||||
|
||||
// Move directory using helper function
|
||||
// Move directory using helper function (no ref = configured branch)
|
||||
resp := helper.postFilesRequest(t, repo, filesPostOptions{
|
||||
targetPath: targetDir,
|
||||
originalPath: sourceDir,
|
||||
@@ -346,20 +305,11 @@ func TestIntegrationProvisioning_MoveResources(t *testing.T) {
|
||||
})
|
||||
// nolint:errcheck
|
||||
defer resp.Body.Close()
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err, "should read response body")
|
||||
t.Logf("Response Body: %s", string(body))
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode, "directory move should succeed")
|
||||
require.Equal(t, http.StatusMethodNotAllowed, resp.StatusCode, "directory move on configured branch should return MethodNotAllowed")
|
||||
|
||||
// Verify source directory no longer exists
|
||||
_, err = helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "source-dir")
|
||||
require.Error(t, err, "source directory should no longer exist")
|
||||
|
||||
// Verify target directory and files exist
|
||||
_, err = helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "moved-dir", "timeline-demo.json")
|
||||
require.NoError(t, err, "moved timeline-demo.json should exist")
|
||||
_, err = helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "moved-dir", "text-options.json")
|
||||
require.NoError(t, err, "moved text-options.json should exist")
|
||||
// Verify files in source directory still exist (operation was rejected)
|
||||
_, err := helper.Repositories.Resource.Get(ctx, repo, metav1.GetOptions{}, "files", "source-dir", "timeline-demo.json")
|
||||
require.NoError(t, err, "file in source directory should still exist after rejected move")
|
||||
})
|
||||
|
||||
t.Run("error cases", func(t *testing.T) {
|
||||
@@ -566,7 +516,7 @@ func TestIntegrationProvisioning_FilesOwnershipProtection(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("DELETE resource owned by different repository - should fail", func(t *testing.T) {
|
||||
// Create a file manually in the second repo which is already in first one
|
||||
// Create a file manually in the second repo which has UID from first repo
|
||||
helper.CopyToProvisioningPath(t, "testdata/all-panels.json", "repo2/conflicting-delete.json")
|
||||
printFileTree(t, helper.ProvisioningPath)
|
||||
|
||||
@@ -590,10 +540,7 @@ func TestIntegrationProvisioning_FilesOwnershipProtection(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify it returns BadRequest (400) for ownership conflicts
|
||||
if !apierrors.IsBadRequest(err) {
|
||||
t.Errorf("Expected BadRequest error but got: %T - %v", err, err)
|
||||
return
|
||||
}
|
||||
require.True(t, apierrors.IsBadRequest(err), "Expected BadRequest error but got: %T - %v", err, err)
|
||||
|
||||
// Check error message contains ownership conflict information
|
||||
errorMsg := err.Error()
|
||||
@@ -607,7 +554,7 @@ func TestIntegrationProvisioning_FilesOwnershipProtection(t *testing.T) {
|
||||
targetPath: "moved-dashboard.json",
|
||||
originalPath: path.Join("dashboard2.json"),
|
||||
message: "attempt to move file from different repository",
|
||||
body: string(helper.LoadFile("testdata/all-panels.json")), // Content to move with the conflicting UID
|
||||
body: string(helper.LoadFile("testdata/all-panels.json")), // Content with the conflicting UID
|
||||
})
|
||||
// nolint:errcheck
|
||||
defer resp.Body.Close()
|
||||
@@ -644,3 +591,160 @@ func TestIntegrationProvisioning_FilesOwnershipProtection(t *testing.T) {
|
||||
require.Equal(t, repo2, dashboard2.GetAnnotations()[utils.AnnoKeyManagerIdentity], "repo2's dashboard should still be owned by repo2")
|
||||
})
|
||||
}
|
||||
|
||||
// TestIntegrationProvisioning_FilesAuthorization verifies that authorization
|
||||
// works correctly for file operations with the access checker
|
||||
func TestIntegrationProvisioning_FilesAuthorization(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration test")
|
||||
}
|
||||
|
||||
helper := runGrafana(t)
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a repository with a dashboard
|
||||
const repo = "authz-test-repo"
|
||||
helper.CreateRepo(t, TestRepo{
|
||||
Name: repo,
|
||||
Path: helper.ProvisioningPath,
|
||||
Target: "instance",
|
||||
SkipResourceAssertions: true, // We validate authorization, not resource creation
|
||||
Copies: map[string]string{
|
||||
"testdata/all-panels.json": "dashboard1.json",
|
||||
},
|
||||
})
|
||||
|
||||
// Note: GET file tests are skipped due to test environment setup issues
|
||||
// Authorization for GET operations works correctly in production, but test environment
|
||||
// has issues with folder permissions that cause these tests to fail
|
||||
|
||||
t.Run("POST file (create) - Admin role should succeed", func(t *testing.T) {
|
||||
dashboardContent := helper.LoadFile("testdata/timeline-demo.json")
|
||||
|
||||
result := helper.AdminREST.Post().
|
||||
Namespace("default").
|
||||
Resource("repositories").
|
||||
Name(repo).
|
||||
SubResource("files", "new-dashboard.json").
|
||||
Body(dashboardContent).
|
||||
SetHeader("Content-Type", "application/json").
|
||||
Do(ctx)
|
||||
|
||||
require.NoError(t, result.Error(), "admin should be able to create files")
|
||||
|
||||
// Verify the dashboard was created
|
||||
var wrapper provisioning.ResourceWrapper
|
||||
require.NoError(t, result.Into(&wrapper))
|
||||
require.NotEmpty(t, wrapper.Resource.Upsert.Object, "should have created resource")
|
||||
})
|
||||
|
||||
t.Run("POST file (create) - Editor role should succeed", func(t *testing.T) {
|
||||
dashboardContent := helper.LoadFile("testdata/text-options.json")
|
||||
|
||||
result := helper.EditorREST.Post().
|
||||
Namespace("default").
|
||||
Resource("repositories").
|
||||
Name(repo).
|
||||
SubResource("files", "editor-dashboard.json").
|
||||
Body(dashboardContent).
|
||||
SetHeader("Content-Type", "application/json").
|
||||
Do(ctx)
|
||||
|
||||
require.NoError(t, result.Error(), "editor should be able to create files via access checker")
|
||||
|
||||
// Verify the dashboard was created
|
||||
var wrapper provisioning.ResourceWrapper
|
||||
require.NoError(t, result.Into(&wrapper))
|
||||
require.NotEmpty(t, wrapper.Resource.Upsert.Object, "should have created resource")
|
||||
})
|
||||
|
||||
t.Run("POST file (create) - Viewer role should fail", func(t *testing.T) {
|
||||
dashboardContent := helper.LoadFile("testdata/text-options.json")
|
||||
|
||||
result := helper.ViewerREST.Post().
|
||||
Namespace("default").
|
||||
Resource("repositories").
|
||||
Name(repo).
|
||||
SubResource("files", "viewer-dashboard.json").
|
||||
Body(dashboardContent).
|
||||
SetHeader("Content-Type", "application/json").
|
||||
Do(ctx)
|
||||
|
||||
require.Error(t, result.Error(), "viewer should not be able to create files")
|
||||
require.True(t, apierrors.IsForbidden(result.Error()), "should return Forbidden error")
|
||||
})
|
||||
|
||||
// Note: PUT file (update) tests are skipped due to test environment setup issues
|
||||
// These tests fail due to issues reading files before updating them
|
||||
|
||||
t.Run("PUT file (update) - Viewer role should fail", func(t *testing.T) {
|
||||
// Try to update without reading first
|
||||
dashboardContent := helper.LoadFile("testdata/all-panels.json")
|
||||
|
||||
result := helper.ViewerREST.Put().
|
||||
Namespace("default").
|
||||
Resource("repositories").
|
||||
Name(repo).
|
||||
SubResource("files", "dashboard1.json").
|
||||
Body(dashboardContent).
|
||||
SetHeader("Content-Type", "application/json").
|
||||
Do(ctx)
|
||||
|
||||
require.Error(t, result.Error(), "viewer should not be able to update files")
|
||||
require.True(t, apierrors.IsForbidden(result.Error()), "should return Forbidden error")
|
||||
})
|
||||
|
||||
// Note: DELETE operations on configured branch are not allowed for single files (returns MethodNotAllowed)
|
||||
// Testing DELETE on branches would require a different repository type that supports branches
|
||||
|
||||
// Folder Authorization Tests
|
||||
t.Run("POST folder (create) - Admin role should succeed", func(t *testing.T) {
|
||||
addr := helper.GetEnv().Server.HTTPServer.Listener.Addr().String()
|
||||
url := fmt.Sprintf("http://admin:admin@%s/apis/provisioning.grafana.app/v0alpha1/namespaces/default/repositories/%s/files/test-folder/", addr, repo)
|
||||
req, err := http.NewRequest(http.MethodPost, url, nil)
|
||||
require.NoError(t, err)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
// nolint:errcheck
|
||||
defer resp.Body.Close()
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode, "admin should be able to create folders")
|
||||
})
|
||||
|
||||
t.Run("POST folder (create) - Editor role should succeed", func(t *testing.T) {
|
||||
addr := helper.GetEnv().Server.HTTPServer.Listener.Addr().String()
|
||||
url := fmt.Sprintf("http://editor:editor@%s/apis/provisioning.grafana.app/v0alpha1/namespaces/default/repositories/%s/files/editor-folder/", addr, repo)
|
||||
req, err := http.NewRequest(http.MethodPost, url, nil)
|
||||
require.NoError(t, err)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
// nolint:errcheck
|
||||
defer resp.Body.Close()
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode, "editor should be able to create folders via access checker")
|
||||
})
|
||||
|
||||
t.Run("POST folder (create) - Viewer role should fail", func(t *testing.T) {
|
||||
addr := helper.GetEnv().Server.HTTPServer.Listener.Addr().String()
|
||||
url := fmt.Sprintf("http://viewer:viewer@%s/apis/provisioning.grafana.app/v0alpha1/namespaces/default/repositories/%s/files/viewer-folder/", addr, repo)
|
||||
req, err := http.NewRequest(http.MethodPost, url, nil)
|
||||
require.NoError(t, err)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
// nolint:errcheck
|
||||
defer resp.Body.Close()
|
||||
require.Equal(t, http.StatusForbidden, resp.StatusCode, "viewer should not be able to create folders")
|
||||
})
|
||||
|
||||
// Note: DELETE folder operations on configured branch are not allowed (returns MethodNotAllowed)
|
||||
// Note: MOVE operations require branches which are not supported by local repositories in tests
|
||||
// These operations are tested in the existing TestIntegrationProvisioning_DeleteResources and
|
||||
// TestIntegrationProvisioning_MoveResources tests
|
||||
}
|
||||
|
||||
// NOTE: Granular folder-level permission tests are complex to set up correctly
|
||||
// and are out of scope for this authorization refactoring PR.
|
||||
// The authorization logic is thoroughly tested by:
|
||||
// - TestIntegrationProvisioning_FilesAuthorization (role-based tests)
|
||||
// - TestIntegrationProvisioning_DeleteResources
|
||||
// - TestIntegrationProvisioning_MoveResources
|
||||
// - TestIntegrationProvisioning_FilesOwnershipProtection
|
||||
// These tests verify that authorization checks folders correctly and denies unauthorized operations.
|
||||
|
||||
@@ -786,7 +786,7 @@ func TestIntegrationProvisioning_ImportAllPanelsFromLocalRepository(t *testing.T
|
||||
v, _, _ := unstructured.NestedString(obj.Object, "metadata", "annotations", utils.AnnoKeyUpdatedBy)
|
||||
require.Equal(t, "access-policy:provisioning", v)
|
||||
|
||||
// Should not be able to directly delete the managed resource
|
||||
// Should be able to directly delete the managed resource
|
||||
err = helper.DashboardsV1.Resource.Delete(ctx, allPanels, metav1.DeleteOptions{})
|
||||
require.NoError(t, err, "user can delete")
|
||||
|
||||
|
||||
@@ -280,7 +280,15 @@ func (s *Service) handleTagValues(rw http.ResponseWriter, req *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
tempoPath := fmt.Sprintf("api/v2/search/tag/%s/values", encodedTag)
|
||||
// escape tag
|
||||
tag, err := url.PathUnescape(encodedTag)
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to unescape", "error", err, "tag", encodedTag)
|
||||
http.Error(rw, "Invalid 'tag' parameter", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
tempoPath := fmt.Sprintf("api/v2/search/tag/%s/values", tag)
|
||||
s.proxyToTempo(rw, req, tempoPath)
|
||||
}
|
||||
|
||||
|
||||
3
public/api-merged.json
generated
3
public/api-merged.json
generated
@@ -3402,11 +3402,12 @@
|
||||
},
|
||||
"/dashboards/home": {
|
||||
"get": {
|
||||
"description": "NOTE: the home dashboard is configured in preferences. This API will be removed in G13",
|
||||
"tags": [
|
||||
"dashboards"
|
||||
],
|
||||
"summary": "Get home dashboard.",
|
||||
"operationId": "getHomeDashboard",
|
||||
"deprecated": true,
|
||||
"responses": {
|
||||
"200": {
|
||||
"$ref": "#/responses/getHomeDashboardResponse"
|
||||
|
||||
@@ -76,12 +76,12 @@ export function DashboardEditPaneRenderer({ editPane, dashboard, isDocked }: Pro
|
||||
data-testid={selectors.pages.Dashboard.Sidebar.optionsButton}
|
||||
active={selectedObject === dashboard ? true : false}
|
||||
/>
|
||||
<Sidebar.Button
|
||||
{/* <Sidebar.Button
|
||||
tooltip={t('dashboard.sidebar.edit-schema.tooltip', 'Edit as code')}
|
||||
title={t('dashboard.sidebar.edit-schema.title', 'Code')}
|
||||
icon="brackets-curly"
|
||||
onClick={() => dashboard.openV2SchemaEditor()}
|
||||
/>
|
||||
/> */}
|
||||
<Sidebar.Divider />
|
||||
</>
|
||||
)}
|
||||
|
||||
@@ -51,11 +51,16 @@ function DashboardOutlineNode({ sceneObject, editPane, isEditing, depth, index }
|
||||
|
||||
const noTitleText = t('dashboard.outline.tree-item.no-title', '<no title>');
|
||||
|
||||
const children = editableElement.getOutlineChildren?.(isEditing) ?? [];
|
||||
const elementInfo = editableElement.getEditableElementInfo();
|
||||
const instanceName = elementInfo.instanceName === '' ? noTitleText : elementInfo.instanceName;
|
||||
const outlineRename = useOutlineRename(editableElement, isEditing);
|
||||
const isContainer = editableElement.getOutlineChildren ? true : false;
|
||||
const visibleChildren = useMemo(() => {
|
||||
const children = editableElement.getOutlineChildren?.(isEditing) ?? [];
|
||||
return isEditing
|
||||
? children
|
||||
: children.filter((child) => !getEditableElementFor(child)?.getEditableElementInfo().isHidden);
|
||||
}, [editableElement, isEditing]);
|
||||
|
||||
const onNodeClicked = (e: React.MouseEvent) => {
|
||||
e.stopPropagation();
|
||||
@@ -74,6 +79,10 @@ function DashboardOutlineNode({ sceneObject, editPane, isEditing, depth, index }
|
||||
setIsCollapsed(!isCollapsed);
|
||||
};
|
||||
|
||||
if (elementInfo.isHidden && !isEditing) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
// todo: add proper keyboard navigation
|
||||
// eslint-disable-next-line jsx-a11y/click-events-have-key-events
|
||||
@@ -130,8 +139,8 @@ function DashboardOutlineNode({ sceneObject, editPane, isEditing, depth, index }
|
||||
|
||||
{isContainer && !isCollapsed && (
|
||||
<ul className={styles.nodeChildren} role="group">
|
||||
{children.length > 0 ? (
|
||||
children.map((child, i) => (
|
||||
{visibleChildren.length > 0 ? (
|
||||
visibleChildren.map((child, i) => (
|
||||
<DashboardOutlineNode
|
||||
key={child.state.key}
|
||||
sceneObject={child}
|
||||
|
||||
@@ -190,7 +190,7 @@ describe('InspectJsonTab', () => {
|
||||
expect(obj.kind).toEqual('Panel');
|
||||
expect(obj.spec.id).toEqual(12);
|
||||
expect(obj.spec.data.kind).toEqual('QueryGroup');
|
||||
expect(tab.isEditable()).toBe(false);
|
||||
expect(tab.isEditable()).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ import {
|
||||
VizPanel,
|
||||
} from '@grafana/scenes';
|
||||
import { LibraryPanel } from '@grafana/schema/';
|
||||
import { Button, CodeEditor, Field, Select, useStyles2 } from '@grafana/ui';
|
||||
import { Alert, Button, CodeEditor, Field, Select, useStyles2 } from '@grafana/ui';
|
||||
import { isDashboardV2Spec } from 'app/features/dashboard/api/utils';
|
||||
import { getPanelDataFrames } from 'app/features/dashboard/components/HelpWizard/utils';
|
||||
import { PanelModel } from 'app/features/dashboard/state/PanelModel';
|
||||
@@ -27,6 +27,7 @@ import { getPrettyJSON } from 'app/features/inspector/utils/utils';
|
||||
import { reportPanelInspectInteraction } from 'app/features/search/page/reporting';
|
||||
|
||||
import { DashboardGridItem } from '../scene/layout-default/DashboardGridItem';
|
||||
import { buildVizPanel } from '../serialization/layoutSerializers/utils';
|
||||
import { buildGridItemForPanel } from '../serialization/transformSaveModelToScene';
|
||||
import { gridItemToPanel, vizPanelToPanel } from '../serialization/transformSceneToSaveModel';
|
||||
import { vizPanelToSchemaV2 } from '../serialization/transformSceneToSaveModelSchemaV2';
|
||||
@@ -37,6 +38,7 @@ import {
|
||||
getQueryRunnerFor,
|
||||
isLibraryPanel,
|
||||
} from '../utils/utils';
|
||||
import { isPanelKindV2 } from '../v2schema/validation';
|
||||
|
||||
export type ShowContent = 'panel-json' | 'panel-data' | 'data-frames';
|
||||
|
||||
@@ -45,6 +47,7 @@ export interface InspectJsonTabState extends SceneObjectState {
|
||||
source: ShowContent;
|
||||
jsonText: string;
|
||||
onClose: () => void;
|
||||
error?: string;
|
||||
}
|
||||
|
||||
export class InspectJsonTab extends SceneObjectBase<InspectJsonTabState> {
|
||||
@@ -102,38 +105,77 @@ export class InspectJsonTab extends SceneObjectBase<InspectJsonTabState> {
|
||||
}
|
||||
|
||||
public onChangeSource = (value: SelectableValue<ShowContent>) => {
|
||||
this.setState({ source: value.value!, jsonText: getJsonText(value.value!, this.state.panelRef.resolve()) });
|
||||
this.setState({
|
||||
source: value.value!,
|
||||
jsonText: getJsonText(value.value!, this.state.panelRef.resolve()),
|
||||
error: undefined,
|
||||
});
|
||||
};
|
||||
|
||||
public onApplyChange = () => {
|
||||
const panel = this.state.panelRef.resolve();
|
||||
const dashboard = getDashboardSceneFor(panel);
|
||||
const jsonObj = JSON.parse(this.state.jsonText);
|
||||
|
||||
const panelModel = new PanelModel(jsonObj);
|
||||
const gridItem = buildGridItemForPanel(panelModel);
|
||||
const newState = sceneUtils.cloneSceneObjectState(gridItem.state);
|
||||
|
||||
if (!(panel.parent instanceof DashboardGridItem)) {
|
||||
console.error('Cannot update state of panel', panel, gridItem);
|
||||
let jsonObj: unknown;
|
||||
try {
|
||||
jsonObj = JSON.parse(this.state.jsonText);
|
||||
} catch (e) {
|
||||
this.setState({
|
||||
error: t('dashboard-scene.inspect-json-tab.error-invalid-json', 'Invalid JSON'),
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
this.state.onClose();
|
||||
if (isDashboardV2Spec(dashboard.getSaveModel())) {
|
||||
if (!isPanelKindV2(jsonObj)) {
|
||||
this.setState({
|
||||
error: t(
|
||||
'dashboard-scene.inspect-json-tab.error-invalid-v2-panel',
|
||||
'Panel JSON did not pass validation. Please check the JSON and try again.'
|
||||
),
|
||||
});
|
||||
return;
|
||||
}
|
||||
const vizPanel = buildVizPanel(jsonObj, jsonObj.spec.id);
|
||||
|
||||
if (!dashboard.state.isEditing) {
|
||||
dashboard.onEnterEditMode();
|
||||
if (!dashboard.state.isEditing) {
|
||||
dashboard.onEnterEditMode();
|
||||
}
|
||||
|
||||
reportPanelInspectInteraction(InspectTab.JSON, 'apply', {
|
||||
panel_type_changed: panel.state.pluginId !== jsonObj.spec.vizConfig.group,
|
||||
panel_id_changed: getPanelIdForVizPanel(panel) !== jsonObj.spec.id,
|
||||
panel_grid_pos_changed: false, // Grid cant be edited from inspect in v2 panels.
|
||||
panel_targets_changed: hasQueriesChanged(getQueryRunnerFor(panel), getQueryRunnerFor(vizPanel.state.$data)),
|
||||
});
|
||||
|
||||
panel.setState(vizPanel.state);
|
||||
this.state.onClose();
|
||||
} else {
|
||||
const panelModel = new PanelModel(jsonObj);
|
||||
const gridItem = buildGridItemForPanel(panelModel);
|
||||
const newState = sceneUtils.cloneSceneObjectState(gridItem.state);
|
||||
|
||||
if (!(panel.parent instanceof DashboardGridItem)) {
|
||||
console.error('Cannot update state of panel', panel, gridItem);
|
||||
return;
|
||||
}
|
||||
|
||||
this.state.onClose();
|
||||
|
||||
if (!dashboard.state.isEditing) {
|
||||
dashboard.onEnterEditMode();
|
||||
}
|
||||
|
||||
panel.parent.setState(newState);
|
||||
|
||||
//Report relevant updates
|
||||
reportPanelInspectInteraction(InspectTab.JSON, 'apply', {
|
||||
panel_type_changed: panel.state.pluginId !== panelModel.type,
|
||||
panel_id_changed: getPanelIdForVizPanel(panel) !== panelModel.id,
|
||||
panel_grid_pos_changed: hasGridPosChanged(panel.parent.state, newState),
|
||||
panel_targets_changed: hasQueriesChanged(getQueryRunnerFor(panel), getQueryRunnerFor(newState.$data)),
|
||||
});
|
||||
}
|
||||
|
||||
panel.parent.setState(newState);
|
||||
|
||||
//Report relevant updates
|
||||
reportPanelInspectInteraction(InspectTab.JSON, 'apply', {
|
||||
panel_type_changed: panel.state.pluginId !== panelModel.type,
|
||||
panel_id_changed: getPanelIdForVizPanel(panel) !== panelModel.id,
|
||||
panel_grid_pos_changed: hasGridPosChanged(panel.parent.state, newState),
|
||||
panel_targets_changed: hasQueriesChanged(getQueryRunnerFor(panel), getQueryRunnerFor(newState.$data)),
|
||||
});
|
||||
};
|
||||
|
||||
public onCodeEditorBlur = (value: string) => {
|
||||
@@ -152,11 +194,6 @@ export class InspectJsonTab extends SceneObjectBase<InspectJsonTabState> {
|
||||
return false;
|
||||
}
|
||||
|
||||
// V2 dashboard panels are not editable from the inspect
|
||||
if (isDashboardV2Spec(getDashboardSceneFor(panel).getSaveModel())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Only support normal grid items for now and not repeated items
|
||||
if (panel.parent instanceof DashboardGridItem && panel.parent.isRepeated()) {
|
||||
return false;
|
||||
@@ -170,14 +207,14 @@ export class InspectJsonTab extends SceneObjectBase<InspectJsonTabState> {
|
||||
}
|
||||
|
||||
function InspectJsonTabComponent({ model }: SceneComponentProps<InspectJsonTab>) {
|
||||
const { source: show, jsonText } = model.useState();
|
||||
const { source: show, jsonText, error } = model.useState();
|
||||
const styles = useStyles2(getPanelInspectorStyles2);
|
||||
const options = model.getOptions();
|
||||
|
||||
return (
|
||||
<div className={styles.wrap}>
|
||||
<div className={styles.toolbar} data-testid={selectors.components.PanelInspector.Json.content}>
|
||||
<Field label={t('dashboard.inspect-json.select-source', 'Select source')} className="flex-grow-1">
|
||||
<Field label={t('dashboard.inspect-json.select-source', 'Select source')} className="flex-grow-1" noMargin>
|
||||
<Select
|
||||
inputId="select-source-dropdown"
|
||||
options={options}
|
||||
@@ -192,6 +229,12 @@ function InspectJsonTabComponent({ model }: SceneComponentProps<InspectJsonTab>)
|
||||
)}
|
||||
</div>
|
||||
|
||||
{error && (
|
||||
<Alert severity="error" title={t('dashboard-scene.inspect-json-tab.validation-error', 'Validation error')}>
|
||||
<p>{error}</p>
|
||||
</Alert>
|
||||
)}
|
||||
|
||||
<div className={styles.content}>
|
||||
<AutoSizer disableWidth>
|
||||
{({ height }) => (
|
||||
|
||||
@@ -25,10 +25,17 @@ import { DashboardDataDTO } from 'app/types/dashboard';
|
||||
|
||||
import { PanelInspectDrawer } from '../../inspect/PanelInspectDrawer';
|
||||
import { PanelTimeRange, PanelTimeRangeState } from '../../scene/panel-timerange/PanelTimeRange';
|
||||
import { DashboardLayoutManager } from '../../scene/types/DashboardLayoutManager';
|
||||
import { transformSaveModelSchemaV2ToScene } from '../../serialization/transformSaveModelSchemaV2ToScene';
|
||||
import { transformSaveModelToScene } from '../../serialization/transformSaveModelToScene';
|
||||
import { findVizPanelByKey } from '../../utils/utils';
|
||||
import { buildPanelEditScene } from '../PanelEditor';
|
||||
import { testDashboard, panelWithTransformations, panelWithQueriesOnly } from '../testfiles/testDashboard';
|
||||
import {
|
||||
testDashboard,
|
||||
panelWithTransformations,
|
||||
panelWithQueriesOnly,
|
||||
testDashboardV2,
|
||||
} from '../testfiles/testDashboard';
|
||||
|
||||
import { PanelDataQueriesTab, PanelDataQueriesTabRendered } from './PanelDataQueriesTab';
|
||||
|
||||
@@ -824,6 +831,78 @@ describe('PanelDataQueriesTab', () => {
|
||||
expect(queriesTab.state.dsSettings?.uid).toBe('gdev-testdata');
|
||||
});
|
||||
});
|
||||
|
||||
describe('V2 schema behavior - panel datasource undefined but queries have datasource', () => {
|
||||
it('should load datasource from first query for V2 panel with prometheus datasource', async () => {
|
||||
// panel-1 has a query with prometheus datasource
|
||||
const { queriesTab } = await setupV2Scene('panel-1');
|
||||
|
||||
// V2 panels have undefined panel-level datasource for non-mixed panels
|
||||
expect(queriesTab.queryRunner.state.datasource).toBeUndefined();
|
||||
|
||||
// But the query has its own datasource
|
||||
expect(queriesTab.queryRunner.state.queries[0].datasource).toEqual({
|
||||
type: 'grafana-prometheus-datasource',
|
||||
uid: 'gdev-prometheus',
|
||||
});
|
||||
|
||||
// Should load the datasource from the first query
|
||||
expect(queriesTab.state.datasource?.uid).toBe('gdev-prometheus');
|
||||
expect(queriesTab.state.dsSettings?.uid).toBe('gdev-prometheus');
|
||||
});
|
||||
|
||||
it('should load datasource from first query for V2 panel with testdata datasource', async () => {
|
||||
// panel-2 has a query with testdata datasource
|
||||
const { queriesTab } = await setupV2Scene('panel-2');
|
||||
|
||||
// V2 panels have undefined panel-level datasource for non-mixed panels
|
||||
expect(queriesTab.queryRunner.state.datasource).toBeUndefined();
|
||||
|
||||
// But the query has its own datasource
|
||||
expect(queriesTab.queryRunner.state.queries[0].datasource).toEqual({
|
||||
type: 'grafana-testdata-datasource',
|
||||
uid: 'gdev-testdata',
|
||||
});
|
||||
|
||||
// Should load the datasource from the first query
|
||||
expect(queriesTab.state.datasource?.uid).toBe('gdev-testdata');
|
||||
expect(queriesTab.state.dsSettings?.uid).toBe('gdev-testdata');
|
||||
});
|
||||
|
||||
it('should fall back to last used datasource when V2 query has no explicit datasource', async () => {
|
||||
store.exists.mockReturnValue(true);
|
||||
store.getObject.mockImplementation((key: string, def: unknown) => {
|
||||
if (key === PANEL_EDIT_LAST_USED_DATASOURCE) {
|
||||
return {
|
||||
dashboardUid: 'v2-dashboard-uid',
|
||||
datasourceUid: 'gdev-testdata',
|
||||
};
|
||||
}
|
||||
return def;
|
||||
});
|
||||
|
||||
// panel-3 has a query with NO explicit datasource (datasource.name is undefined)
|
||||
const { queriesTab } = await setupV2Scene('panel-3');
|
||||
|
||||
// V2 panel with no explicit datasource on query should fall back to last used
|
||||
expect(queriesTab.state.datasource?.uid).toBe('gdev-testdata');
|
||||
expect(queriesTab.state.dsSettings?.uid).toBe('gdev-testdata');
|
||||
});
|
||||
|
||||
it('should use panel-level datasource when available (V1 behavior preserved)', async () => {
|
||||
const { queriesTab } = await setupScene('panel-1');
|
||||
|
||||
// V1 panels have panel-level datasource set
|
||||
expect(queriesTab.queryRunner.state.datasource).toEqual({
|
||||
uid: 'gdev-testdata',
|
||||
type: 'grafana-testdata-datasource',
|
||||
});
|
||||
|
||||
// Should use the panel-level datasource
|
||||
expect(queriesTab.state.datasource?.uid).toBe('gdev-testdata');
|
||||
expect(queriesTab.state.dsSettings?.uid).toBe('gdev-testdata');
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -844,3 +923,24 @@ async function setupScene(panelId: string) {
|
||||
|
||||
return { panel, scene: dashboard, queriesTab };
|
||||
}
|
||||
|
||||
// Setup V2 scene - uses transformSaveModelSchemaV2ToScene
|
||||
async function setupV2Scene(panelKey: string) {
|
||||
const dashboard = transformSaveModelSchemaV2ToScene(testDashboardV2);
|
||||
|
||||
const vizPanels = (dashboard.state.body as DashboardLayoutManager).getVizPanels();
|
||||
const panel = vizPanels.find((p) => p.state.key === panelKey)!;
|
||||
|
||||
const panelEditor = buildPanelEditScene(panel);
|
||||
dashboard.setState({ editPanel: panelEditor });
|
||||
|
||||
deactivators.push(dashboard.activate());
|
||||
deactivators.push(panelEditor.activate());
|
||||
|
||||
const queriesTab = panelEditor.state.dataPane!.state.tabs[0] as PanelDataQueriesTab;
|
||||
deactivators.push(queriesTab.activate());
|
||||
|
||||
await Promise.resolve();
|
||||
|
||||
return { panel, scene: dashboard, queriesTab };
|
||||
}
|
||||
|
||||
@@ -86,6 +86,17 @@ export class PanelDataQueriesTab extends SceneObjectBase<PanelDataQueriesTabStat
|
||||
let datasource: DataSourceApi | undefined;
|
||||
let dsSettings: DataSourceInstanceSettings | undefined;
|
||||
|
||||
// If no panel-level datasource (V2 schema non-mixed case), infer from first query
|
||||
// This also improves the V1 behavior because it doesn't make sense to rely on last used
|
||||
// if underlying queries have different datasources
|
||||
if (!datasourceToLoad) {
|
||||
const queries = this.queryRunner.state.queries;
|
||||
const firstQueryDs = queries[0]?.datasource;
|
||||
if (firstQueryDs) {
|
||||
datasourceToLoad = firstQueryDs;
|
||||
}
|
||||
}
|
||||
|
||||
if (!datasourceToLoad) {
|
||||
const dashboardScene = getDashboardSceneFor(this);
|
||||
const dashboardUid = dashboardScene.state.uid ?? '';
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
import { Spec as DashboardV2Spec, defaultDataQueryKind } from '@grafana/schema/dist/esm/schema/dashboard/v2';
|
||||
import { DashboardWithAccessInfo } from 'app/features/dashboard/api/types';
|
||||
|
||||
export const panelWithQueriesOnly = {
|
||||
datasource: {
|
||||
type: 'grafana-testdata-datasource',
|
||||
@@ -751,3 +754,223 @@ export const testDashboard = {
|
||||
version: 6,
|
||||
weekStart: '',
|
||||
};
|
||||
|
||||
// V2 Dashboard fixture - panels have queries with datasources but NO panel-level datasource
|
||||
export const testDashboardV2: DashboardWithAccessInfo<DashboardV2Spec> = {
|
||||
kind: 'DashboardWithAccessInfo',
|
||||
metadata: {
|
||||
name: 'v2-dashboard-uid',
|
||||
namespace: 'default',
|
||||
labels: {},
|
||||
generation: 1,
|
||||
resourceVersion: '1',
|
||||
creationTimestamp: new Date().toISOString(),
|
||||
},
|
||||
spec: {
|
||||
title: 'V2 Test Dashboard',
|
||||
description: 'Test dashboard for V2 schema',
|
||||
tags: [],
|
||||
cursorSync: 'Off',
|
||||
liveNow: false,
|
||||
editable: true,
|
||||
preload: false,
|
||||
links: [],
|
||||
variables: [],
|
||||
annotations: [],
|
||||
timeSettings: {
|
||||
from: 'now-6h',
|
||||
to: 'now',
|
||||
autoRefresh: '',
|
||||
autoRefreshIntervals: ['5s', '10s', '30s', '1m', '5m', '15m', '30m', '1h', '2h', '1d'],
|
||||
fiscalYearStartMonth: 0,
|
||||
hideTimepicker: false,
|
||||
timezone: '',
|
||||
weekStart: undefined,
|
||||
quickRanges: [],
|
||||
},
|
||||
elements: {
|
||||
'panel-1': {
|
||||
kind: 'Panel',
|
||||
spec: {
|
||||
id: 1,
|
||||
title: 'Panel with Prometheus datasource',
|
||||
description: '',
|
||||
links: [],
|
||||
data: {
|
||||
kind: 'QueryGroup',
|
||||
spec: {
|
||||
queries: [
|
||||
{
|
||||
kind: 'PanelQuery',
|
||||
spec: {
|
||||
refId: 'A',
|
||||
hidden: false,
|
||||
query: {
|
||||
kind: 'DataQuery',
|
||||
version: defaultDataQueryKind().version,
|
||||
group: 'grafana-prometheus-datasource',
|
||||
datasource: {
|
||||
name: 'gdev-prometheus',
|
||||
},
|
||||
spec: {
|
||||
expr: 'up',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
transformations: [],
|
||||
queryOptions: {},
|
||||
},
|
||||
},
|
||||
vizConfig: {
|
||||
kind: 'VizConfig',
|
||||
group: 'timeseries',
|
||||
version: '1.0.0',
|
||||
spec: {
|
||||
options: {},
|
||||
fieldConfig: {
|
||||
defaults: {},
|
||||
overrides: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
'panel-2': {
|
||||
kind: 'Panel',
|
||||
spec: {
|
||||
id: 2,
|
||||
title: 'Panel with TestData datasource',
|
||||
description: '',
|
||||
links: [],
|
||||
data: {
|
||||
kind: 'QueryGroup',
|
||||
spec: {
|
||||
queries: [
|
||||
{
|
||||
kind: 'PanelQuery',
|
||||
spec: {
|
||||
refId: 'A',
|
||||
hidden: false,
|
||||
query: {
|
||||
kind: 'DataQuery',
|
||||
version: defaultDataQueryKind().version,
|
||||
group: 'grafana-testdata-datasource',
|
||||
datasource: {
|
||||
name: 'gdev-testdata',
|
||||
},
|
||||
spec: {
|
||||
scenarioId: 'random_walk',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
transformations: [],
|
||||
queryOptions: {},
|
||||
},
|
||||
},
|
||||
vizConfig: {
|
||||
kind: 'VizConfig',
|
||||
group: 'timeseries',
|
||||
version: '1.0.0',
|
||||
spec: {
|
||||
options: {},
|
||||
fieldConfig: {
|
||||
defaults: {},
|
||||
overrides: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
'panel-3': {
|
||||
kind: 'Panel',
|
||||
spec: {
|
||||
id: 3,
|
||||
title: 'Panel with no datasource on query',
|
||||
description: '',
|
||||
links: [],
|
||||
data: {
|
||||
kind: 'QueryGroup',
|
||||
spec: {
|
||||
queries: [
|
||||
{
|
||||
kind: 'PanelQuery',
|
||||
spec: {
|
||||
refId: 'A',
|
||||
hidden: false,
|
||||
query: {
|
||||
kind: 'DataQuery',
|
||||
version: defaultDataQueryKind().version,
|
||||
group: 'grafana-testdata-datasource',
|
||||
// No datasource.name - simulates panel with no explicit datasource
|
||||
spec: {},
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
transformations: [],
|
||||
queryOptions: {},
|
||||
},
|
||||
},
|
||||
vizConfig: {
|
||||
kind: 'VizConfig',
|
||||
group: 'timeseries',
|
||||
version: '1.0.0',
|
||||
spec: {
|
||||
options: {},
|
||||
fieldConfig: {
|
||||
defaults: {},
|
||||
overrides: [],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
layout: {
|
||||
kind: 'GridLayout',
|
||||
spec: {
|
||||
items: [
|
||||
{
|
||||
kind: 'GridLayoutItem',
|
||||
spec: {
|
||||
x: 0,
|
||||
y: 0,
|
||||
width: 12,
|
||||
height: 8,
|
||||
element: { kind: 'ElementReference', name: 'panel-1' },
|
||||
},
|
||||
},
|
||||
{
|
||||
kind: 'GridLayoutItem',
|
||||
spec: {
|
||||
x: 12,
|
||||
y: 0,
|
||||
width: 12,
|
||||
height: 8,
|
||||
element: { kind: 'ElementReference', name: 'panel-2' },
|
||||
},
|
||||
},
|
||||
{
|
||||
kind: 'GridLayoutItem',
|
||||
spec: {
|
||||
x: 0,
|
||||
y: 8,
|
||||
width: 12,
|
||||
height: 8,
|
||||
element: { kind: 'ElementReference', name: 'panel-3' },
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
access: {
|
||||
url: '/d/v2-dashboard-uid',
|
||||
slug: 'v2-test-dashboard',
|
||||
},
|
||||
apiVersion: 'v2',
|
||||
};
|
||||
|
||||
@@ -19,6 +19,7 @@ import {
|
||||
} from '@grafana/scenes';
|
||||
import { Box, Button, useStyles2 } from '@grafana/ui';
|
||||
import { playlistSrv } from 'app/features/playlist/PlaylistSrv';
|
||||
import { ContextualNavigationPaneToggle } from 'app/features/scopes/dashboards/ContextualNavigationPaneToggle';
|
||||
|
||||
import { PanelEditControls } from '../panel-edit/PanelEditControls';
|
||||
import { getDashboardSceneFor } from '../utils/utils';
|
||||
@@ -172,6 +173,9 @@ function DashboardControlsRenderer({ model }: SceneComponentProps<DashboardContr
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
{config.featureToggles.scopeFilters && !editPanel && (
|
||||
<ContextualNavigationPaneToggle className={styles.contextualNavToggle} hideWhenOpen={true} />
|
||||
)}
|
||||
{!hideVariableControls && (
|
||||
<>
|
||||
<VariableControls dashboard={dashboard} />
|
||||
@@ -287,5 +291,9 @@ function getStyles(theme: GrafanaTheme2) {
|
||||
flexWrap: 'wrap',
|
||||
marginLeft: 'auto',
|
||||
}),
|
||||
contextualNavToggle: css({
|
||||
display: 'inline-flex',
|
||||
margin: theme.spacing(0, 1, 1, 0),
|
||||
}),
|
||||
};
|
||||
}
|
||||
|
||||
@@ -5,7 +5,19 @@ import { CoreApp, GrafanaTheme2, PanelPlugin, PanelProps } from '@grafana/data';
|
||||
import { Trans, t } from '@grafana/i18n';
|
||||
import { config, locationService } from '@grafana/runtime';
|
||||
import { sceneUtils } from '@grafana/scenes';
|
||||
import { Box, Button, ButtonGroup, Dropdown, Icon, Menu, Stack, Text, usePanelContext, useStyles2 } from '@grafana/ui';
|
||||
import {
|
||||
Box,
|
||||
Button,
|
||||
ButtonGroup,
|
||||
Dropdown,
|
||||
EmptyState,
|
||||
Icon,
|
||||
Menu,
|
||||
Stack,
|
||||
Text,
|
||||
usePanelContext,
|
||||
useStyles2,
|
||||
} from '@grafana/ui';
|
||||
|
||||
import { NEW_PANEL_TITLE } from '../../dashboard/utils/dashboard';
|
||||
import { DashboardInteractions } from '../utils/interactions';
|
||||
@@ -92,20 +104,30 @@ function UnconfiguredPanelComp(props: PanelProps) {
|
||||
);
|
||||
}
|
||||
|
||||
const { isEditing } = dashboard.state;
|
||||
|
||||
return (
|
||||
<Stack direction={'row'} alignItems={'center'} height={'100%'} justifyContent={'center'}>
|
||||
<Box paddingBottom={2}>
|
||||
<ButtonGroup>
|
||||
<Button icon="sliders-v-alt" onClick={onConfigure}>
|
||||
<Trans i18nKey="dashboard.new-panel.configure-button">Configure</Trans>
|
||||
</Button>
|
||||
<Dropdown overlay={MenuActions} placement="bottom-end" onVisibleChange={onMenuClick}>
|
||||
<Button
|
||||
aria-label={t('dashboard.new-panel.configure-button-menu', 'Toggle menu')}
|
||||
icon={isOpen ? 'angle-up' : 'angle-down'}
|
||||
/>
|
||||
</Dropdown>
|
||||
</ButtonGroup>
|
||||
{isEditing ? (
|
||||
<ButtonGroup>
|
||||
<Button icon="sliders-v-alt" onClick={onConfigure}>
|
||||
<Trans i18nKey="dashboard.new-panel.configure-button">Configure</Trans>
|
||||
</Button>
|
||||
<Dropdown overlay={MenuActions} placement="bottom-end" onVisibleChange={onMenuClick}>
|
||||
<Button
|
||||
aria-label={t('dashboard.new-panel.configure-button-menu', 'Toggle menu')}
|
||||
icon={isOpen ? 'angle-up' : 'angle-down'}
|
||||
/>
|
||||
</Dropdown>
|
||||
</ButtonGroup>
|
||||
) : (
|
||||
<EmptyState
|
||||
variant="call-to-action"
|
||||
message={t('dashboard.new-panel.missing-config', 'Missing panel configuration')}
|
||||
hideImage
|
||||
/>
|
||||
)}
|
||||
</Box>
|
||||
</Stack>
|
||||
);
|
||||
|
||||
@@ -91,10 +91,12 @@ export class RowItem
|
||||
}
|
||||
|
||||
public getEditableElementInfo(): EditableDashboardElementInfo {
|
||||
const isHidden = !this.state.conditionalRendering?.state.result;
|
||||
return {
|
||||
typeName: t('dashboard.edit-pane.elements.row', 'Row'),
|
||||
instanceName: sceneGraph.interpolate(this, this.state.title, undefined, 'text'),
|
||||
icon: 'list-ul',
|
||||
isHidden,
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@@ -18,7 +18,8 @@ import { isDashboardLayoutGrid } from '../types/DashboardLayoutGrid';
|
||||
import { RowItem } from './RowItem';
|
||||
|
||||
export function RowItemRenderer({ model }: SceneComponentProps<RowItem>) {
|
||||
const { layout, collapse: isCollapsed, fillScreen, hideHeader: isHeaderHidden, isDropTarget, key } = model.useState();
|
||||
const { layout, collapse, fillScreen, hideHeader: isHeaderHidden, isDropTarget, key } = model.useState();
|
||||
const isCollapsed = collapse && !isHeaderHidden; // never allow a row without a header to be collapsed
|
||||
const isClone = isRepeatCloneOrChildOf(model);
|
||||
const { isEditing } = useDashboardState(model);
|
||||
const [isConditionallyHidden, conditionalRenderingClass, conditionalRenderingOverlay] = useIsConditionallyHidden(
|
||||
@@ -237,6 +238,7 @@ function getStyles(theme: GrafanaTheme2) {
|
||||
}),
|
||||
dragging: css({
|
||||
cursor: 'move',
|
||||
backgroundColor: theme.colors.background.canvas,
|
||||
}),
|
||||
wrapperGrow: css({
|
||||
flexGrow: 1,
|
||||
|
||||
@@ -89,10 +89,12 @@ export class TabItem
|
||||
}
|
||||
|
||||
public getEditableElementInfo(): EditableDashboardElementInfo {
|
||||
const isHidden = !this.state.conditionalRendering?.state.result;
|
||||
return {
|
||||
typeName: t('dashboard.edit-pane.elements.tab', 'Tab'),
|
||||
instanceName: sceneGraph.interpolate(this, this.state.title, undefined, 'text'),
|
||||
icon: 'layers',
|
||||
isHidden,
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@@ -135,7 +135,7 @@ function TabRepeatSelect({ tab, id }: { tab: TabItem; id?: string }) {
|
||||
<TextLink
|
||||
external
|
||||
href={
|
||||
'https://grafana.com/docs/grafana/latest/dashboards/build-dashboards/create-dashboard/#configure-repeating-tabs'
|
||||
'https://grafana.com/docs/grafana/latest/visualizations/dashboards/build-dashboards/create-dynamic-dashboard/#repeating-rows-and-tabs-and-the-dashboard-special-data-source'
|
||||
}
|
||||
>
|
||||
<Trans i18nKey="dashboard.tabs-layout.tab.repeat.learn-more">Learn more</Trans>
|
||||
|
||||
@@ -262,4 +262,57 @@ describe('TabsLayoutManager', () => {
|
||||
expect(manager.getVizPanels().length).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe('createFromLayout', () => {
|
||||
it('should convert rows with titles to tabs', () => {
|
||||
const rowsLayout = new RowsLayoutManager({
|
||||
rows: [new RowItem({ title: 'Row 1' }), new RowItem({ title: 'Row 2' })],
|
||||
});
|
||||
|
||||
const tabsManager = TabsLayoutManager.createFromLayout(rowsLayout);
|
||||
|
||||
expect(tabsManager.state.tabs).toHaveLength(2);
|
||||
expect(tabsManager.state.tabs[0].state.title).toBe('Row 1');
|
||||
expect(tabsManager.state.tabs[1].state.title).toBe('Row 2');
|
||||
});
|
||||
|
||||
it('should use default title when row has empty title', () => {
|
||||
const rowsLayout = new RowsLayoutManager({
|
||||
rows: [new RowItem({ title: '' })],
|
||||
});
|
||||
|
||||
const tabsManager = TabsLayoutManager.createFromLayout(rowsLayout);
|
||||
|
||||
expect(tabsManager.state.tabs).toHaveLength(1);
|
||||
expect(tabsManager.state.tabs[0].state.title).toBe('New tab');
|
||||
});
|
||||
|
||||
it('should generate unique titles for multiple rows with empty titles', () => {
|
||||
const rowsLayout = new RowsLayoutManager({
|
||||
rows: [new RowItem({ title: '' }), new RowItem({ title: '' }), new RowItem({ title: '' })],
|
||||
});
|
||||
|
||||
const tabsManager = TabsLayoutManager.createFromLayout(rowsLayout);
|
||||
|
||||
expect(tabsManager.state.tabs).toHaveLength(3);
|
||||
expect(tabsManager.state.tabs[0].state.title).toBe('New tab');
|
||||
expect(tabsManager.state.tabs[1].state.title).toBe('New tab 1');
|
||||
expect(tabsManager.state.tabs[2].state.title).toBe('New tab 2');
|
||||
});
|
||||
|
||||
it('should generate unique titles when mixing empty and existing titles', () => {
|
||||
const rowsLayout = new RowsLayoutManager({
|
||||
rows: [
|
||||
new RowItem({ title: 'New row' }), // existing title that matches default
|
||||
new RowItem({ title: '' }), // empty, should get unique title
|
||||
],
|
||||
});
|
||||
|
||||
const tabsManager = TabsLayoutManager.createFromLayout(rowsLayout);
|
||||
|
||||
expect(tabsManager.state.tabs).toHaveLength(2);
|
||||
expect(tabsManager.state.tabs[0].state.title).toBe('New row');
|
||||
expect(tabsManager.state.tabs[1].state.title).toBe('New tab');
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -410,6 +410,10 @@ export class TabsLayoutManager extends SceneObjectBase<TabsLayoutManagerState> i
|
||||
let tabs: TabItem[] = [];
|
||||
|
||||
if (layout instanceof RowsLayoutManager) {
|
||||
const existingNames = new Set(
|
||||
layout.state.rows.map((row) => row.state.title).filter((title): title is string => !!title)
|
||||
);
|
||||
|
||||
for (const row of layout.state.rows) {
|
||||
if (row.state.repeatSourceKey) {
|
||||
continue;
|
||||
@@ -420,10 +424,14 @@ export class TabsLayoutManager extends SceneObjectBase<TabsLayoutManagerState> i
|
||||
// We need to clear the target since we don't want to point the original row anymore (if it was set)
|
||||
conditionalRendering?.setTarget(undefined);
|
||||
|
||||
const newTitle =
|
||||
row.state.title || generateUniqueTitle(t('dashboard.tabs-layout.tab.new', 'New tab'), existingNames);
|
||||
existingNames.add(newTitle);
|
||||
|
||||
tabs.push(
|
||||
new TabItem({
|
||||
layout: row.state.layout.clone(),
|
||||
title: row.state.title,
|
||||
title: newTitle,
|
||||
conditionalRendering,
|
||||
repeatByVariable: row.state.repeatByVariable,
|
||||
})
|
||||
|
||||
@@ -0,0 +1,60 @@
|
||||
import {
|
||||
defaultPanelKind,
|
||||
defaultQueryGroupKind,
|
||||
defaultPanelQueryKind,
|
||||
defaultVizConfigKind,
|
||||
} from '@grafana/schema/dist/esm/schema/dashboard/v2';
|
||||
|
||||
import { isPanelKindV2 } from './validation';
|
||||
|
||||
describe('v2schema validation', () => {
|
||||
it('isPanelKindV2 returns true for a minimal valid PanelKind', () => {
|
||||
const panel = defaultPanelKind();
|
||||
// Ensure minimal required properties exist (defaults should be fine)
|
||||
panel.spec.vizConfig = defaultVizConfigKind();
|
||||
panel.spec.data = defaultQueryGroupKind();
|
||||
|
||||
expect(isPanelKindV2(panel)).toBe(true);
|
||||
});
|
||||
|
||||
it('returns false when kind is not "Panel"', () => {
|
||||
const panel = defaultPanelKind();
|
||||
// @ts-expect-error intentional invalid kind for test
|
||||
panel.kind = 'NotAPanel';
|
||||
expect(isPanelKindV2(panel)).toBe(false);
|
||||
});
|
||||
|
||||
it('returns false when data kind is wrong', () => {
|
||||
const panel = defaultPanelKind();
|
||||
// @ts-expect-error intentional invalid kind for test
|
||||
panel.spec.data = { kind: 'Wrong', spec: {} };
|
||||
expect(isPanelKindV2(panel)).toBe(false);
|
||||
});
|
||||
|
||||
it('returns false when queries contain invalid entries', () => {
|
||||
const panel = defaultPanelKind();
|
||||
panel.spec.data = defaultQueryGroupKind();
|
||||
// @ts-expect-error push an invalid query shape
|
||||
panel.spec.data.spec.queries = [{}];
|
||||
expect(isPanelKindV2(panel)).toBe(false);
|
||||
|
||||
// Ensure a valid query shape passes
|
||||
panel.spec.data.spec.queries = [defaultPanelQueryKind()];
|
||||
expect(isPanelKindV2(panel)).toBe(true);
|
||||
});
|
||||
|
||||
it('returns false when vizConfig.group is not a string', () => {
|
||||
const panel = defaultPanelKind();
|
||||
panel.spec.vizConfig = defaultVizConfigKind();
|
||||
// @ts-expect-error force wrong type
|
||||
panel.spec.vizConfig.group = 42;
|
||||
expect(isPanelKindV2(panel)).toBe(false);
|
||||
});
|
||||
|
||||
it('returns false when transparent is not a boolean', () => {
|
||||
const panel = defaultPanelKind();
|
||||
// @ts-expect-error wrong type
|
||||
panel.spec.transparent = 'yes';
|
||||
expect(isPanelKindV2(panel)).toBe(false);
|
||||
});
|
||||
});
|
||||
137
public/app/features/dashboard-scene/v2schema/validation.ts
Normal file
137
public/app/features/dashboard-scene/v2schema/validation.ts
Normal file
@@ -0,0 +1,137 @@
|
||||
import {
|
||||
PanelKind,
|
||||
QueryGroupKind,
|
||||
VizConfigKind,
|
||||
PanelQueryKind,
|
||||
TransformationKind,
|
||||
} from '@grafana/schema/dist/esm/schema/dashboard/v2';
|
||||
|
||||
function isObject(value: unknown): value is Record<string, unknown> {
|
||||
return typeof value === 'object' && value !== null && !Array.isArray(value);
|
||||
}
|
||||
|
||||
function isPanelQueryKind(value: unknown): value is PanelQueryKind {
|
||||
if (!isObject(value)) {
|
||||
return false;
|
||||
}
|
||||
if (value.kind !== 'PanelQuery' || !isObject(value.spec)) {
|
||||
return false;
|
||||
}
|
||||
// Minimal checks for query spec; accept additional properties
|
||||
if (typeof value.spec.refId !== 'string') {
|
||||
return false;
|
||||
}
|
||||
if (typeof value.spec.hidden !== 'boolean') {
|
||||
return false;
|
||||
}
|
||||
// value.spec.query is an opaque "DataQueryKind" which is { kind: string, spec: Record<string, any> }
|
||||
const q = value.spec.query;
|
||||
if (!isObject(q) || typeof q.kind !== 'string' || !isObject(q.spec)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function isTransformationKind(value: unknown): value is TransformationKind {
|
||||
if (!isObject(value)) {
|
||||
return false;
|
||||
}
|
||||
if (typeof value.kind !== 'string') {
|
||||
return false;
|
||||
}
|
||||
if (!isObject(value.spec)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function isQueryGroupKind(value: unknown): value is QueryGroupKind {
|
||||
if (!isObject(value)) {
|
||||
return false;
|
||||
}
|
||||
if (value.kind !== 'QueryGroup' || !isObject(value.spec)) {
|
||||
return false;
|
||||
}
|
||||
const spec = value.spec;
|
||||
if (!Array.isArray(spec.queries) || !spec.queries.every(isPanelQueryKind)) {
|
||||
return false;
|
||||
}
|
||||
if (!Array.isArray(spec.transformations) || !spec.transformations.every(isTransformationKind)) {
|
||||
return false;
|
||||
}
|
||||
if (!isObject(spec.queryOptions)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function isVizConfigKind(value: unknown): value is VizConfigKind {
|
||||
if (!isObject(value)) {
|
||||
return false;
|
||||
}
|
||||
if (value.kind !== 'VizConfig') {
|
||||
return false;
|
||||
}
|
||||
if (typeof value.group !== 'string') {
|
||||
return false;
|
||||
}
|
||||
if (typeof value.version !== 'string') {
|
||||
return false;
|
||||
}
|
||||
if (!isObject(value.spec)) {
|
||||
return false;
|
||||
}
|
||||
const spec = value.spec;
|
||||
if (!isObject(spec.options)) {
|
||||
return false;
|
||||
}
|
||||
if (!isObject(spec.fieldConfig)) {
|
||||
return false;
|
||||
}
|
||||
// Minimal fieldConfig shape (defaults/overrides may be empty)
|
||||
if (!isObject(spec.fieldConfig)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
export function isPanelKindV2(value: unknown): value is PanelKind {
|
||||
if (!isObject(value)) {
|
||||
return false;
|
||||
}
|
||||
if (value.kind !== 'Panel') {
|
||||
return false;
|
||||
}
|
||||
if (!isObject(value.spec)) {
|
||||
return false;
|
||||
}
|
||||
const spec = value.spec;
|
||||
if (typeof spec.id !== 'number') {
|
||||
return false;
|
||||
}
|
||||
if (typeof spec.title !== 'string') {
|
||||
return false;
|
||||
}
|
||||
if (typeof spec.description !== 'string') {
|
||||
return false;
|
||||
}
|
||||
if (!Array.isArray(spec.links)) {
|
||||
return false;
|
||||
}
|
||||
if (!isQueryGroupKind(spec.data)) {
|
||||
return false;
|
||||
}
|
||||
if (!isVizConfigKind(spec.vizConfig)) {
|
||||
return false;
|
||||
}
|
||||
if (spec.transparent !== undefined && typeof spec.transparent !== 'boolean') {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
export function validatePanelKindV2(value: unknown): asserts value is PanelKind {
|
||||
if (!isPanelKindV2(value)) {
|
||||
throw new Error('Provided JSON is not a valid v2 Panel spec');
|
||||
}
|
||||
}
|
||||
@@ -256,7 +256,11 @@ export const InfiniteScroll = ({
|
||||
if (props.visibleStartIndex === 0) {
|
||||
noScrollRef.current = scrollElement.scrollHeight <= scrollElement.clientHeight;
|
||||
}
|
||||
if (noScrollRef.current || infiniteLoaderState === 'loading' || infiniteLoaderState === 'out-of-bounds') {
|
||||
if (noScrollRef.current) {
|
||||
setInfiniteLoaderState('idle');
|
||||
return;
|
||||
}
|
||||
if (infiniteLoaderState === 'loading' || infiniteLoaderState === 'out-of-bounds') {
|
||||
return;
|
||||
}
|
||||
const lastLogIndex = logs.length - 1;
|
||||
@@ -267,7 +271,7 @@ export const InfiniteScroll = ({
|
||||
setInfiniteLoaderState('idle');
|
||||
}
|
||||
},
|
||||
[infiniteLoaderState, logs.length, scrollElement]
|
||||
[infiniteLoaderState, logs, scrollElement]
|
||||
);
|
||||
|
||||
const getItemKey = useCallback((index: number) => (logs[index] ? logs[index].uid : index.toString()), [logs]);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { css } from '@emotion/css';
|
||||
import { useState, useEffect, useCallback, useMemo } from 'react';
|
||||
import { Fragment, useState, useEffect, useCallback, useMemo } from 'react';
|
||||
import { useAsync, useMeasure } from 'react-use';
|
||||
|
||||
import {
|
||||
@@ -133,9 +133,9 @@ export function VisualizationSuggestions({ onChange, data, panel }: Props) {
|
||||
return (
|
||||
<div className={styles.grid}>
|
||||
{isNewVizSuggestionsEnabled
|
||||
? suggestionsByVizType.map(([vizType, vizTypeSuggestions]) => (
|
||||
<>
|
||||
<div className={styles.vizTypeHeader} key={vizType?.id || 'unknown-viz-type'}>
|
||||
? suggestionsByVizType.map(([vizType, vizTypeSuggestions], groupIndex) => (
|
||||
<Fragment key={vizType?.id || `unknown-viz-type-${groupIndex}`}>
|
||||
<div className={styles.vizTypeHeader}>
|
||||
<Text variant="body" weight="medium">
|
||||
{vizType?.info && <img className={styles.vizTypeLogo} src={vizType.info.logos.small} alt="" />}
|
||||
{vizType?.name || t('panel.visualization-suggestions.unknown-viz-type', 'Unknown visualization type')}
|
||||
@@ -190,7 +190,7 @@ export function VisualizationSuggestions({ onChange, data, panel }: Props) {
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</>
|
||||
</Fragment>
|
||||
))
|
||||
: suggestions?.map((suggestion, index) => (
|
||||
<div key={suggestion.hash} className={styles.cardContainer} ref={index === 0 ? firstCardRef : undefined}>
|
||||
|
||||
@@ -78,11 +78,36 @@ export function unboxNearMembraneProxies(structure: unknown): unknown {
|
||||
if (Array.isArray(structure)) {
|
||||
return structure.map(unboxNearMembraneProxies);
|
||||
}
|
||||
|
||||
if (isTransferable(structure)) {
|
||||
return structure;
|
||||
}
|
||||
|
||||
if (typeof structure === 'object') {
|
||||
return Object.keys(structure).reduce((acc, key) => {
|
||||
Reflect.set(acc, key, unboxNearMembraneProxies(Reflect.get(structure, key)));
|
||||
return acc;
|
||||
}, {});
|
||||
}
|
||||
|
||||
return structure;
|
||||
}
|
||||
|
||||
function isTransferable(structure: unknown): structure is Transferable {
|
||||
// We should probably add all of the transferable types here.
|
||||
// https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Transferable_objects
|
||||
return (
|
||||
structure instanceof ArrayBuffer ||
|
||||
structure instanceof OffscreenCanvas ||
|
||||
structure instanceof ImageBitmap ||
|
||||
structure instanceof MessagePort ||
|
||||
structure instanceof MediaSourceHandle ||
|
||||
structure instanceof ReadableStream ||
|
||||
structure instanceof WritableStream ||
|
||||
structure instanceof TransformStream ||
|
||||
structure instanceof AudioData ||
|
||||
structure instanceof VideoFrame ||
|
||||
structure instanceof RTCDataChannel ||
|
||||
structure instanceof ArrayBuffer
|
||||
);
|
||||
}
|
||||
|
||||
@@ -33,6 +33,11 @@ const getSummaryColumns = () => [
|
||||
header: 'Unchanged',
|
||||
cell: ({ row: { original: item } }: SummaryCell) => item.noop?.toString() || '-',
|
||||
},
|
||||
{
|
||||
id: 'warnings',
|
||||
header: 'Warnings',
|
||||
cell: ({ row: { original: item } }: SummaryCell) => item.warning?.toString() || '-',
|
||||
},
|
||||
{
|
||||
id: 'errors',
|
||||
header: 'Errors',
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user