Compare commits
55 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 6024fbb363 | |||
| 512f4bc8dc | |||
| 0c49337205 | |||
| c5345498b1 | |||
| 1bcccd5e61 | |||
| 12b38d1b7a | |||
| 359d097154 | |||
| cfc5d96c34 | |||
| 3459c67bfb | |||
| 37ccd8bc3d | |||
| 5156177079 | |||
| 4817ecf6a3 | |||
| c73cab8eef | |||
| a37ebf609e | |||
| b29e8ccb45 | |||
| 644f7b7001 | |||
| 629570926d | |||
| 1b59c82b74 | |||
| f35447435f | |||
| c0dc92e8cd | |||
| 7114b9cd3b | |||
| b40d0e6ff4 | |||
| 584615cf3f | |||
| 5f80a29a28 | |||
| eab5d2b30e | |||
| f3421b9718 | |||
| 1addfd69b4 | |||
| d4a627c5fc | |||
| 46ef9aaa0a | |||
| 6ce672dd00 | |||
| 403f4d41de | |||
| 6512259acc | |||
| b2dd095bd8 | |||
| e525b529a8 | |||
| 7805e18368 | |||
| 88924ee9ac | |||
| 7a07a49ecc | |||
| 9a4e13800d | |||
| a0c4e8b4f4 | |||
| fa62113b41 | |||
| b863acab05 | |||
| c7c052480d | |||
| 5e3c7ad0c1 | |||
| 478ae15f0e | |||
| 8ebb1c2bc9 | |||
| 5572ce966a | |||
| e3510f6eb3 | |||
| 75e08a20f6 | |||
| c8908c5100 | |||
| d8106adb63 | |||
| a4c1b51182 | |||
| 535c9be2f7 | |||
| 49f891a24d | |||
| 86018141d0 | |||
| 7fd2476a12 |
+3
-1
@@ -520,7 +520,7 @@ i18next.config.ts @grafana/grafana-frontend-platform
|
||||
/e2e-playwright/various-suite/solo-route.spec.ts @grafana/dashboards-squad
|
||||
/e2e-playwright/various-suite/trace-view-scrolling.spec.ts @grafana/observability-traces-and-profiling
|
||||
/e2e-playwright/various-suite/verify-i18n.spec.ts @grafana/grafana-frontend-platform
|
||||
/e2e-playwright/various-suite/visualization-suggestions.spec.ts @grafana/dashboards-squad
|
||||
/e2e-playwright/various-suite/visualization-suggestions.spec.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/various-suite/perf-test.spec.ts @grafana/grafana-frontend-platform
|
||||
|
||||
# Packages
|
||||
@@ -653,6 +653,7 @@ i18next.config.ts @grafana/grafana-frontend-platform
|
||||
/packages/grafana-runtime/src/components/QueryEditorWithMigration* @grafana/plugins-platform-frontend @grafana/plugins-platform-backend
|
||||
/packages/grafana-runtime/src/config.ts @grafana/grafana-frontend-platform
|
||||
/packages/grafana-runtime/src/services/ @grafana/grafana-frontend-platform
|
||||
/packages/grafana-runtime/src/services/plugins.ts @grafana/plugins-platform-frontend
|
||||
/packages/grafana-runtime/src/services/pluginExtensions @grafana/plugins-platform-frontend
|
||||
/packages/grafana-runtime/src/services/CorrelationsService.ts @grafana/datapro
|
||||
/packages/grafana-runtime/src/services/LocationService.test.tsx @grafana/grafana-search-navigate-organise
|
||||
@@ -956,6 +957,7 @@ playwright.storybook.config.ts @grafana/grafana-frontend-platform
|
||||
/public/app/features/notifications/ @grafana/grafana-search-navigate-organise
|
||||
/public/app/features/org/ @grafana/grafana-search-navigate-organise
|
||||
/public/app/features/panel/ @grafana/dashboards-squad
|
||||
/public/app/features/panel/components/VizTypePicker/VisualizationSuggestions.tsx @grafana/dataviz-squad
|
||||
/public/app/features/panel/suggestions/ @grafana/dataviz-squad
|
||||
/public/app/features/playlist/ @grafana/dashboards-squad
|
||||
/public/app/features/plugins/ @grafana/plugins-platform-frontend
|
||||
|
||||
Vendored
+141
-12
@@ -1603,7 +1603,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1671,7 +1670,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 98,
|
||||
"min": 5,
|
||||
"noise": 22,
|
||||
@@ -1689,7 +1687,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1757,7 +1754,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 98,
|
||||
"min": 5,
|
||||
"noise": 22,
|
||||
@@ -1788,7 +1784,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1857,7 +1852,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 8,
|
||||
"min": 1,
|
||||
"noise": 2,
|
||||
@@ -1875,7 +1869,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1944,7 +1937,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 12,
|
||||
"min": 1,
|
||||
"noise": 2,
|
||||
@@ -1962,7 +1954,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -2030,7 +2021,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 100,
|
||||
"min": 10,
|
||||
"noise": 22,
|
||||
@@ -2048,7 +2038,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -2116,7 +2105,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 100,
|
||||
"min": 10,
|
||||
"noise": 22,
|
||||
@@ -2129,6 +2117,147 @@
|
||||
],
|
||||
"title": "Backend",
|
||||
"type": "radialbar"
|
||||
},
|
||||
{
|
||||
"collapsed": false,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 66
|
||||
},
|
||||
"id": 35,
|
||||
"panels": [],
|
||||
"title": "Empty data",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": 0
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 6,
|
||||
"x": 0,
|
||||
"y": 67
|
||||
},
|
||||
"id": 36,
|
||||
"options": {
|
||||
"barWidthFactor": 0.5,
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"segmentCount": 1,
|
||||
"segmentSpacing": 0.3,
|
||||
"shape": "gauge",
|
||||
"showThresholdLabels": false,
|
||||
"showThresholdMarkers": true,
|
||||
"sparkline": true
|
||||
},
|
||||
"pluginVersion": "13.0.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"refId": "A",
|
||||
"scenarioId": "random_walk",
|
||||
"seriesCount": 0
|
||||
}
|
||||
],
|
||||
"title": "Numeric, no series",
|
||||
"type": "gauge"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": 0
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 6,
|
||||
"x": 6,
|
||||
"y": 67
|
||||
},
|
||||
"id": 37,
|
||||
"options": {
|
||||
"barWidthFactor": 0.5,
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"segmentCount": 1,
|
||||
"segmentSpacing": 0.3,
|
||||
"shape": "gauge",
|
||||
"showThresholdLabels": false,
|
||||
"showThresholdMarkers": true,
|
||||
"sparkline": true
|
||||
},
|
||||
"pluginVersion": "13.0.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"refId": "A",
|
||||
"scenarioId": "logs"
|
||||
}
|
||||
],
|
||||
"title": "Non-numeric",
|
||||
"type": "gauge"
|
||||
}
|
||||
],
|
||||
"preload": false,
|
||||
|
||||
@@ -198,6 +198,7 @@ type JobStatus struct {
|
||||
Finished int64 `json:"finished,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
Warnings []string `json:"warnings,omitempty"`
|
||||
|
||||
// Optional value 0-100 that can be set while running
|
||||
Progress float64 `json:"progress,omitempty"`
|
||||
@@ -225,18 +226,20 @@ type JobResourceSummary struct {
|
||||
Kind string `json:"kind,omitempty"`
|
||||
Total int64 `json:"total,omitempty"` // the count (if known)
|
||||
|
||||
Create int64 `json:"create,omitempty"`
|
||||
Update int64 `json:"update,omitempty"`
|
||||
Delete int64 `json:"delete,omitempty"`
|
||||
Write int64 `json:"write,omitempty"` // Create or update (export)
|
||||
Error int64 `json:"error,omitempty"` // The error count
|
||||
Create int64 `json:"create,omitempty"`
|
||||
Update int64 `json:"update,omitempty"`
|
||||
Delete int64 `json:"delete,omitempty"`
|
||||
Write int64 `json:"write,omitempty"` // Create or update (export)
|
||||
Error int64 `json:"error,omitempty"` // The error count
|
||||
Warning int64 `json:"warning,omitempty"` // The warning count
|
||||
|
||||
// No action required (useful for sync)
|
||||
Noop int64 `json:"noop,omitempty"`
|
||||
|
||||
// Report errors for this resource type
|
||||
// Report errors/warnings for this resource type
|
||||
// This may not be an exhaustive list and recommend looking at the logs for more info
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
Warnings []string `json:"warnings,omitempty"`
|
||||
}
|
||||
|
||||
// HistoricJob is an append only log, saving all jobs that have been processed.
|
||||
|
||||
@@ -401,6 +401,11 @@ func (in *JobResourceSummary) DeepCopyInto(out *JobResourceSummary) {
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Warnings != nil {
|
||||
in, out := &in.Warnings, &out.Warnings
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -468,6 +473,11 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) {
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Warnings != nil {
|
||||
in, out := &in.Warnings, &out.Warnings
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Summary != nil {
|
||||
in, out := &in.Summary, &out.Summary
|
||||
*out = make([]*JobResourceSummary, len(*in))
|
||||
|
||||
@@ -889,6 +889,13 @@ func schema_pkg_apis_provisioning_v0alpha1_JobResourceSummary(ref common.Referen
|
||||
Format: "int64",
|
||||
},
|
||||
},
|
||||
"warning": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "The error count",
|
||||
Type: []string{"integer"},
|
||||
Format: "int64",
|
||||
},
|
||||
},
|
||||
"noop": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "No action required (useful for sync)",
|
||||
@@ -898,7 +905,7 @@ func schema_pkg_apis_provisioning_v0alpha1_JobResourceSummary(ref common.Referen
|
||||
},
|
||||
"errors": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Report errors for this resource type This may not be an exhaustive list and recommend looking at the logs for more info",
|
||||
Description: "Report errors/warnings for this resource type This may not be an exhaustive list and recommend looking at the logs for more info",
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
@@ -911,6 +918,20 @@ func schema_pkg_apis_provisioning_v0alpha1_JobResourceSummary(ref common.Referen
|
||||
},
|
||||
},
|
||||
},
|
||||
"warnings": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1029,6 +1050,20 @@ func schema_pkg_apis_provisioning_v0alpha1_JobStatus(ref common.ReferenceCallbac
|
||||
},
|
||||
},
|
||||
},
|
||||
"warnings": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"progress": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Optional value 0-100 that can be set while running",
|
||||
|
||||
+2
@@ -3,8 +3,10 @@ API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioni
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,FileList,Items
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,HistoryList,Items
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobResourceSummary,Errors
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobResourceSummary,Warnings
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobStatus,Errors
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobStatus,Summary
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobStatus,Warnings
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,ManagerStats,Stats
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,MoveJobOptions,Paths
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,MoveJobOptions,Resources
|
||||
|
||||
+30
-10
@@ -7,16 +7,18 @@ package v0alpha1
|
||||
// JobResourceSummaryApplyConfiguration represents a declarative configuration of the JobResourceSummary type for use
|
||||
// with apply.
|
||||
type JobResourceSummaryApplyConfiguration struct {
|
||||
Group *string `json:"group,omitempty"`
|
||||
Kind *string `json:"kind,omitempty"`
|
||||
Total *int64 `json:"total,omitempty"`
|
||||
Create *int64 `json:"create,omitempty"`
|
||||
Update *int64 `json:"update,omitempty"`
|
||||
Delete *int64 `json:"delete,omitempty"`
|
||||
Write *int64 `json:"write,omitempty"`
|
||||
Error *int64 `json:"error,omitempty"`
|
||||
Noop *int64 `json:"noop,omitempty"`
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
Group *string `json:"group,omitempty"`
|
||||
Kind *string `json:"kind,omitempty"`
|
||||
Total *int64 `json:"total,omitempty"`
|
||||
Create *int64 `json:"create,omitempty"`
|
||||
Update *int64 `json:"update,omitempty"`
|
||||
Delete *int64 `json:"delete,omitempty"`
|
||||
Write *int64 `json:"write,omitempty"`
|
||||
Error *int64 `json:"error,omitempty"`
|
||||
Warning *int64 `json:"warning,omitempty"`
|
||||
Noop *int64 `json:"noop,omitempty"`
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
Warnings []string `json:"warnings,omitempty"`
|
||||
}
|
||||
|
||||
// JobResourceSummaryApplyConfiguration constructs a declarative configuration of the JobResourceSummary type for use with
|
||||
@@ -89,6 +91,14 @@ func (b *JobResourceSummaryApplyConfiguration) WithError(value int64) *JobResour
|
||||
return b
|
||||
}
|
||||
|
||||
// WithWarning sets the Warning field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Warning field is set to the value of the last call.
|
||||
func (b *JobResourceSummaryApplyConfiguration) WithWarning(value int64) *JobResourceSummaryApplyConfiguration {
|
||||
b.Warning = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithNoop sets the Noop field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Noop field is set to the value of the last call.
|
||||
@@ -106,3 +116,13 @@ func (b *JobResourceSummaryApplyConfiguration) WithErrors(values ...string) *Job
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// WithWarnings adds the given value to the Warnings field in the declarative configuration
|
||||
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||
// If called multiple times, values provided by each call will be appended to the Warnings field.
|
||||
func (b *JobResourceSummaryApplyConfiguration) WithWarnings(values ...string) *JobResourceSummaryApplyConfiguration {
|
||||
for i := range values {
|
||||
b.Warnings = append(b.Warnings, values[i])
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ type JobStatusApplyConfiguration struct {
|
||||
Finished *int64 `json:"finished,omitempty"`
|
||||
Message *string `json:"message,omitempty"`
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
Warnings []string `json:"warnings,omitempty"`
|
||||
Progress *float64 `json:"progress,omitempty"`
|
||||
Summary []*provisioningv0alpha1.JobResourceSummary `json:"summary,omitempty"`
|
||||
URLs *RepositoryURLsApplyConfiguration `json:"url,omitempty"`
|
||||
@@ -69,6 +70,16 @@ func (b *JobStatusApplyConfiguration) WithErrors(values ...string) *JobStatusApp
|
||||
return b
|
||||
}
|
||||
|
||||
// WithWarnings adds the given value to the Warnings field in the declarative configuration
|
||||
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||
// If called multiple times, values provided by each call will be appended to the Warnings field.
|
||||
func (b *JobStatusApplyConfiguration) WithWarnings(values ...string) *JobStatusApplyConfiguration {
|
||||
for i := range values {
|
||||
b.Warnings = append(b.Warnings, values[i])
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// WithProgress sets the Progress field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Progress field is set to the value of the last call.
|
||||
|
||||
@@ -75,9 +75,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": false,
|
||||
"gradient": false
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -152,9 +152,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": false,
|
||||
"gradient": false
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -229,9 +229,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": false,
|
||||
"gradient": false
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -306,9 +306,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
"spotlight": true
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -383,9 +383,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
"spotlight": true
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -460,9 +460,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": false,
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
"spotlight": true
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -537,9 +537,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": false,
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
"spotlight": true
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -627,9 +627,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
"spotlight": true
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -704,9 +704,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
"spotlight": true
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -781,9 +781,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
"spotlight": true
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -858,9 +858,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
"spotlight": true
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -952,9 +952,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": false,
|
||||
"rounded": false,
|
||||
"spotlight": false,
|
||||
"gradient": false
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1029,9 +1029,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": false,
|
||||
"rounded": false,
|
||||
"spotlight": false,
|
||||
"gradient": false
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1106,9 +1106,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false,
|
||||
"gradient": true
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1183,9 +1183,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": false,
|
||||
"rounded": false,
|
||||
"spotlight": false,
|
||||
"gradient": false
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1260,9 +1260,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": false,
|
||||
"rounded": false,
|
||||
"spotlight": false,
|
||||
"gradient": false
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1354,9 +1354,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false,
|
||||
"gradient": true
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1435,9 +1435,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false,
|
||||
"gradient": true
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1516,9 +1516,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false,
|
||||
"gradient": true
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1565,7 +1565,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1606,9 +1605,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": true,
|
||||
"rounded": true,
|
||||
"spotlight": true,
|
||||
"gradient": true
|
||||
"spotlight": true
|
||||
},
|
||||
"glow": "both",
|
||||
"orientation": "auto",
|
||||
@@ -1631,7 +1630,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 98,
|
||||
"min": 5,
|
||||
"noise": 22,
|
||||
@@ -1649,7 +1647,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1690,9 +1687,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": true,
|
||||
"rounded": true,
|
||||
"spotlight": true,
|
||||
"gradient": true
|
||||
"spotlight": true
|
||||
},
|
||||
"glow": "both",
|
||||
"orientation": "auto",
|
||||
@@ -1715,7 +1712,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 98,
|
||||
"min": 5,
|
||||
"noise": 22,
|
||||
@@ -1746,7 +1742,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1788,9 +1783,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": true,
|
||||
"rounded": true,
|
||||
"spotlight": true,
|
||||
"gradient": true
|
||||
"spotlight": true
|
||||
},
|
||||
"glow": "both",
|
||||
"orientation": "auto",
|
||||
@@ -1813,7 +1808,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 8,
|
||||
"min": 1,
|
||||
"noise": 2,
|
||||
@@ -1831,7 +1825,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1873,9 +1866,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": true,
|
||||
"rounded": true,
|
||||
"spotlight": true,
|
||||
"gradient": true
|
||||
"spotlight": true
|
||||
},
|
||||
"glow": "both",
|
||||
"orientation": "auto",
|
||||
@@ -1898,7 +1891,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 12,
|
||||
"min": 1,
|
||||
"noise": 2,
|
||||
@@ -1916,7 +1908,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1957,9 +1948,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": true,
|
||||
"rounded": true,
|
||||
"spotlight": true,
|
||||
"gradient": true
|
||||
"spotlight": true
|
||||
},
|
||||
"glow": "both",
|
||||
"orientation": "auto",
|
||||
@@ -1982,7 +1973,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 100,
|
||||
"min": 10,
|
||||
"noise": 22,
|
||||
@@ -2000,7 +1990,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -2041,9 +2030,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": true,
|
||||
"rounded": true,
|
||||
"spotlight": true,
|
||||
"gradient": true
|
||||
"spotlight": true
|
||||
},
|
||||
"glow": "both",
|
||||
"orientation": "auto",
|
||||
@@ -2066,7 +2055,6 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 100,
|
||||
"min": 10,
|
||||
"noise": 22,
|
||||
@@ -2079,6 +2067,147 @@
|
||||
],
|
||||
"title": "Backend",
|
||||
"type": "radialbar"
|
||||
},
|
||||
{
|
||||
"collapsed": false,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 66
|
||||
},
|
||||
"id": 35,
|
||||
"panels": [],
|
||||
"title": "Empty data",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": 0
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 6,
|
||||
"x": 0,
|
||||
"y": 67
|
||||
},
|
||||
"id": 36,
|
||||
"options": {
|
||||
"barWidthFactor": 0.5,
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"segmentCount": 1,
|
||||
"segmentSpacing": 0.3,
|
||||
"shape": "gauge",
|
||||
"showThresholdLabels": false,
|
||||
"showThresholdMarkers": true,
|
||||
"sparkline": true
|
||||
},
|
||||
"pluginVersion": "13.0.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"refId": "A",
|
||||
"scenarioId": "random_walk",
|
||||
"seriesCount": 0
|
||||
}
|
||||
],
|
||||
"title": "Numeric, no series",
|
||||
"type": "gauge"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": 0
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 6,
|
||||
"x": 6,
|
||||
"y": 67
|
||||
},
|
||||
"id": 37,
|
||||
"options": {
|
||||
"barWidthFactor": 0.5,
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"segmentCount": 1,
|
||||
"segmentSpacing": 0.3,
|
||||
"shape": "gauge",
|
||||
"showThresholdLabels": false,
|
||||
"showThresholdMarkers": true,
|
||||
"sparkline": true
|
||||
},
|
||||
"pluginVersion": "13.0.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"refId": "A",
|
||||
"scenarioId": "logs"
|
||||
}
|
||||
],
|
||||
"title": "Non-numeric",
|
||||
"type": "gauge"
|
||||
}
|
||||
],
|
||||
"preload": false,
|
||||
@@ -2095,5 +2224,5 @@
|
||||
"timezone": "browser",
|
||||
"title": "Panel tests - Gauge (new)",
|
||||
"uid": "panel-tests-gauge-new",
|
||||
"version": 6
|
||||
"version": 9
|
||||
}
|
||||
|
||||
+3
-3
@@ -59,9 +59,9 @@ For more details on contact points, including how to test them and enable notifi
|
||||
|
||||
## Alertmanager settings
|
||||
|
||||
| Option | Description |
|
||||
| ------ | ---------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| URL | The Alertmanager URL. This field is [protected](ref:configure-contact-points#protected-fields) from modification in Grafana Cloud. |
|
||||
| Option | Description |
|
||||
| ------ | ----------------------------------------------------------------------------------------------------------------- |
|
||||
| URL | The Alertmanager URL. This field is [protected](ref:configure-contact-points) from modification in Grafana Cloud. |
|
||||
|
||||
#### Optional settings
|
||||
|
||||
|
||||
+8
-8
@@ -49,14 +49,14 @@ For more details on contact points, including how to test them and enable notifi
|
||||
|
||||
### Required Settings
|
||||
|
||||
| Key | Description |
|
||||
| ------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| URL | The URL of the REST API of your Jira instance. Supported versions: `2` and `3` (e.g., `https://your-domain.atlassian.net/rest/api/3`). This field is [protected](ref:configure-contact-points#protected-fields) from modification in Grafana Cloud. |
|
||||
| Basic Auth User | Username for authentication. For Jira Cloud, use your email address. |
|
||||
| Basic Auth Password | Password or personal token. For Jira Cloud, you need to obtain a personal token [here](https://id.atlassian.com/manage-profile/security/api-tokens) and use it as the password. |
|
||||
| API Token | An alternative to basic authentication, a bearer token is used to authorize the API requests. See [Jira documentation](https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html) for more information. |
|
||||
| Project Key | The project key identifying the project where issues will be created. Project keys are unique identifiers for a project. |
|
||||
| Issue Type | The type of issue to create (e.g., `Task`, `Bug`, `Incident`). Make sure that you specify a type that is available in your project. |
|
||||
| Key | Description |
|
||||
| ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| URL | The URL of the REST API of your Jira instance. Supported versions: `2` and `3` (e.g., `https://your-domain.atlassian.net/rest/api/3`). This field is [protected](ref:configure-contact-points) from modification in Grafana Cloud. |
|
||||
| Basic Auth User | Username for authentication. For Jira Cloud, use your email address. |
|
||||
| Basic Auth Password | Password or personal token. For Jira Cloud, you need to obtain a personal token [here](https://id.atlassian.com/manage-profile/security/api-tokens) and use it as the password. |
|
||||
| API Token | An alternative to basic authentication, a bearer token is used to authorize the API requests. See [Jira documentation](https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html) for more information. |
|
||||
| Project Key | The project key identifying the project where issues will be created. Project keys are unique identifiers for a project. |
|
||||
| Issue Type | The type of issue to create (e.g., `Task`, `Bug`, `Incident`). Make sure that you specify a type that is available in your project. |
|
||||
|
||||
### Optional Settings
|
||||
|
||||
|
||||
+4
-4
@@ -54,10 +54,10 @@ For more details on contact points, including how to test them and enable notifi
|
||||
|
||||
### Required Settings
|
||||
|
||||
| Option | Description |
|
||||
| ---------- | ---------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Broker URL | The URL of the MQTT broker. This field is [protected](ref:configure-contact-points#protected-fields) from modification in Grafana Cloud. |
|
||||
| Topic | The topic to which the message will be sent. |
|
||||
| Option | Description |
|
||||
| ---------- | ----------------------------------------------------------------------------------------------------------------------- |
|
||||
| Broker URL | The URL of the MQTT broker. This field is [protected](ref:configure-contact-points) from modification in Grafana Cloud. |
|
||||
| Topic | The topic to which the message will be sent. |
|
||||
|
||||
### Optional Settings
|
||||
|
||||
|
||||
+2
-2
@@ -51,8 +51,8 @@ You can customize the `title` and `body` of the Slack message using [notificatio
|
||||
|
||||
If you are using a Slack API Token, complete the following steps.
|
||||
|
||||
1. Follow steps 1 and 2 of the [Slack API Quickstart](https://api.slack.com/start/quickstart).
|
||||
1. Add the [chat:write.public](https://api.slack.com/scopes/chat:write.public) scope to give your app the ability to post in all public channels without joining.
|
||||
1. Follow step 1 of the [Slack API Quickstart](https://docs.slack.dev/app-management/quickstart-app-settings/#creating) to create the app.
|
||||
1. Continue onto the second step of the [Slack API Quickstart](https://docs.slack.dev/app-management/quickstart-app-settings/#scopes) and add the [chat:write.public](https://api.slack.com/scopes/chat:write.public) scope as described to give your app the ability to post in all public channels without joining.
|
||||
1. In OAuth Tokens for Your Workspace, copy the Bot User OAuth Token.
|
||||
1. Open your Slack workplace.
|
||||
1. Right click the channel you want to receive notifications in.
|
||||
|
||||
+3
-3
@@ -62,9 +62,9 @@ For more details on contact points, including how to test them and enable notifi
|
||||
|
||||
## Webhook settings
|
||||
|
||||
| Option | Description |
|
||||
| ------ | ----------------------------------------------------------------------------------------------------------------------------- |
|
||||
| URL | The Webhook URL. This field is [protected](ref:configure-contact-points#protected-fields) from modification in Grafana Cloud. |
|
||||
| Option | Description |
|
||||
| ------ | ------------------------------------------------------------------------------------------------------------ |
|
||||
| URL | The Webhook URL. This field is [protected](ref:configure-contact-points) from modification in Grafana Cloud. |
|
||||
|
||||
#### Optional settings
|
||||
|
||||
|
||||
+1
-1
@@ -81,7 +81,7 @@ Replace the placeholders with your values:
|
||||
|
||||
In your `grafana` directory, create a sub-folder called `dashboards`.
|
||||
|
||||
This guide shows you how to creates three separate dashboards. For all dashboard configurations, replace the placeholders with your values:
|
||||
This guide shows you how to create three separate dashboards. For all dashboard configurations, replace the placeholders with your values:
|
||||
|
||||
- _`<GRAFANA_CLOUD_STACK_NAME>`_: Name of your Grafana Cloud Stack
|
||||
- _`<GRAFANA_OPERATOR_NAMESPACE>`_: Namespace where the `grafana-operator` is deployed in your Kubernetes cluster
|
||||
|
||||
+147
@@ -0,0 +1,147 @@
|
||||
---
|
||||
title: Git Sync deployment scenarios
|
||||
menuTitle: Deployment scenarios
|
||||
description: Learn about common Git Sync deployment patterns and configurations for different organizational needs
|
||||
weight: 450
|
||||
keywords:
|
||||
- git sync
|
||||
- deployment patterns
|
||||
- scenarios
|
||||
- multi-environment
|
||||
- teams
|
||||
---
|
||||
|
||||
# Git Sync deployment scenarios
|
||||
|
||||
This guide shows practical deployment scenarios for Grafana’s Git Sync. Learn how to configure bidirectional synchronization between Grafana and Git repositories for teams, environments, and regions.
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
Git Sync is an experimental feature. It reflects Grafana’s approach to Observability as Code and might include limitations or breaking changes. For current status and known limitations, refer to the [Git Sync introduction](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/intro-git-sync/).
|
||||
{{< /admonition >}}
|
||||
|
||||
## Understand the relationship between key Git Sync components
|
||||
|
||||
Before you explore the scenarios, understand how the key Git Sync components relate:
|
||||
|
||||
- [Grafana instance](#grafana-instance)
|
||||
- [Git repository structure](#git-repository-structure)
|
||||
- [Git Sync repository resource](#git-sync-repository-resource)
|
||||
|
||||
### Grafana instance
|
||||
|
||||
A Grafana instance is a running Grafana server. Multiple instances can:
|
||||
|
||||
- Connect to the same Git repository using different Repository configurations.
|
||||
- Sync from different branches of the same repository.
|
||||
- Sync from different paths within the same repository.
|
||||
- Sync from different repositories.
|
||||
|
||||
### Git repository structure
|
||||
|
||||
You can organize your Git repository in several ways:
|
||||
|
||||
- Single branch, multiple paths: Use different directories for different purposes (for example, `dev/`, `prod/`, `team-a/`).
|
||||
- Multiple branches: Use different branches for different environments or teams (for example, `main`, `develop`, `team-a`).
|
||||
- Multiple repositories: Use separate repositories for different teams or environments.
|
||||
|
||||
### Git Sync repository resource
|
||||
|
||||
A repository resource is a Grafana configuration object that defines:
|
||||
|
||||
- Which Git repository to sync with.
|
||||
- Which branch to use.
|
||||
- Which directory path to synchronize.
|
||||
- Sync behavior and workflows.
|
||||
|
||||
Each repository resource creates bidirectional synchronization between a Grafana instance and a specific location in Git.
|
||||
|
||||
## How does repository sync behave?
|
||||
|
||||
With Git Sync, you configure a repository resource to sync with your Grafana instance:
|
||||
|
||||
1. Grafana monitors the specified Git location (repository, branch, and path).
|
||||
2. Grafana creates a folder in Dashboards (typically named after the repository).
|
||||
3. Grafana creates dashboards from dashboard JSON files in Git within this folder.
|
||||
4. Grafana commits dashboard changes made in the UI back to Git.
|
||||
5. Grafana pulls dashboard changes made in Git and updates dashboards in the UI.
|
||||
6. Synchronization occurs at regular intervals (configurable), or instantly if you use webhooks.
|
||||
|
||||
You can find the provisioned dashboards organized in folders under **Dashboards**.
|
||||
|
||||
## Example: Relationship between repository, branch, and path
|
||||
|
||||
Here's a concrete example showing how the three parameters work together:
|
||||
|
||||
**Configuration:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `team-platform/grafana/`
|
||||
|
||||
**In Git (on branch `main`):**
|
||||
|
||||
```
|
||||
your-org/grafana-manifests/
|
||||
├── .git/
|
||||
├── README.md
|
||||
├── team-platform/
|
||||
│ └── grafana/
|
||||
│ ├── cpu-metrics.json ← Synced
|
||||
│ ├── memory-usage.json ← Synced
|
||||
│ └── disk-io.json ← Synced
|
||||
├── team-data/
|
||||
│ └── grafana/
|
||||
│ └── pipeline-stats.json ← Not synced (different path)
|
||||
└── other-files.txt ← Not synced (outside path)
|
||||
```
|
||||
|
||||
**In Grafana Dashboards view:**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
└── 📁 grafana-manifests/
|
||||
├── CPU Metrics Dashboard
|
||||
├── Memory Usage Dashboard
|
||||
└── Disk I/O Dashboard
|
||||
```
|
||||
|
||||
**Key points:**
|
||||
|
||||
- Grafana only synchronizes files within the specified path (`team-platform/grafana/`).
|
||||
- Grafana ignores files in other paths or at the repository root.
|
||||
- The folder name in Grafana comes from the repository name.
|
||||
- Dashboard titles come from the JSON file content, not the filename.
|
||||
|
||||
## Repository configuration flexibility
|
||||
|
||||
Git Sync repositories support different combinations of repository URL, branch, and path:
|
||||
|
||||
- Different Git repositories: Each environment or team can use its own repository.
|
||||
- Instance A: `repository: your-org/grafana-prod`.
|
||||
- Instance B: `repository: your-org/grafana-dev`.
|
||||
- Different branches: Use separate branches within the same repository.
|
||||
- Instance A: `repository: your-org/grafana-manifests, branch: main`.
|
||||
- Instance B: `repository: your-org/grafana-manifests, branch: develop`.
|
||||
- Different paths: Use different directory paths within the same repository.
|
||||
- Instance A: `repository: your-org/grafana-manifests, branch: main, path: production/`.
|
||||
- Instance B: `repository: your-org/grafana-manifests, branch: main, path: development/`.
|
||||
- Any combination: Mix and match based on your workflow requirements.
|
||||
|
||||
## Scenarios
|
||||
|
||||
Use these deployment scenarios to plan your Git Sync setup:
|
||||
|
||||
- [Single instance](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios/single-instance/)
|
||||
- [Git Sync for development and production environments](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios/dev-prod/)
|
||||
- [Git Sync with regional replication](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios/multi-region/)
|
||||
- [High availability](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios/high-availability/)
|
||||
- [Git Sync in a shared Grafana instance](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios/multi-team/)
|
||||
|
||||
## Learn more
|
||||
|
||||
Refer to the following documents to learn more:
|
||||
|
||||
- [Git Sync introduction](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/intro-git-sync/)
|
||||
- [Git Sync setup guide](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-setup/)
|
||||
- [Dashboard provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/administration/provisioning/)
|
||||
- [Observability as Code](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/)
|
||||
+147
@@ -0,0 +1,147 @@
|
||||
---
|
||||
title: Git Sync for development and production environments
|
||||
menuTitle: Across environments
|
||||
description: Use separate Grafana instances for development and production with Git-controlled promotion
|
||||
weight: 20
|
||||
---
|
||||
|
||||
# Git Sync for development and production environments
|
||||
|
||||
Use separate Grafana instances for development and production. Each syncs with different Git locations to test dashboards before production.
|
||||
|
||||
## Use it for
|
||||
|
||||
- **Staged deployments**: You need to test dashboard changes before production deployment.
|
||||
- **Change control**: You require approvals before dashboards reach production.
|
||||
- **Quality assurance**: You verify dashboard functionality in a non-production environment.
|
||||
- **Risk mitigation**: You minimize the risk of breaking production dashboards.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌────────────────────────────────────────────────────────────┐
|
||||
│ GitHub Repository │
|
||||
│ Repository: your-org/grafana-manifests │
|
||||
│ Branch: main │
|
||||
│ │
|
||||
│ grafana-manifests/ │
|
||||
│ ├── dev/ │
|
||||
│ │ ├── dashboard-new.json ← Development dashboards │
|
||||
│ │ └── dashboard-test.json │
|
||||
│ │ │
|
||||
│ └── prod/ │
|
||||
│ ├── dashboard-stable.json ← Production dashboards │
|
||||
│ └── dashboard-approved.json │
|
||||
└────────────────────────────────────────────────────────────┘
|
||||
↕ ↕
|
||||
Git Sync (dev/) Git Sync (prod/)
|
||||
↕ ↕
|
||||
┌─────────────────────┐ ┌─────────────────────┐
|
||||
│ Dev Grafana │ │ Prod Grafana │
|
||||
│ │ │ │
|
||||
│ Repository: │ │ Repository: │
|
||||
│ - path: dev/ │ │ - path: prod/ │
|
||||
│ │ │ │
|
||||
│ Creates folder: │ │ Creates folder: │
|
||||
│ "grafana-manifests"│ │ "grafana-manifests"│
|
||||
└─────────────────────┘ └─────────────────────┘
|
||||
```
|
||||
|
||||
## Repository structure
|
||||
|
||||
**In Git:**
|
||||
|
||||
```
|
||||
your-org/grafana-manifests
|
||||
├── dev/
|
||||
│ ├── dashboard-new.json
|
||||
│ └── dashboard-test.json
|
||||
└── prod/
|
||||
├── dashboard-stable.json
|
||||
└── dashboard-approved.json
|
||||
```
|
||||
|
||||
**In Grafana Dashboards view:**
|
||||
|
||||
**Dev instance:**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
└── 📁 grafana-manifests/
|
||||
├── New Dashboard
|
||||
└── Test Dashboard
|
||||
```
|
||||
|
||||
**Prod instance:**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
└── 📁 grafana-manifests/
|
||||
├── Stable Dashboard
|
||||
└── Approved Dashboard
|
||||
```
|
||||
|
||||
- Both instances create a folder named "grafana-manifests" (from repository name)
|
||||
- Each instance only shows dashboards from its configured path (`dev/` or `prod/`)
|
||||
- Dashboards appear with their titles from the JSON files
|
||||
|
||||
## Configuration parameters
|
||||
|
||||
Development:
|
||||
|
||||
- Repository: `your-org/grafana-manifests`
|
||||
- Branch: `main`
|
||||
- Path: `dev/`
|
||||
|
||||
Production:
|
||||
|
||||
- Repository: `your-org/grafana-manifests`
|
||||
- Branch: `main`
|
||||
- Path: `prod/`
|
||||
|
||||
## How it works
|
||||
|
||||
1. Developers create and modify dashboards in development.
|
||||
2. Git Sync commits changes to `dev/`.
|
||||
3. You review changes in Git.
|
||||
4. You promote approved dashboards from `dev/` to `prod/`.
|
||||
5. Production syncs from `prod/`.
|
||||
6. Production dashboards update.
|
||||
|
||||
## Alternative: Use branches
|
||||
|
||||
Instead of using different paths, you can configure instances to use different branches:
|
||||
|
||||
**Development instance:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `develop`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
**Production instance:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
With this approach:
|
||||
|
||||
- Development changes go to the `develop` branch
|
||||
- Use Git merge or pull request workflows to promote changes from `develop` to `main`
|
||||
- Production automatically syncs from the `main` branch
|
||||
|
||||
## Alternative: Use separate repositories for stricter isolation
|
||||
|
||||
For stricter isolation, use completely separate repositories:
|
||||
|
||||
**Development instance:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests-dev`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
**Production instance:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests-prod`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `grafana/`
|
||||
+217
@@ -0,0 +1,217 @@
|
||||
---
|
||||
title: Git Sync for high availability environments
|
||||
menuTitle: High availability
|
||||
description: Run multiple Grafana instances serving traffic simultaneously, synchronized via Git Sync
|
||||
weight: 50
|
||||
---
|
||||
|
||||
# Git Sync for high availability environments
|
||||
|
||||
## Primary–replica scenario
|
||||
|
||||
Use a primary Grafana instance and one or more replicas synchronized with the same Git location to enable failover.
|
||||
|
||||
### Use it for
|
||||
|
||||
- **Automatic failover**: You need service continuity when the primary instance fails.
|
||||
- **High availability**: Your organization requires guaranteed dashboard availability.
|
||||
- **Simple HA setup**: You want high availability without the complexity of active–active.
|
||||
- **Maintenance windows**: You perform updates while another instance serves traffic.
|
||||
- **Business continuity**: Your organization can't tolerate dashboard downtime.
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ GitHub Repository │
|
||||
│ Repository: your-org/grafana-manifests │
|
||||
│ Branch: main │
|
||||
│ │
|
||||
│ grafana-manifests/ │
|
||||
│ └── shared/ │
|
||||
│ ├── dashboard-metrics.json │
|
||||
│ ├── dashboard-alerts.json │
|
||||
│ └── dashboard-logs.json │
|
||||
└─────────────────────────────────────────────────────┘
|
||||
↕ ↕
|
||||
Git Sync (shared/) Git Sync (shared/)
|
||||
↕ ↕
|
||||
┌────────────────────┐ ┌────────────────────┐
|
||||
│  Primary Grafana   │          │  Replica Grafana   │
|
||||
│ (Active) │ │ (Standby) │
|
||||
│ │ │ │
|
||||
│ Repository: │ │ Repository: │
|
||||
│ - path: shared/ │ │ - path: shared/ │
|
||||
└────────────────────┘ └────────────────────┘
|
||||
│ │
|
||||
└───────────┬───────────────────┘
|
||||
↓
|
||||
┌──────────────────────┐
|
||||
│ Reverse Proxy │
|
||||
│ (Failover) │
|
||||
└──────────────────────┘
|
||||
```
|
||||
|
||||
### Repository structure
|
||||
|
||||
**In Git:**
|
||||
|
||||
```
|
||||
your-org/grafana-manifests
|
||||
└── shared/
|
||||
├── dashboard-metrics.json
|
||||
├── dashboard-alerts.json
|
||||
└── dashboard-logs.json
|
||||
```
|
||||
|
||||
**In Grafana Dashboards view (both instances):**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
└── 📁 grafana-manifests/
|
||||
├── Metrics Dashboard
|
||||
├── Alerts Dashboard
|
||||
└── Logs Dashboard
|
||||
```
|
||||
|
||||
- Primary and replica instances show identical folder structure.
|
||||
- Both sync from the same `shared/` path.
|
||||
- The reverse proxy routes traffic to the primary (active) instance.
|
||||
- If the primary fails, the proxy automatically fails over to the replica (standby).
|
||||
- Users see the same dashboards regardless of which instance is serving traffic.
|
||||
|
||||
### Configuration parameters
|
||||
|
||||
Both primary and replica instances use identical parameters:
|
||||
|
||||
**Primary instance:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `shared/`
|
||||
|
||||
**Replica instance:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `shared/`
|
||||
|
||||
### How it works
|
||||
|
||||
1. Both instances stay synchronized through Git.
|
||||
2. Reverse proxy routes traffic to primary.
|
||||
3. Users edit on primary. Git Sync commits changes.
|
||||
4. Both instances pull the latest changes to keep the replica in sync.
|
||||
5. On primary failure, proxy fails over to replica.
|
||||
|
||||
### Failover considerations
|
||||
|
||||
- Health checks and monitoring.
|
||||
- Continuous syncing to minimize data loss.
|
||||
- Plan failback (automatic or manual).
|
||||
|
||||
## Load balancer scenario
|
||||
|
||||
Run multiple active Grafana instances behind a load balancer. All instances sync from the same Git location.
|
||||
|
||||
### Use it for
|
||||
|
||||
- **High traffic**: Your deployment needs to handle significant user load.
|
||||
- **Load distribution**: You want to distribute user requests across instances.
|
||||
- **Maximum availability**: You need service continuity during maintenance or failures.
|
||||
- **Scalability**: You want to add instances as load increases.
|
||||
- **Performance**: Users need fast response times under heavy load.
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ GitHub Repository │
|
||||
│ Repository: your-org/grafana-manifests │
|
||||
│ Branch: main │
|
||||
│ │
|
||||
│ grafana-manifests/ │
|
||||
│ └── shared/ │
|
||||
│ ├── dashboard-metrics.json │
|
||||
│ ├── dashboard-alerts.json │
|
||||
│ └── dashboard-logs.json │
|
||||
└─────────────────────────────────────────────────────┘
|
||||
↕ ↕
|
||||
Git Sync (shared/) Git Sync (shared/)
|
||||
↕ ↕
|
||||
┌────────────────────┐ ┌────────────────────┐
|
||||
│ Grafana Instance 1│ │ Grafana Instance 2│
|
||||
│ (Active) │ │ (Active) │
|
||||
│ │ │ │
|
||||
│ Repository: │ │ Repository: │
|
||||
│ - path: shared/ │ │ - path: shared/ │
|
||||
└────────────────────┘ └────────────────────┘
|
||||
│ │
|
||||
└───────────┬───────────────────┘
|
||||
↓
|
||||
┌──────────────────────┐
|
||||
│ Load Balancer │
|
||||
│ (Round Robin) │
|
||||
└──────────────────────┘
|
||||
```
|
||||
|
||||
### Repository structure
|
||||
|
||||
**In Git:**
|
||||
|
||||
```
|
||||
your-org/grafana-manifests
|
||||
└── shared/
|
||||
├── dashboard-metrics.json
|
||||
├── dashboard-alerts.json
|
||||
└── dashboard-logs.json
|
||||
```
|
||||
|
||||
**In Grafana Dashboards view (all instances):**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
└── 📁 grafana-manifests/
|
||||
├── Metrics Dashboard
|
||||
├── Alerts Dashboard
|
||||
└── Logs Dashboard
|
||||
```
|
||||
|
||||
- All instances show identical folder structure.
|
||||
- All instances sync from the same `shared/` path.
|
||||
- Load balancer distributes requests across all active instances.
|
||||
- Any instance can serve read requests.
|
||||
- Any instance can accept dashboard modifications.
|
||||
- Changes propagate to all instances through Git.
|
||||
|
||||
### Configuration parameters
|
||||
|
||||
All instances use identical parameters:
|
||||
|
||||
**Instance 1:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `shared/`
|
||||
|
||||
**Instance 2:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `shared/`
|
||||
|
||||
### How it works
|
||||
|
||||
1. All instances stay synchronized through Git.
|
||||
2. Load balancer distributes incoming traffic across all active instances.
|
||||
3. Users can view dashboards from any instance.
|
||||
4. When a user modifies a dashboard on any instance, Git Sync commits the change.
|
||||
5. All other instances pull the updated dashboard during their next sync cycle, or instantly if webhooks are configured.
|
||||
6. If one instance fails, load balancer stops routing traffic to it and remaining instances continue serving.
|
||||
|
||||
### Important considerations
|
||||
|
||||
- **Eventually consistent**: Due to sync intervals, instances may briefly have different dashboard versions.
|
||||
- **Concurrent edits**: Multiple users editing the same dashboard on different instances can cause conflicts.
|
||||
- **Database sharing**: Instances should share the same backend database for user sessions, preferences, and annotations.
|
||||
- **Stateless design**: Design for stateless operation where possible to maximize load balancing effectiveness.
|
||||
+93
@@ -0,0 +1,93 @@
|
||||
---
|
||||
title: Git Sync with regional replication
|
||||
menuTitle: Regional replication
|
||||
description: Synchronize multiple regional Grafana instances from a shared Git location
|
||||
weight: 30
|
||||
---
|
||||
|
||||
# Git Sync with regional replication
|
||||
|
||||
Deploy multiple Grafana instances across regions. Synchronize them with the same Git location to ensure consistent dashboards everywhere.
|
||||
|
||||
## Use it for
|
||||
|
||||
- **Geographic distribution**: You deploy Grafana close to users in different regions.
|
||||
- **Latency reduction**: Users need fast dashboard access from their location.
|
||||
- **Data sovereignty**: You keep dashboard data in specific regions.
|
||||
- **High availability**: You need dashboard availability across regions.
|
||||
- **Consistent experience**: All users see the same dashboards regardless of region.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ GitHub Repository │
|
||||
│ Repository: your-org/grafana-manifests │
|
||||
│ Branch: main │
|
||||
│ │
|
||||
│ grafana-manifests/ │
|
||||
│ └── shared/ │
|
||||
│ ├── dashboard-global.json │
|
||||
│ ├── dashboard-metrics.json │
|
||||
│ └── dashboard-logs.json │
|
||||
└─────────────────────────────────────────────────────┘
|
||||
↕ ↕
|
||||
Git Sync (shared/) Git Sync (shared/)
|
||||
↕ ↕
|
||||
┌────────────────────┐ ┌────────────────────┐
|
||||
│ US Region │ │ EU Region │
|
||||
│ Grafana │ │ Grafana │
|
||||
│ │ │ │
|
||||
│ Repository: │ │ Repository: │
|
||||
│ - path: shared/ │ │ - path: shared/ │
|
||||
└────────────────────┘ └────────────────────┘
|
||||
```
|
||||
|
||||
## Repository structure
|
||||
|
||||
**In Git:**
|
||||
|
||||
```
|
||||
your-org/grafana-manifests
|
||||
└── shared/
|
||||
├── dashboard-global.json
|
||||
├── dashboard-metrics.json
|
||||
└── dashboard-logs.json
|
||||
```
|
||||
|
||||
**In Grafana Dashboards view (all regions):**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
└── 📁 grafana-manifests/
|
||||
├── Global Dashboard
|
||||
├── Metrics Dashboard
|
||||
└── Logs Dashboard
|
||||
```
|
||||
|
||||
- All regional instances (US, EU, etc.) show identical folder structure
|
||||
- Same folder name "grafana-manifests" in every region
|
||||
- Same dashboards synced from the `shared/` path appear everywhere
|
||||
- Users in any region see the exact same dashboards with the same titles
|
||||
|
||||
## Configuration parameters
|
||||
|
||||
All regions:
|
||||
|
||||
- Repository: `your-org/grafana-manifests`
|
||||
- Branch: `main`
|
||||
- Path: `shared/`
|
||||
|
||||
## How it works
|
||||
|
||||
1. All regional instances pull dashboards from `shared/`.
|
||||
2. A change made in any region is committed to Git.
|
||||
3. Other regions pull updates during the next sync (or via webhooks).
|
||||
4. Changes propagate across regions per sync interval.
|
||||
|
||||
## Considerations
|
||||
|
||||
- **Write conflicts**: If users in different regions modify the same dashboard simultaneously, Git uses last-write-wins.
|
||||
- **Primary region**: Consider designating one region as the primary location for making dashboard changes.
|
||||
- **Propagation time**: Changes propagate to all regions within the configured sync interval, or instantly if webhooks are configured.
|
||||
- **Network reliability**: Ensure all regions have reliable connectivity to the Git repository.
|
||||
+169
@@ -0,0 +1,169 @@
|
||||
---
|
||||
title: Multiple team Git Sync
|
||||
menuTitle: Shared instance
|
||||
description: Use multiple Git repositories with one Grafana instance, one repository per team
|
||||
weight: 60
|
||||
---
|
||||
|
||||
# Git Sync in a Grafana instance shared by multiple teams
|
||||
|
||||
Use a single Grafana instance with multiple Repository resources, one per team. Each team manages its own dashboards while sharing Grafana.
|
||||
|
||||
## Use it for
|
||||
|
||||
- **Team autonomy**: Different teams manage their own dashboards independently.
|
||||
- **Organizational structure**: Dashboard organization aligns with team structure.
|
||||
- **Resource efficiency**: Multiple teams share Grafana infrastructure.
|
||||
- **Cost optimization**: You reduce infrastructure costs while maintaining team separation.
|
||||
- **Collaboration**: Teams can view each other’s dashboards while managing their own.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────┐ ┌─────────────────────────┐
|
||||
│ Platform Team Repo │ │ Data Team Repo │
|
||||
│ platform-dashboards │ │ data-dashboards │
|
||||
│ │ │ │
|
||||
│ platform-dashboards/ │ │ data-dashboards/ │
|
||||
│ └── grafana/ │ │ └── grafana/ │
|
||||
│ ├── k8s.json │ │ ├── pipeline.json │
|
||||
│ └── infra.json │ │ └── analytics.json │
|
||||
└─────────────────────────┘ └─────────────────────────┘
|
||||
↕ ↕
|
||||
Git Sync (grafana/) Git Sync (grafana/)
|
||||
↕ ↕
|
||||
┌──────────────────────────────────────┐
|
||||
│ Grafana Instance │
|
||||
│ │
|
||||
│ Repository 1: │
|
||||
│ - repo: platform-dashboards │
|
||||
│ → Creates "platform-dashboards" │
|
||||
│ │
|
||||
│ Repository 2: │
|
||||
│ - repo: data-dashboards │
|
||||
│ → Creates "data-dashboards" │
|
||||
└──────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Repository structure
|
||||
|
||||
**In Git (separate repositories):**
|
||||
|
||||
**Platform team repository:**
|
||||
|
||||
```
|
||||
your-org/platform-dashboards
|
||||
└── grafana/
|
||||
├── dashboard-k8s.json
|
||||
└── dashboard-infra.json
|
||||
```
|
||||
|
||||
**Data team repository:**
|
||||
|
||||
```
|
||||
your-org/data-dashboards
|
||||
└── grafana/
|
||||
├── dashboard-pipeline.json
|
||||
└── dashboard-analytics.json
|
||||
```
|
||||
|
||||
**In Grafana Dashboards view:**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
├── 📁 platform-dashboards/
|
||||
│ ├── Kubernetes Dashboard
|
||||
│ └── Infrastructure Dashboard
|
||||
└── 📁 data-dashboards/
|
||||
├── Pipeline Dashboard
|
||||
└── Analytics Dashboard
|
||||
```
|
||||
|
||||
- Two separate folders created (one per Repository resource).
|
||||
- Folder names derived from repository names.
|
||||
- Each team has complete control over their own repository.
|
||||
- Teams can independently manage permissions, branches, and workflows in their repos.
|
||||
- All teams can view each other's dashboards in Grafana but manage only their own.
|
||||
|
||||
## Configuration parameters
|
||||
|
||||
**Platform team repository:**
|
||||
|
||||
- **Repository**: `your-org/platform-dashboards`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
**Data team repository:**
|
||||
|
||||
- **Repository**: `your-org/data-dashboards`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
## How it works
|
||||
|
||||
1. Each team has their own Git repository for complete autonomy.
|
||||
2. Each repository resource in Grafana creates a separate folder.
|
||||
3. Platform team dashboards sync from `your-org/platform-dashboards` repository.
|
||||
4. Data team dashboards sync from `your-org/data-dashboards` repository.
|
||||
5. Teams can independently manage their repository settings, access controls, and workflows.
|
||||
6. All teams can view each other's dashboards in Grafana but edit only their own.
|
||||
|
||||
## Scale to more teams
|
||||
|
||||
Adding additional teams is straightforward. For a third team, create a new repository and configure:
|
||||
|
||||
- **Repository**: `your-org/security-dashboards`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
This creates a new "security-dashboards" folder in the same Grafana instance.
|
||||
|
||||
## Alternative: Shared repository with different paths
|
||||
|
||||
For teams that prefer sharing a single repository, use different paths to separate team dashboards:
|
||||
|
||||
**In Git:**
|
||||
|
||||
```
|
||||
your-org/grafana-manifests
|
||||
├── team-platform/
|
||||
│ ├── dashboard-k8s.json
|
||||
│ └── dashboard-infra.json
|
||||
└── team-data/
|
||||
├── dashboard-pipeline.json
|
||||
└── dashboard-analytics.json
|
||||
```
|
||||
|
||||
**Configuration:**
|
||||
|
||||
**Platform team:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `team-platform/`
|
||||
|
||||
**Data team:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `team-data/`
|
||||
|
||||
This approach provides simpler repository management but less isolation between teams.
|
||||
|
||||
## Alternative: Different branches per team
|
||||
|
||||
For teams wanting their own branch in a shared repository:
|
||||
|
||||
**Platform team:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `team-platform`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
**Data team:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `team-data`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
This allows teams to use Git branch workflows for collaboration while sharing the same repository.
|
||||
+86
@@ -0,0 +1,86 @@
|
||||
---
|
||||
title: Single instance Git Sync
|
||||
menuTitle: Single instance
|
||||
description: Synchronize a single Grafana instance with a Git repository
|
||||
weight: 10
|
||||
---
|
||||
|
||||
# Single instance Git Sync
|
||||
|
||||
Use a single Grafana instance synchronized with a Git repository. This is the foundation for Git Sync and helps you understand bidirectional synchronization.
|
||||
|
||||
## Use it for
|
||||
|
||||
- **Getting started**: You want to learn how Git Sync works before implementing complex scenarios.
|
||||
- **Personal projects**: Individual developers manage their own dashboards.
|
||||
- **Small teams**: You have a simple setup without multiple environments or complex workflows.
|
||||
- **Development environments**: You need quick prototyping and testing.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ GitHub Repository │
|
||||
│ Repository: your-org/grafana-manifests │
|
||||
│ Branch: main │
|
||||
│ │
|
||||
│ grafana-manifests/ │
|
||||
│ └── grafana/ │
|
||||
│ ├── dashboard-1.json │
|
||||
│ ├── dashboard-2.json │
|
||||
│ └── dashboard-3.json │
|
||||
└─────────────────────────────────────────────────────┘
|
||||
↕
|
||||
Git Sync (bidirectional)
|
||||
↕
|
||||
┌─────────────────────────────┐
|
||||
│ Grafana Instance │
|
||||
│ │
|
||||
│ Repository Resource: │
|
||||
│ - url: grafana-manifests │
|
||||
│ - branch: main │
|
||||
│ - path: grafana/ │
|
||||
│ │
|
||||
│ Creates folder: │
|
||||
│ "grafana-manifests" │
|
||||
└─────────────────────────────┘
|
||||
```
|
||||
|
||||
## Repository structure
|
||||
|
||||
**In Git:**
|
||||
|
||||
```
|
||||
your-org/grafana-manifests
|
||||
└── grafana/
|
||||
├── dashboard-1.json
|
||||
├── dashboard-2.json
|
||||
└── dashboard-3.json
|
||||
```
|
||||
|
||||
**In Grafana Dashboards view:**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
└── 📁 grafana-manifests/
|
||||
├── Dashboard 1
|
||||
├── Dashboard 2
|
||||
└── Dashboard 3
|
||||
```
|
||||
|
||||
- A folder named "grafana-manifests" (from repository name) contains all synced dashboards.
|
||||
- Each JSON file becomes a dashboard with its title displayed in the folder.
|
||||
- Users browse dashboards organized under this folder structure.
|
||||
|
||||
## Configuration parameters
|
||||
|
||||
Configure your Grafana instance to synchronize with:
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
## How it works
|
||||
|
||||
1. **From Grafana to Git**: When users create or modify dashboards in Grafana, Git Sync commits changes to the `grafana/` directory on the `main` branch.
|
||||
2. **From Git to Grafana**: When dashboard JSON files are added or modified in the `grafana/` directory, Git Sync pulls these changes into Grafana.
|
||||
@@ -367,5 +367,6 @@ To learn more about using Git Sync:
|
||||
|
||||
- [Work with provisioned dashboards](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/provisioned-dashboards/)
|
||||
- [Manage provisioned repositories with Git Sync](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/use-git-sync/)
|
||||
- [Git Sync deployment scenarios](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios)
|
||||
- [Export resources](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/export-resources/)
|
||||
- [grafanactl documentation](https://grafana.github.io/grafanactl/)
|
||||
|
||||
@@ -127,7 +127,13 @@ An instance can be in one of the following Git Sync states:
|
||||
|
||||
## Common use cases
|
||||
|
||||
You can use Git Sync in the following scenarios.
|
||||
{{< admonition type="note" >}}
|
||||
|
||||
Refer to [Git Sync deployment scenarios](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios) for sample scenarios, including architecture and configuration details.
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
You can use Git Sync for the following use cases:
|
||||
|
||||
### Version control and auditing
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ labels:
|
||||
- cloud
|
||||
title: Manage provisioned repositories with Git Sync
|
||||
menuTitle: Manage repositories with Git Sync
|
||||
weight: 120
|
||||
weight: 400
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/observability-as-code/provision-resources/use-git-sync/
|
||||
aliases:
|
||||
- ../../../observability-as-code/provision-resources/use-git-sync/ # /docs/grafana/next/observability-as-code/provision-resources/use-git-sync/
|
||||
|
||||
@@ -3,7 +3,6 @@ aliases:
|
||||
- ../data-sources/azure-monitor/
|
||||
- ../features/datasources/azuremonitor/
|
||||
- azuremonitor/
|
||||
- azuremonitor/deprecated-application-insights/
|
||||
description: Guide for using Azure Monitor in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
@@ -23,6 +22,7 @@ labels:
|
||||
menuTitle: Azure Monitor
|
||||
title: Azure Monitor data source
|
||||
weight: 300
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
configure-grafana-feature-toggles:
|
||||
- pattern: /docs/grafana/
|
||||
@@ -49,6 +49,11 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
transform-data:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
|
||||
configure-grafana-azure:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
|
||||
@@ -63,295 +68,98 @@ refs:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-access/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-access/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
query-editor-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
template-variables-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
alerting-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
troubleshooting-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
annotations-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
---
|
||||
|
||||
# Azure Monitor data source
|
||||
|
||||
Grafana ships with built-in support for Azure Monitor, the Azure service to maximize the availability and performance of applications and services in the Azure Cloud.
|
||||
This topic explains configuring and querying specific to the Azure Monitor data source.
|
||||
The Azure Monitor data source plugin allows you to query and visualize data from Azure Monitor, the Azure service to maximize the availability and performance of applications and services in the Azure Cloud.
|
||||
|
||||
For instructions on how to add a data source to Grafana, refer to the [administration documentation](ref:data-source-management).
|
||||
Only users with the organization administrator role can add data sources.
|
||||
## Supported Azure clouds
|
||||
|
||||
Once you've added the Azure Monitor data source, you can [configure it](#configure-the-data-source) so that your Grafana instance's users can create queries in its [query editor](query-editor/) when they [build dashboards](ref:build-dashboards) and use [Explore](ref:explore).
|
||||
The Azure Monitor data source supports the following Azure cloud environments:
|
||||
|
||||
The Azure Monitor data source supports visualizing data from four Azure services:
|
||||
- **Azure** - Azure public cloud (default)
|
||||
- **Azure US Government** - Azure Government cloud
|
||||
- **Azure China** - Azure China cloud operated by 21Vianet
|
||||
|
||||
- **Azure Monitor Metrics:** Collect numeric data from resources in your Azure account.
|
||||
- **Azure Monitor Logs:** Collect log and performance data from your Azure account, and query using the Kusto Query Language (KQL).
|
||||
- **Azure Resource Graph:** Query your Azure resources across subscriptions.
|
||||
- **Azure Monitor Application Insights:** Collect trace logging data and other application performance metrics.
|
||||
## Supported Azure services
|
||||
|
||||
## Configure the data source
|
||||
The Azure Monitor data source supports the following Azure services:
|
||||
|
||||
**To access the data source configuration page:**
|
||||
| Service | Description |
|
||||
| ------------------------------- | --------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Azure Monitor Metrics** | Collect numeric data from resources in your Azure account. Supports dimensions, aggregations, and time grain configuration. |
|
||||
| **Azure Monitor Logs** | Collect log and performance data from your Azure account using the Kusto Query Language (KQL). |
|
||||
| **Azure Resource Graph** | Query your Azure resources across subscriptions using KQL. Useful for inventory, compliance, and resource management. |
|
||||
| **Application Insights Traces** | Collect distributed trace data and correlate requests across your application components. |
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Under Your connections, click **Data sources**.
|
||||
1. Enter `Azure Monitor` in the search bar.
|
||||
1. Click **Azure Monitor**.
|
||||
## Get started
|
||||
|
||||
The **Settings** tab of the data source is displayed.
|
||||
The following documents will help you get started with the Azure Monitor data source:
|
||||
|
||||
### Configure Azure Active Directory (AD) authentication
|
||||
- [Configure the Azure Monitor data source](ref:configure-azure-monitor) - Set up authentication and connect to Azure
|
||||
- [Azure Monitor query editor](ref:query-editor-azure-monitor) - Create and edit queries for Metrics, Logs, Traces, and Resource Graph
|
||||
- [Template variables](ref:template-variables-azure-monitor) - Create dynamic dashboards with Azure Monitor variables
|
||||
- [Alerting](ref:alerting-azure-monitor) - Create alert rules using Azure Monitor data
|
||||
- [Troubleshooting](ref:troubleshooting-azure-monitor) - Solve common configuration and query errors
|
||||
|
||||
You must create an app registration and service principal in Azure AD to authenticate the data source.
|
||||
For configuration details, refer to the [Azure documentation for service principals](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in).
|
||||
## Additional features
|
||||
|
||||
The app registration you create must have the `Reader` role assigned on the subscription.
|
||||
For more information, refer to [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
|
||||
After you have configured the Azure Monitor data source, you can:
|
||||
|
||||
If you host Grafana in Azure, such as in App Service or Azure Virtual Machines, you can configure the Azure Monitor data source to use Managed Identity for secure authentication without entering credentials into Grafana.
|
||||
For details, refer to [Configuring using Managed Identity](#configuring-using-managed-identity).
|
||||
- Add [Annotations](ref:annotations-azure-monitor) to overlay Azure log events on your graphs.
|
||||
- Configure and use [Template variables](ref:template-variables-azure-monitor) for dynamic dashboards.
|
||||
- Add [Transformations](ref:transform-data) to manipulate query results.
|
||||
- Set up [Alerting](ref:alerting-azure-monitor) and recording rules using Metrics, Logs, Traces, and Resource Graph queries.
|
||||
- Use [Explore](ref:explore) to investigate your Azure data without building a dashboard.
|
||||
|
||||
You can configure the Azure Monitor data source to use Workload Identity for secure authentication without entering credentials into Grafana if you host Grafana in a Kubernetes environment, such as AKS, and require access to Azure resources.
|
||||
For details, refer to [Configuring using Workload Identity](#configuring-using-workload-identity).
|
||||
## Pre-built dashboards
|
||||
|
||||
| Name | Description |
|
||||
| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Enables Managed Identity. Selecting Managed Identity hides many of the other fields. For details, see [Configuring using Managed Identity](#configuring-using-managed-identity). |
|
||||
| **Azure Cloud** | Sets the national cloud for your Azure account. For most users, this is the default "Azure". For details, see the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/develop/authentication-national-cloud). |
|
||||
| **Directory (tenant) ID** | Sets the directory/tenant ID for the Azure AD app registration to use for authentication. For details, see the [Azure tenant and app ID docs](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in). |
|
||||
| **Application (client) ID** | Sets the application/client ID for the Azure AD app registration to use for authentication. |
|
||||
| **Client secret** | Sets the application client secret for the Azure AD app registration to use for authentication. For details, see the [Azure application secret docs](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret). |
|
||||
| **Default subscription** | _(Optional)_ Sets a default subscription for template variables to use. |
|
||||
| **Enable Basic Logs** | Allows this data source to execute queries against [Basic Logs tables](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/basic-logs-query?tabs=portal-1) in supported Log Analytics Workspaces. These queries may incur additional costs. |
|
||||
The Azure Monitor plugin includes the following pre-built dashboards:
|
||||
|
||||
### Provision the data source
|
||||
- **Azure Monitor Overview** - Displays key metrics across your Azure subscriptions and resources.
|
||||
- **Azure Storage Account** - Shows storage account metrics including availability, latency, and transactions.
|
||||
|
||||
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
|
||||
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-data-sources).
|
||||
To import a pre-built dashboard:
|
||||
|
||||
#### Provisioning examples
|
||||
1. Go to **Connections** > **Data sources**.
|
||||
1. Select your Azure Monitor data source.
|
||||
1. Click the **Dashboards** tab.
|
||||
1. Click **Import** next to the dashboard you want to use.
|
||||
|
||||
**Azure AD App Registration (client secret):**
|
||||
## Related resources
|
||||
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: clientsecret
|
||||
cloudName: azuremonitor # See table below
|
||||
tenantId: <tenant-id>
|
||||
clientId: <client-id>
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
secureJsonData:
|
||||
clientSecret: <client-secret>
|
||||
version: 1
|
||||
```
|
||||
|
||||
**Managed Identity:**
|
||||
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: msi
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
**Workload Identity:**
|
||||
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: workloadidentity
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
**Current User:**
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The `oauthPassThru` property is required for current user authentication to function.
|
||||
Additionally, `disableGrafanaCache` is necessary to prevent the data source from returning cached responses for resources that users don't have access to.
|
||||
{{< /admonition >}}
|
||||
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: currentuser
|
||||
oauthPassThru: true
|
||||
disableGrafanaCache: true
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
#### Supported cloud names
|
||||
|
||||
| Azure Cloud | `cloudName` Value |
|
||||
| ------------------------------------ | -------------------------- |
|
||||
| **Microsoft Azure public cloud** | `azuremonitor` (_Default_) |
|
||||
| **Microsoft Chinese national cloud** | `chinaazuremonitor` |
|
||||
| **US Government cloud** | `govazuremonitor` |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Cloud names for current user authentication differ from the `cloudName` values in the preceding table.
|
||||
The public cloud name is `AzureCloud`, the Chinese national cloud name is `AzureChinaCloud`, and the US Government cloud name is `AzureUSGovernment`.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Configure Managed Identity
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Managed Identity is available only in [Azure Managed Grafana](https://azure.microsoft.com/en-us/products/managed-grafana) or Grafana OSS/Enterprise when deployed in Azure. It is not available in Grafana Cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
You can use managed identity to configure Azure Monitor in Grafana if you host Grafana in Azure (such as an App Service or with Azure Virtual Machines) and have managed identity enabled on your VM.
|
||||
This lets you securely authenticate data sources without manually configuring credentials via Azure AD App Registrations.
|
||||
For details on Azure managed identities, refer to the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
|
||||
|
||||
**To enable managed identity for Grafana:**
|
||||
|
||||
1. Set the `managed_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
```
|
||||
|
||||
2. In the Azure Monitor data source configuration, set **Authentication** to **Managed Identity**.
|
||||
|
||||
This hides the directory ID, application ID, and client secret fields, and the data source uses managed identity to authenticate to Azure Monitor Metrics and Logs, and Azure Resource Graph.
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-managed-identity-2.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Managed Identity authentication" >}}
|
||||
|
||||
3. You can set the `managed_identity_client_id` field in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure) to allow a user-assigned managed identity to be used instead of the default system-assigned identity.
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
managed_identity_client_id = USER_ASSIGNED_IDENTITY_CLIENT_ID
|
||||
```
|
||||
|
||||
### Configure Workload Identity
|
||||
|
||||
You can use workload identity to configure Azure Monitor in Grafana if you host Grafana in a Kubernetes environment, such as AKS, in conjunction with managed identities.
|
||||
This lets you securely authenticate data sources without manually configuring credentials via Azure AD App Registrations.
|
||||
For details on workload identity, refer to the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/).
|
||||
|
||||
**To enable workload identity for Grafana:**
|
||||
|
||||
1. Set the `workload_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
```
|
||||
|
||||
2. In the Azure Monitor data source configuration, set **Authentication** to **Workload Identity**.
|
||||
|
||||
This hides the directory ID, application ID, and client secret fields, and the data source uses workload identity to authenticate to Azure Monitor Metrics and Logs, and Azure Resource Graph.
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-workload-identity.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Workload Identity authentication" >}}
|
||||
|
||||
3. There are additional configuration variables that can control the authentication method. `workload_identity_tenant_id` represents the Azure AD tenant that contains the managed identity, `workload_identity_client_id` represents the client ID of the managed identity if it differs from the default client ID, and `workload_identity_token_file` represents the path to the token file. Refer to the [documentation](https://azure.github.io/azure-workload-identity/docs/) for more information on what values these variables should use, if any.
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
workload_identity_tenant_id = IDENTITY_TENANT_ID
|
||||
workload_identity_client_id = IDENTITY_CLIENT_ID
|
||||
workload_identity_token_file = TOKEN_FILE_PATH
|
||||
```
|
||||
|
||||
### Configure Current User authentication
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Current user authentication is an [experimental feature](/docs/release-life-cycle). Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Contact Grafana Support to enable this feature in Grafana Cloud. Aspects of Grafana may not work as expected when using this authentication method.
|
||||
{{< /admonition >}}
|
||||
|
||||
If your Grafana instance is configured with Azure Entra (formerly Active Directory) authentication for login, this authentication method can be used to forward the currently logged in user's credentials to the data source. The user's credentials will then be used when requesting data from the data source. For details on how to configure your Grafana instance using Azure Entra, refer to the [documentation](ref:configure-grafana-azure-auth).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Additional configuration is required to ensure that the App Registration used to login a user via Azure provides an access token with the permissions required by the data source.
|
||||
|
||||
The App Registration must be configured to issue both **Access Tokens** and **ID Tokens**.
|
||||
|
||||
1. In the Azure Portal, open the App Registration that requires configuration.
|
||||
2. Select **Authentication** in the side menu.
|
||||
3. Under **Implicit grant and hybrid flows** check both the **Access tokens** and **ID tokens** boxes.
|
||||
4. Save the changes to ensure the App Registration is updated.
|
||||
|
||||
The App Registration must also be configured with additional **API Permissions** to provide authenticated users with access to the APIs used by the data source.
|
||||
|
||||
1. In the Azure Portal, open the App Registration that requires configuration.
|
||||
1. Select **API Permissions** in the side menu.
|
||||
1. Ensure the `openid`, `profile`, `email`, and `offline_access` permissions are present under the **Microsoft Graph** section. If not, they must be added.
|
||||
1. Select **Add a permission** and choose the following permissions. They must be added individually. Refer to the [Azure documentation](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-configure-app-access-web-apis) for more information.
|
||||
- Select **Azure Service Management** > **Delegated permissions** > `user_impersonation` > **Add permissions**
|
||||
- Select **APIs my organization uses** > Search for **Log Analytics API** and select it > **Delegated permissions** > `Data.Read` > **Add permissions**
|
||||
|
||||
Once all permissions have been added, the Azure authentication section in Grafana must be updated. The `scopes` section must be updated to include the `.default` scope to ensure that a token with access to all APIs declared on the App Registration is requested by Grafana. Once updated, the scopes value should equal: `.default openid email profile`.
|
||||
{{< /admonition >}}
|
||||
|
||||
This method of authentication doesn't inherently support all backend functionality as a user's credentials won't be in scope.
|
||||
Affected functionality includes alerting, reporting, and recorded queries.
|
||||
In order to support backend queries when using a data source configured with current user authentication, you can configure service credentials.
|
||||
Also, note that query and resource caching is disabled by default for data sources using current user authentication.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
To configure fallback service credentials the [feature toggle](ref:configure-grafana-feature-toggles) `idForwarding` must be set to `true` and `user_identity_fallback_credentials_enabled` must be enabled in the [Azure configuration section](ref:configure-grafana-azure) (enabled by default when `user_identity_enabled` is set to `true`).
|
||||
{{< /admonition >}}
|
||||
|
||||
Permissions for fallback credentials may need to be broad to appropriately support backend functionality.
|
||||
For example, an alerting query created by a user is dependent on their permissions.
|
||||
If a user tries to create an alert for a resource that the fallback credentials can't access, the alert will fail.
|
||||
|
||||
**To enable current user authentication for Grafana:**
|
||||
|
||||
1. Set the `user_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
|
||||
By default this will also enable fallback service credentials.
|
||||
If you want to disable service credentials at the instance level, set `user_identity_fallback_credentials_enabled` to `false`.
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
user_identity_enabled = true
|
||||
```
|
||||
|
||||
1. In the Azure Monitor data source configuration, set **Authentication** to **Current User**.
|
||||
If fallback service credentials are enabled at the instance level, an additional configuration section is visible that you can use to enable or disable using service credentials for this data source.
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-current-user.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Current User authentication" >}}
|
||||
|
||||
1. If you want backend functionality to work with this data source, enable service credentials and configure the data source using the most applicable credentials for your circumstances.
|
||||
|
||||
## Query the data source
|
||||
|
||||
The Azure Monitor data source can query data from Azure Monitor Metrics and Logs, the Azure Resource Graph, and Application Insights Traces. Each source has its own specialized query editor.
|
||||
|
||||
For details, see the [query editor documentation](query-editor/).
|
||||
|
||||
## Use template variables
|
||||
|
||||
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
|
||||
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
|
||||
Grafana refers to such variables as template variables.
|
||||
|
||||
For details, see the [template variables documentation](template-variables/).
|
||||
|
||||
## Application Insights and Insights Analytics (removed)
|
||||
|
||||
Until Grafana v8.0, you could query the same Azure Application Insights data using Application Insights and Insights Analytics.
|
||||
|
||||
These queries were deprecated in Grafana v7.5. In Grafana v8.0, Application Insights and Insights Analytics were made read-only in favor of querying this data through Metrics and Logs. These query methods were completely removed in Grafana v9.0.
|
||||
|
||||
If you're upgrading from a Grafana version prior to v9.0 and relied on Application Insights and Analytics queries, refer to the [Grafana v9.0 documentation](/docs/grafana/v9.0/datasources/azuremonitor/deprecated-application-insights/) for help migrating these queries to Metrics and Logs queries.
|
||||
- [Azure Monitor documentation](https://docs.microsoft.com/en-us/azure/azure-monitor/)
|
||||
- [Kusto Query Language (KQL) reference](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/)
|
||||
- [Grafana community forum](https://community.grafana.com/)
|
||||
|
||||
@@ -0,0 +1,262 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/alerting/
|
||||
description: Set up alerts using Azure Monitor data in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- azure
|
||||
- monitor
|
||||
- alerting
|
||||
- alerts
|
||||
- metrics
|
||||
- logs
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Alerting
|
||||
title: Azure Monitor alerting
|
||||
weight: 500
|
||||
refs:
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
alerting-fundamentals:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/
|
||||
create-alert-rule:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
grafana-managed-recording-rules:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
troubleshoot:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
---
|
||||
|
||||
# Azure Monitor alerting
|
||||
|
||||
The Azure Monitor data source supports [Grafana Alerting](ref:alerting) and [Grafana-managed recording rules](ref:grafana-managed-recording-rules), allowing you to create alert rules based on Azure metrics, logs, traces, and resource data. You can monitor your Azure environment and receive notifications when specific conditions are met.
|
||||
|
||||
## Before you begin
|
||||
|
||||
- Ensure you have the appropriate permissions to create alert rules in Grafana.
|
||||
- Verify your Azure Monitor data source is configured and working correctly.
|
||||
- Familiarize yourself with [Grafana Alerting concepts](ref:alerting-fundamentals).
|
||||
- **Important**: Verify your data source uses a supported authentication method. Refer to [Authentication requirements](#authentication-requirements).
|
||||
|
||||
## Supported query types for alerting
|
||||
|
||||
All Azure Monitor query types support alerting and recording rules:
|
||||
|
||||
| Query type | Use case | Notes |
|
||||
| -------------------- | -------------------------------------------------- | -------------------------------------------------------- |
|
||||
| Metrics | Threshold-based alerts on Azure resource metrics | Best suited for alerting; returns time-series data |
|
||||
| Logs | Alert on log patterns, error counts, or thresholds | Use KQL to aggregate data into numeric values |
|
||||
| Azure Resource Graph | Alert on resource state or configuration changes | Use count aggregations to return numeric data |
|
||||
| Traces | Alert on trace data and application performance | Use aggregations to return numeric values for evaluation |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Alert queries must return numeric data that Grafana can evaluate against a threshold. Queries that return only text or non-numeric data cannot be used directly for alerting.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Authentication requirements
|
||||
|
||||
Alerting and recording rules run as background processes without a user context. This means they require service-level authentication and don't work with all authentication methods.
|
||||
|
||||
| Authentication method | Supported |
|
||||
| -------------------------------- | ------------------------------------- |
|
||||
| App Registration (client secret) | ✓ |
|
||||
| Managed Identity | ✓ |
|
||||
| Workload Identity | ✓ |
|
||||
| Current User | ✓ (with fallback service credentials) |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
If you use **Current User** authentication, you must configure **fallback service credentials** for alerting and recording rules to function. User credentials aren't available for background operations, so Grafana uses the fallback credentials instead. Refer to [configure the data source](ref:configure-azure-monitor) for details on setting up fallback credentials.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Create an alert rule
|
||||
|
||||
To create an alert rule using Azure Monitor data:
|
||||
|
||||
1. Go to **Alerting** > **Alert rules**.
|
||||
1. Click **New alert rule**.
|
||||
1. Enter a name for your alert rule.
|
||||
1. In the **Define query and alert condition** section:
|
||||
- Select your Azure Monitor data source.
|
||||
- Configure your query (for example, a Metrics query for CPU usage or a Logs query using KQL).
|
||||
- Add a **Reduce** expression if your query returns multiple series.
|
||||
- Add a **Threshold** expression to define the alert condition.
|
||||
1. Configure the **Set evaluation behavior**:
|
||||
- Select or create a folder and evaluation group.
|
||||
- Set the evaluation interval (how often the alert is checked).
|
||||
- Set the pending period (how long the condition must be true before firing).
|
||||
1. Add labels and annotations to provide context for notifications.
|
||||
1. Click **Save rule**.
|
||||
|
||||
For detailed instructions, refer to [Create a Grafana-managed alert rule](ref:create-alert-rule).
|
||||
|
||||
## Example: VM CPU usage alert
|
||||
|
||||
This example creates an alert that fires when virtual machine CPU usage exceeds 80%:
|
||||
|
||||
1. Create a new alert rule.
|
||||
1. Configure the query:
|
||||
- **Service**: Metrics
|
||||
- **Resource**: Select your virtual machine
|
||||
- **Metric namespace**: `Microsoft.Compute/virtualMachines`
|
||||
- **Metric**: `Percentage CPU`
|
||||
- **Aggregation**: `Average`
|
||||
1. Add expressions:
|
||||
- **Reduce**: Last (to get the most recent data point)
|
||||
- **Threshold**: Is above 80
|
||||
1. Set evaluation to run every 1 minute with a 5-minute pending period.
|
||||
1. Save the rule.
|
||||
|
||||
## Example: Error log count alert
|
||||
|
||||
This example alerts when error logs exceed a threshold using a KQL query:
|
||||
|
||||
1. Create a new alert rule.
|
||||
1. Configure the query:
|
||||
- **Service**: Logs
|
||||
- **Resource**: Select your Log Analytics workspace
|
||||
- **Query**:
|
||||
```kusto
|
||||
AppExceptions
|
||||
| where TimeGenerated > ago(5m)
|
||||
| summarize ErrorCount = count() by bin(TimeGenerated, 1m)
|
||||
```
|
||||
1. Add expressions:
|
||||
- **Reduce**: Max (to get the highest count in the period)
|
||||
- **Threshold**: Is above 10
|
||||
1. Set evaluation to run every 5 minutes.
|
||||
1. Save the rule.
|
||||
|
||||
## Example: Resource count alert
|
||||
|
||||
This example alerts when the number of running virtual machines drops below a threshold using Azure Resource Graph:
|
||||
|
||||
1. Create a new alert rule.
|
||||
1. Configure the query:
|
||||
- **Service**: Azure Resource Graph
|
||||
- **Subscriptions**: Select your subscriptions
|
||||
- **Query**:
|
||||
|
||||
```kusto
|
||||
resources
|
||||
| where type == "microsoft.compute/virtualmachines"
|
||||
| where properties.extended.instanceView.powerState.displayStatus == "VM running"
|
||||
| summarize RunningVMs = count()
|
||||
```
|
||||
|
||||
1. Add expressions:
|
||||
- **Reduce**: Last
|
||||
- **Threshold**: Is below 3
|
||||
1. Set evaluation to run every 5 minutes.
|
||||
1. Save the rule.
|
||||
|
||||
## Best practices
|
||||
|
||||
Follow these recommendations to create reliable and efficient alerts with Azure Monitor data.
|
||||
|
||||
### Use appropriate query intervals
|
||||
|
||||
- Set the alert evaluation interval to be greater than or equal to the minimum data resolution from Azure Monitor.
|
||||
- Azure Monitor Metrics typically have 1-minute granularity at minimum.
|
||||
- Avoid very short intervals (less than 1 minute) as they may cause evaluation timeouts or miss data points.
|
||||
|
||||
### Reduce multiple series
|
||||
|
||||
When your Azure Monitor query returns multiple time series (for example, CPU usage across multiple VMs), use the **Reduce** expression to aggregate them:
|
||||
|
||||
- **Last**: Use the most recent value
|
||||
- **Mean**: Average across all series
|
||||
- **Max/Min**: Use the highest or lowest value
|
||||
- **Sum**: Total across all series
|
||||
|
||||
### Optimize Log Analytics queries
|
||||
|
||||
For Logs queries used in alerting:
|
||||
|
||||
- Use `summarize` to aggregate data into numeric values.
|
||||
- Include appropriate time filters using `ago()` or `TimeGenerated`.
|
||||
- Avoid returning large result sets; aggregate data in the query.
|
||||
- Test queries in Explore before using them in alert rules.
|
||||
|
||||
### Handle no data conditions
|
||||
|
||||
Configure what happens when no data is returned:
|
||||
|
||||
1. In the alert rule, find **Configure no data and error handling**.
|
||||
1. Choose an appropriate action:
|
||||
- **No Data**: Keep the alert in its current state
|
||||
- **Alerting**: Treat no data as an alert condition
|
||||
- **OK**: Treat no data as a healthy state
|
||||
|
||||
### Test queries before alerting
|
||||
|
||||
Always verify your query returns expected data before creating an alert:
|
||||
|
||||
1. Go to **Explore**.
|
||||
1. Select your Azure Monitor data source.
|
||||
1. Run the query you plan to use for alerting.
|
||||
1. Confirm the data format and values are correct.
|
||||
1. Verify the query returns numeric data suitable for threshold evaluation.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If your Azure Monitor alerts aren't working as expected, use the following sections to diagnose and resolve common issues.
|
||||
|
||||
### Alerts not firing
|
||||
|
||||
- Verify the data source uses a supported authentication method. If using Current User authentication, ensure fallback service credentials are configured.
|
||||
- Check that the query returns numeric data in Explore.
|
||||
- Ensure the evaluation interval allows enough time for data to be available.
|
||||
- Review the alert rule's health and any error messages in the Alerting UI.
|
||||
|
||||
### Authentication errors in alert evaluation
|
||||
|
||||
If you see authentication errors when alerts evaluate:
|
||||
|
||||
- Confirm the data source is configured with App Registration, Managed Identity, Workload Identity, or Current User with fallback service credentials.
|
||||
- If using App Registration, verify the client secret hasn't expired.
|
||||
- If using Current User, verify that fallback service credentials are configured and valid.
|
||||
- Check that the service principal has appropriate permissions on Azure resources.
|
||||
|
||||
### Query timeout errors
|
||||
|
||||
- Simplify complex KQL queries.
|
||||
- Reduce the time range in Log Analytics queries.
|
||||
- Add more specific filters to narrow result sets.
|
||||
|
||||
For additional troubleshooting help, refer to [Troubleshoot Azure Monitor](ref:troubleshoot).
|
||||
|
||||
## Additional resources
|
||||
|
||||
- [Grafana Alerting documentation](ref:alerting)
|
||||
- [Create alert rules](ref:create-alert-rule)
|
||||
- [Azure Monitor query editor](ref:query-editor)
|
||||
- [Grafana-managed recording rules](ref:grafana-managed-recording-rules)
|
||||
@@ -0,0 +1,218 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/annotations/
|
||||
description: Use annotations with the Azure Monitor data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- azure
|
||||
- monitor
|
||||
- annotations
|
||||
- events
|
||||
- logs
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Annotations
|
||||
title: Azure Monitor annotations
|
||||
weight: 450
|
||||
refs:
|
||||
annotate-visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
---
|
||||
|
||||
# Azure Monitor annotations
|
||||
|
||||
[Annotations](ref:annotate-visualizations) overlay rich event information on top of graphs. You can use Azure Monitor Log Analytics queries to create annotations that mark important events, deployments, alerts, or other significant occurrences on your dashboards.
|
||||
|
||||
## Before you begin
|
||||
|
||||
- Ensure you have configured the Azure Monitor data source.
|
||||
- You need access to a Log Analytics workspace containing the data you want to use for annotations.
|
||||
- Annotations use Log Analytics (KQL) queries only. Metrics, Traces, and Azure Resource Graph queries are not supported for annotations.
|
||||
|
||||
## Create an annotation query
|
||||
|
||||
To add an Azure Monitor annotation to a dashboard:
|
||||
|
||||
1. Open the dashboard where you want to add annotations.
|
||||
1. Click **Dashboard settings** (gear icon) in the top navigation.
|
||||
1. Select **Annotations** in the left menu.
|
||||
1. Click **Add annotation query**.
|
||||
1. Enter a **Name** for the annotation (e.g., "Azure Activity", "Deployments").
|
||||
1. Select your **Azure Monitor** data source.
|
||||
1. Choose the **Logs** service.
|
||||
1. Select a **Resource** (Log Analytics workspace or Application Insights resource).
|
||||
1. Write a KQL query that returns the annotation data.
|
||||
1. Click **Apply** to save.
|
||||
|
||||
## Query requirements
|
||||
|
||||
Your KQL query should return columns that Grafana can use to create annotations:
|
||||
|
||||
| Column | Required | Description |
|
||||
| ------------------ | ----------- | ------------------------------------------------------------------------------------------------ |
|
||||
| `TimeGenerated` | Yes | The timestamp for the annotation. Grafana uses this to position the annotation on the time axis. |
|
||||
| `Text` | Recommended | The annotation text displayed when you hover over or click the annotation. |
|
||||
| Additional columns | Optional | Any other columns returned become annotation tags. |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Always include a time filter in your query to limit results to the dashboard's time range. Use the `$__timeFilter()` macro.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Annotation query examples
|
||||
|
||||
The following examples demonstrate common annotation use cases.
|
||||
|
||||
### Azure Activity Log events
|
||||
|
||||
Display Azure Activity Log events such as resource modifications, deployments, and administrative actions:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where Level == "Error" or Level == "Warning" or CategoryValue == "Administrative"
|
||||
| project TimeGenerated, Text=OperationNameValue, Level, ResourceGroup, Caller
|
||||
| order by TimeGenerated desc
|
||||
| take 100
|
||||
```
|
||||
|
||||
### Deployment events
|
||||
|
||||
Show deployment-related activity:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where OperationNameValue contains "deployments"
|
||||
| project TimeGenerated, Text=strcat("Deployment: ", OperationNameValue), Status=ActivityStatusValue, ResourceGroup
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### Application Insights exceptions
|
||||
|
||||
Mark application exceptions as annotations:
|
||||
|
||||
```kusto
|
||||
AppExceptions
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| project TimeGenerated, Text=strcat(ProblemId, ": ", OuterMessage), SeverityLevel, AppRoleName
|
||||
| order by TimeGenerated desc
|
||||
| take 50
|
||||
```
|
||||
|
||||
### Custom events from Application Insights
|
||||
|
||||
Display custom events logged by your application:
|
||||
|
||||
```kusto
|
||||
AppEvents
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where Name == "DeploymentStarted" or Name == "DeploymentCompleted"
|
||||
| project TimeGenerated, Text=Name, AppRoleName
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### Security alerts
|
||||
|
||||
Show security-related alerts:
|
||||
|
||||
```kusto
|
||||
SecurityAlert
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| project TimeGenerated, Text=AlertName, Severity=AlertSeverity, Description
|
||||
| order by TimeGenerated desc
|
||||
| take 50
|
||||
```
|
||||
|
||||
### Resource health events
|
||||
|
||||
Display resource health status changes:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where CategoryValue == "ResourceHealth"
|
||||
| project TimeGenerated, Text=OperationNameValue, Status=ActivityStatusValue, ResourceId
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### VM start and stop events
|
||||
|
||||
Mark virtual machine state changes:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where OperationNameValue has_any ("start", "deallocate", "restart")
|
||||
| where ResourceProviderValue == "MICROSOFT.COMPUTE"
|
||||
| project TimeGenerated, Text=OperationNameValue, VM=Resource, Status=ActivityStatusValue
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### Autoscale events
|
||||
|
||||
Show autoscale operations:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where OperationNameValue contains "autoscale"
|
||||
| project TimeGenerated, Text=strcat("Autoscale: ", OperationNameValue), Status=ActivityStatusValue, ResourceGroup
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
## Customize annotation appearance
|
||||
|
||||
After creating an annotation query, you can customize its appearance:
|
||||
|
||||
| Setting | Description |
|
||||
| ------------- | -------------------------------------------------------------------------------------------------------- |
|
||||
| **Color** | Choose a color for the annotation markers. Use different colors to distinguish between annotation types. |
|
||||
| **Show in** | Select which panels display the annotations. |
|
||||
| **Filter by** | Add filters to limit when annotations appear. |
|
||||
|
||||
## Best practices
|
||||
|
||||
Follow these recommendations when creating annotations:
|
||||
|
||||
1. **Limit results**: Always use `take` or `limit` to restrict the number of annotations. Too many annotations can clutter your dashboard and impact performance.
|
||||
|
||||
2. **Use time filters**: Include `$__timeFilter()` to ensure queries only return data within the dashboard's time range.
|
||||
|
||||
3. **Create meaningful text**: Use `strcat()` or `project` to create descriptive annotation text that provides context at a glance.
|
||||
|
||||
4. **Add relevant tags**: Include columns like `ResourceGroup`, `Severity`, or `Status` that become clickable tags for filtering.
|
||||
|
||||
5. **Use descriptive names**: Name your annotations clearly (e.g., "Production Deployments", "Critical Alerts") so dashboard users understand what they represent.
|
||||
|
||||
## Troubleshoot annotations
|
||||
|
||||
If annotations aren't appearing as expected, try the following solutions.
|
||||
|
||||
### Annotations don't appear
|
||||
|
||||
- Verify the query returns data in the selected time range.
|
||||
- Check that the query includes a `TimeGenerated` column.
|
||||
- Test the query in the Azure Portal Log Analytics query editor.
|
||||
- Ensure the annotation is enabled (toggle is on).
|
||||
|
||||
### Too many annotations
|
||||
|
||||
- Add more specific filters to your query.
|
||||
- Use `take` to limit results.
|
||||
- Narrow the time range.
|
||||
|
||||
### Annotations appear at wrong times
|
||||
|
||||
- Verify the `TimeGenerated` column contains the correct timestamp.
|
||||
- Check your dashboard's timezone settings.
|
||||
@@ -0,0 +1,605 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/configure/
|
||||
description: Guide for configuring the Azure Monitor data source in Grafana.
|
||||
keywords:
|
||||
- grafana
|
||||
- microsoft
|
||||
- azure
|
||||
- monitor
|
||||
- application
|
||||
- insights
|
||||
- log
|
||||
- analytics
|
||||
- guide
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Configure
|
||||
title: Configure the Azure Monitor data source
|
||||
weight: 200
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
configure-grafana-feature-toggles:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#feature_toggles
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#feature_toggles
|
||||
provisioning-data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
configure-grafana-azure-auth:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/
|
||||
build-dashboards:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
configure-grafana-azure:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
|
||||
data-source-management:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
configure-grafana-azure-auth-scopes:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
    destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
    destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
configure-pdc:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
---
|
||||
|
||||
# Configure the Azure Monitor data source
|
||||
|
||||
This document explains how to configure the Azure Monitor data source and the available configuration options.
|
||||
For general information about data sources, refer to [Grafana data sources](ref:data-sources) and [Data source management](ref:data-source-management).
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before configuring the Azure Monitor data source, ensure you have the following:
|
||||
|
||||
- **Grafana permissions:** You must have the `Organization administrator` role to configure data sources.
|
||||
Organization administrators can also [configure the data source via YAML](#provision-the-data-source) with the Grafana provisioning system or [using Terraform](#configure-with-terraform).
|
||||
|
||||
- **Azure prerequisites:** Depending on your chosen authentication method, you may need:
|
||||
- A Microsoft Entra ID (formerly Azure AD) app registration with a service principal (for App Registration authentication)
|
||||
- A Managed Identity enabled on your Azure VM or App Service (for Managed Identity authentication)
|
||||
- Workload identity configured in your Kubernetes cluster (for Workload Identity authentication)
|
||||
- Microsoft Entra ID authentication configured for Grafana login (for Current User authentication)
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
**Grafana Cloud users:** Managed Identity and Workload Identity authentication methods are not available in Grafana Cloud because they require Grafana to run on your Azure infrastructure. Use **App Registration** authentication instead.
|
||||
{{< /admonition >}}
|
||||
|
||||
- **Azure RBAC permissions:** The identity used to authenticate must have the `Reader` role on the Azure subscription containing the resources you want to monitor.
|
||||
For Log Analytics queries, the identity also needs appropriate permissions on the Log Analytics workspaces to be queried.
|
||||
Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The Azure Monitor data source plugin is built into Grafana. No additional installation is required.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Add the data source
|
||||
|
||||
To add the Azure Monitor data source:
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Click **Add new connection**.
|
||||
1. Type `Azure Monitor` in the search bar.
|
||||
1. Select **Azure Monitor**.
|
||||
1. Click **Add new data source** in the upper right.
|
||||
|
||||
You're taken to the **Settings** tab where you can configure the data source.
|
||||
|
||||
## Choose an authentication method
|
||||
|
||||
The Azure Monitor data source supports four authentication methods. Choose based on where Grafana is hosted and your security requirements:
|
||||
|
||||
| Authentication method | Best for | Requirements |
|
||||
| --------------------- | ------------------------------------------ | -------------------------------------------------------------- |
|
||||
| **App Registration** | Any Grafana deployment | Microsoft Entra ID app registration with client secret |
|
||||
| **Managed Identity** | Grafana hosted in Azure (VMs, App Service) | Managed identity enabled on the Azure resource |
|
||||
| **Workload Identity** | Grafana in Kubernetes (AKS) | Workload identity federation configured |
|
||||
| **Current User** | User-level access control | Microsoft Entra ID authentication configured for Grafana login |
|
||||
|
||||
## Configure authentication
|
||||
|
||||
Select one of the following authentication methods and complete the configuration.
|
||||
|
||||
### App Registration
|
||||
|
||||
Use a Microsoft Entra ID app registration (service principal) to authenticate. This method works with any Grafana deployment.
|
||||
|
||||
#### App Registration prerequisites
|
||||
|
||||
1. Create an app registration in Microsoft Entra ID.
|
||||
Refer to the [Azure documentation for creating a service principal](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in).
|
||||
|
||||
1. Create a client secret for the app registration.
|
||||
Refer to the [Azure documentation for creating a client secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).
|
||||
|
||||
1. Assign the `Reader` role to the app registration on the subscription or resources you want to monitor.
|
||||
Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
|
||||
|
||||
#### App Registration UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| **Authentication** | Select **App Registration**. |
|
||||
| **Azure Cloud** | The Azure environment to connect to. Select **Azure** for the public cloud, or choose Azure Government or Azure China for national clouds. |
|
||||
| **Directory (tenant) ID** | The GUID that identifies your Microsoft Entra ID tenant. |
|
||||
| **Application (client) ID** | The GUID for the app registration you created. |
|
||||
| **Client secret** | The secret key for the app registration. Keep this secure and rotate periodically. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
|
||||
#### Provision App Registration with YAML
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: clientsecret
|
||||
cloudName: azuremonitor # See supported cloud names below
|
||||
tenantId: <tenant-id>
|
||||
clientId: <client-id>
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
secureJsonData:
|
||||
clientSecret: <client-secret>
|
||||
version: 1
|
||||
```
|
||||
|
||||
### Managed Identity
|
||||
|
||||
Use Azure Managed Identity for secure, credential-free authentication when Grafana is hosted in Azure.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Managed Identity is available in [Azure Managed Grafana](https://azure.microsoft.com/en-us/products/managed-grafana) or self-hosted Grafana deployed in Azure. It is not available in Grafana Cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Managed Identity prerequisites
|
||||
|
||||
- Grafana must be hosted in Azure (App Service, Azure VMs, or Azure Managed Grafana).
|
||||
- Managed identity must be enabled on the Azure resource hosting Grafana.
|
||||
- The managed identity must have the `Reader` role on the subscription or resources you want to monitor.
|
||||
|
||||
For details on Azure managed identities, refer to the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
|
||||
|
||||
#### Managed Identity Grafana server configuration
|
||||
|
||||
Enable managed identity in the Grafana server configuration:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
```
|
||||
|
||||
To use a user-assigned managed identity instead of the system-assigned identity, also set:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
managed_identity_client_id = <USER_ASSIGNED_IDENTITY_CLIENT_ID>
|
||||
```
|
||||
|
||||
Refer to [Grafana Azure configuration](ref:configure-grafana-azure) for more details.
|
||||
|
||||
#### Managed Identity UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------------ | --------------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Select **Managed Identity**. The directory ID, application ID, and client secret fields are hidden. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-managed-identity-2.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Managed Identity" >}}
|
||||
|
||||
#### Provision Managed Identity with YAML
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: msi
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
### Workload Identity
|
||||
|
||||
Use Azure Workload Identity for secure authentication in Kubernetes environments like AKS.
|
||||
|
||||
#### Workload Identity prerequisites
|
||||
|
||||
- Grafana must be running in a Kubernetes environment with workload identity federation configured.
|
||||
- The workload identity must have the `Reader` role on the subscription or resources you want to monitor.
|
||||
|
||||
For details, refer to the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/).
|
||||
|
||||
#### Workload Identity Grafana server configuration
|
||||
|
||||
Enable workload identity in the Grafana server configuration:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
```
|
||||
|
||||
Optional configuration variables:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
workload_identity_tenant_id = <IDENTITY_TENANT_ID> # Microsoft Entra ID tenant containing the managed identity
|
||||
workload_identity_client_id = <IDENTITY_CLIENT_ID> # Client ID if different from default
|
||||
workload_identity_token_file = <TOKEN_FILE_PATH> # Path to the token file
|
||||
```
|
||||
|
||||
Refer to [Grafana Azure configuration](ref:configure-grafana-azure) and the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/) for more details.
|
||||
|
||||
#### Workload Identity UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------------ | ---------------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Select **Workload Identity**. The directory ID, application ID, and client secret fields are hidden. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-workload-identity.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Workload Identity" >}}
|
||||
|
||||
#### Provision Workload Identity with YAML
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: workloadidentity
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
### Current User
|
||||
|
||||
Forward the logged-in Grafana user's Azure credentials to the data source for user-level access control.
|
||||
|
||||
{{< admonition type="warning" >}}
|
||||
Current User authentication is an [experimental feature](/docs/release-life-cycle/). Engineering and on-call support are not available. Documentation is limited. No SLA is provided. Contact Grafana Support to enable this feature in Grafana Cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Current User prerequisites
|
||||
|
||||
Your Grafana instance must be configured with Microsoft Entra ID authentication. Refer to the [Microsoft Entra ID authentication documentation](ref:configure-grafana-azure-auth).
|
||||
|
||||
#### Configure your Azure App Registration
|
||||
|
||||
The App Registration used for Grafana login requires additional configuration:
|
||||
|
||||
**Enable token issuance:**
|
||||
|
||||
1. In the Azure Portal, open your App Registration.
|
||||
1. Select **Authentication** in the side menu.
|
||||
1. Under **Implicit grant and hybrid flows**, check both **Access tokens** and **ID tokens**.
|
||||
1. Save your changes.
|
||||
|
||||
**Add API permissions:**
|
||||
|
||||
1. In the Azure Portal, open your App Registration.
|
||||
1. Select **API Permissions** in the side menu.
|
||||
1. Ensure these permissions are present under **Microsoft Graph**: `openid`, `profile`, `email`, and `offline_access`.
|
||||
1. Add the following permissions:
|
||||
- **Azure Service Management** > **Delegated permissions** > `user_impersonation`
|
||||
- **APIs my organization uses** > Search for **Log Analytics API** > **Delegated permissions** > `Data.Read`
|
||||
|
||||
Refer to the [Azure documentation](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-configure-app-access-web-apis) for more information.
|
||||
|
||||
**Update Grafana scopes:**
|
||||
|
||||
Update the `scopes` section in your Grafana Azure authentication configuration to include the `.default` scope:
|
||||
|
||||
```
|
||||
.default openid email profile
|
||||
```
|
||||
|
||||
#### Current User Grafana server configuration
|
||||
|
||||
Enable current user authentication in the Grafana server configuration:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
user_identity_enabled = true
|
||||
```
|
||||
|
||||
By default, this also enables fallback service credentials. To disable fallback credentials at the instance level:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
user_identity_enabled = true
|
||||
user_identity_fallback_credentials_enabled = false
|
||||
```
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
To use fallback service credentials, the [feature toggle](ref:configure-grafana-feature-toggles) `idForwarding` must be set to `true`.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Limitations and fallback credentials
|
||||
|
||||
Current User authentication doesn't support backend functionality like alerting, reporting, and recorded queries because user credentials aren't available for background operations.
|
||||
|
||||
To support these features, configure **fallback service credentials**. When enabled, Grafana uses the fallback credentials for backend operations. Note that operations using fallback credentials are limited to the permissions of those credentials, not the user's permissions.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Query and resource caching is disabled by default for data sources using Current User authentication.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Current User UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| -------------------------------- | ------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Select **Current User**. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
| **Fallback Service Credentials** | Enable and configure credentials for backend features like alerting. |
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-current-user.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Current User authentication" >}}
|
||||
|
||||
#### Provision Current User with YAML
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The `oauthPassThru` property is required for Current User authentication. The `disableGrafanaCache` property prevents returning cached responses for resources users don't have access to.
|
||||
{{< /admonition >}}
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: currentuser
|
||||
oauthPassThru: true
|
||||
disableGrafanaCache: true
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
## Additional configuration options
|
||||
|
||||
These settings apply to all authentication methods.
|
||||
|
||||
### General settings
|
||||
|
||||
| Setting | Description |
|
||||
| ----------- | ------------------------------------------------------------------------------- |
|
||||
| **Name** | The data source name used in panels and queries. Example: `azure-monitor-prod`. |
|
||||
| **Default** | Toggle to make this the default data source for new panels. |
|
||||
|
||||
### Enable Basic Logs
|
||||
|
||||
Toggle **Enable Basic Logs** to allow queries against [Basic Logs tables](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/basic-logs-query?tabs=portal-1) in supported Log Analytics Workspaces.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Querying Basic Logs tables incurs additional costs on a per-query basis.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Private data source connect (Grafana Cloud only)
|
||||
|
||||
If you're using Grafana Cloud and need to connect to Azure resources in a private network, use Private Data Source Connect (PDC).
|
||||
|
||||
1. Click the **Private data source connect** dropdown to select your PDC configuration.
|
||||
1. Click **Manage private data source connect** to view your PDC connection details.
|
||||
|
||||
For more information, refer to [Private data source connect](ref:private-data-source-connect) and [Configure PDC](ref:configure-pdc).
|
||||
|
||||
## Supported cloud names
|
||||
|
||||
When provisioning the data source, use the following `cloudName` values:
|
||||
|
||||
| Azure Cloud | `cloudName` value |
|
||||
| -------------------------------- | ------------------------ |
|
||||
| Microsoft Azure public cloud | `azuremonitor` (default) |
|
||||
| Microsoft Chinese national cloud | `chinaazuremonitor` |
|
||||
| US Government cloud | `govazuremonitor` |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
For Current User authentication, the cloud names differ: use `AzureCloud` for public cloud, `AzureChinaCloud` for the Chinese national cloud, and `AzureUSGovernment` for the US Government cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Verify the connection
|
||||
|
||||
After configuring the data source, click **Save & test**. A successful connection displays a message confirming that the credentials are valid and have access to the configured default subscription.
|
||||
|
||||
If the test fails, verify:
|
||||
|
||||
- Your credentials are correct (tenant ID, client ID, client secret)
|
||||
- The identity has the required Azure RBAC permissions
|
||||
- For Managed Identity or Workload Identity, that the Grafana server configuration is correct
|
||||
- Network connectivity to Azure endpoints
|
||||
|
||||
## Provision the data source
|
||||
|
||||
You can define and configure the Azure Monitor data source in YAML files as part of the Grafana provisioning system.
|
||||
For more information about provisioning, refer to [Provisioning Grafana](ref:provisioning-data-sources).
|
||||
|
||||
### Provision quick reference
|
||||
|
||||
| Authentication method | `azureAuthType` value | Required fields |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- |
|
||||
| App Registration | `clientsecret` | `tenantId`, `clientId`, `clientSecret` |
|
||||
| Managed Identity | `msi` | None (uses VM identity) |
|
||||
| Workload Identity | `workloadidentity` | None (uses pod identity) |
|
||||
| Current User | `currentuser` | `oauthPassThru: true`, `disableGrafanaCache: true` |
|
||||
|
||||
All methods support the optional `subscriptionId` field to set a default subscription.
|
||||
|
||||
For complete YAML examples, see the [authentication method sections](#configure-authentication) above.
|
||||
|
||||
## Configure with Terraform
|
||||
|
||||
You can configure the Azure Monitor data source using the [Grafana Terraform provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs). This approach enables infrastructure-as-code workflows and version control for your Grafana configuration.
|
||||
|
||||
### Terraform prerequisites
|
||||
|
||||
- [Terraform](https://www.terraform.io/downloads) installed
|
||||
- Grafana Terraform provider configured with appropriate credentials
|
||||
- For Grafana Cloud: A [Cloud Access Policy token](https://grafana.com/docs/grafana-cloud/account-management/authentication-and-permissions/access-policies/) with data source permissions
|
||||
|
||||
### Provider configuration
|
||||
|
||||
Configure the Grafana provider to connect to your Grafana instance:
|
||||
|
||||
```hcl
|
||||
terraform {
|
||||
required_providers {
|
||||
grafana = {
|
||||
source = "grafana/grafana"
|
||||
version = ">= 2.0.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# For Grafana Cloud
|
||||
provider "grafana" {
|
||||
url = "<YOUR_GRAFANA_CLOUD_STACK_URL>"
|
||||
auth = "<YOUR_SERVICE_ACCOUNT_TOKEN>"
|
||||
}
|
||||
|
||||
# For self-hosted Grafana
|
||||
# provider "grafana" {
|
||||
# url = "http://localhost:3000"
|
||||
# auth = "<API_KEY_OR_SERVICE_ACCOUNT_TOKEN>"
|
||||
# }
|
||||
```
|
||||
|
||||
### Terraform examples
|
||||
|
||||
The following examples show how to configure the Azure Monitor data source for each authentication method.
|
||||
|
||||
**App Registration (client secret):**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "clientsecret"
|
||||
cloudName = "azuremonitor"
|
||||
tenantId = "<TENANT_ID>"
|
||||
clientId = "<CLIENT_ID>"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
|
||||
secure_json_data_encoded = jsonencode({
|
||||
clientSecret = "<CLIENT_SECRET>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**Managed Identity:**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "msi"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**Workload Identity:**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "workloadidentity"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**Current User:**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "currentuser"
|
||||
oauthPassThru = true
|
||||
disableGrafanaCache = true
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**With Basic Logs enabled:**
|
||||
|
||||
Add `enableBasicLogs = true` to any of the above configurations:
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "clientsecret"
|
||||
cloudName = "azuremonitor"
|
||||
tenantId = "<TENANT_ID>"
|
||||
clientId = "<CLIENT_ID>"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
enableBasicLogs = true
|
||||
})
|
||||
|
||||
secure_json_data_encoded = jsonencode({
|
||||
clientSecret = "<CLIENT_SECRET>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
For more information about the Grafana Terraform provider, refer to the [provider documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs) and the [grafana_data_source resource](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/data_source).
|
||||
@@ -21,6 +21,7 @@ labels:
|
||||
menuTitle: Query editor
|
||||
title: Azure Monitor query editor
|
||||
weight: 300
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
query-transform-data-query-options:
|
||||
- pattern: /docs/grafana/
|
||||
@@ -32,30 +33,85 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
troubleshoot-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
configure-grafana-feature-toggles:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/
|
||||
template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
alerting-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
annotations-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
---
|
||||
|
||||
# Azure Monitor query editor
|
||||
|
||||
This topic explains querying specific to the Azure Monitor data source.
|
||||
For general documentation on querying data sources in Grafana, see [Query and transform data](ref:query-transform-data).
|
||||
Grafana provides a query editor for the Azure Monitor data source, which is located on the [Explore page](ref:explore). You can also access the Azure Monitor query editor from a dashboard panel. Click the menu in the upper right of the panel and select **Edit**.
|
||||
|
||||
## Choose a query editing mode
|
||||
This document explains querying specific to the Azure Monitor data source.
|
||||
For general documentation on querying data sources in Grafana, refer to [Query and transform data](ref:query-transform-data).
|
||||
|
||||
The Azure Monitor data source's query editor has three modes depending on which Azure service you want to query:
|
||||
The Azure Monitor data source can query data from Azure Monitor Metrics and Logs, the Azure Resource Graph, and Application Insights Traces. Each source has its own specialized query editor.
|
||||
|
||||
## Before you begin
|
||||
|
||||
- Ensure you have [configured the Azure Monitor data source](ref:configure-azure-monitor).
|
||||
- Verify your credentials have appropriate permissions for the resources you want to query.
|
||||
|
||||
## Key concepts
|
||||
|
||||
If you're new to Azure Monitor, here are some key terms used throughout this documentation:
|
||||
|
||||
| Term | Description |
|
||||
| ------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **KQL (Kusto Query Language)** | The query language used for Azure Monitor Logs and Azure Resource Graph. KQL uses a pipe-based syntax similar to Unix commands and is optimized for read-only data exploration. If you know SQL, the [SQL to Kusto cheat sheet](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/sqlcheatsheet) can help you get started. |
|
||||
| **Log Analytics workspace** | An Azure resource that collects and stores log data from your Azure resources, applications, and services. You query this data using KQL. |
|
||||
| **Application Insights** | Azure's application performance monitoring (APM) service. It collects telemetry data like requests, exceptions, and traces from your applications. |
|
||||
| **Metrics vs. Logs** | **Metrics** are lightweight numeric values collected at regular intervals (e.g., CPU percentage). **Logs** are detailed records of events with varying schemas (e.g., request logs, error messages). Metrics use a visual query builder; Logs require KQL. |
|
||||
|
||||
## Choose a query editor mode
|
||||
|
||||
The Azure Monitor data source's query editor has four modes depending on which Azure service you want to query:
|
||||
|
||||
- **Metrics** for [Azure Monitor Metrics](#query-azure-monitor-metrics)
|
||||
- **Logs** for [Azure Monitor Logs](#query-azure-monitor-logs)
|
||||
- [**Azure Resource Graph**](#query-azure-resource-graph)
|
||||
- **Traces** for [Application Insights Traces](#query-application-insights-traces)
|
||||
- **Azure Resource Graph** for [Azure Resource Graph](#query-azure-resource-graph)
|
||||
|
||||
## Query Azure Monitor Metrics
|
||||
|
||||
Azure Monitor Metrics collects numeric data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and you can query them to investigate your resources' health and usage and maximise availability and performance.
|
||||
Azure Monitor Metrics collects numeric data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and you can query them to investigate your resources' health and usage and maximize availability and performance.
|
||||
|
||||
Monitor Metrics use a lightweight format that stores only numeric data in a specific structure and supports near real-time scenarios, making it useful for fast detection of issues.
|
||||
In contrast, Azure Monitor Logs can store a variety of data types, each with their own structure.
|
||||
|
||||
{{< figure src="/static/img/docs/azure-monitor/query-editor-metrics.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Logs Metrics sample query visualizing CPU percentage over time" >}}
|
||||
{{< figure src="/static/img/docs/azure-monitor/query-editor-metrics.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor Metrics sample query visualizing CPU percentage over time" >}}
|
||||
|
||||
### Create a Metrics query
|
||||
|
||||
@@ -85,7 +141,7 @@ Optionally, you can apply further aggregations or filter by dimensions.
|
||||
|
||||
The available options change depending on what is relevant to the selected metric.
|
||||
|
||||
You can also augment queries by using [template variables](../template-variables/).
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
|
||||
### Format legend aliases
|
||||
|
||||
@@ -109,7 +165,7 @@ For example:
|
||||
| `{{ dimensionname }}` | _(Legacy for backward compatibility)_ Replaced with the name of the first dimension. |
|
||||
| `{{ dimensionvalue }}` | _(Legacy for backward compatibility)_ Replaced with the value of the first dimension. |
|
||||
|
||||
### Filter using dimensions
|
||||
### Filter with dimensions
|
||||
|
||||
Some metrics also have dimensions, which associate additional metadata.
|
||||
Dimensions are represented as key-value pairs assigned to each value of a metric.
|
||||
@@ -121,7 +177,7 @@ For more information on multi-dimensional metrics, refer to the [Azure Monitor d
|
||||
|
||||
## Query Azure Monitor Logs
|
||||
|
||||
Azure Monitor Logs collects and organises log and performance data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and makes many sources of data available to query together with the [Kusto Query Language (KQL)](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/).
|
||||
Azure Monitor Logs collects and organizes log and performance data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and makes many sources of data available to query together with the [Kusto Query Language (KQL)](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/).
|
||||
|
||||
While Azure Monitor Metrics stores only simplified numerical data, Logs can store different data types, each with their own structure.
|
||||
You can also perform complex analysis of Logs data by using KQL.
|
||||
@@ -130,6 +186,32 @@ The Azure Monitor data source also supports querying of [Basic Logs](https://lea
|
||||
|
||||
{{< figure src="/static/img/docs/azure-monitor/query-editor-logs.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor Logs sample query comparing successful requests to failed requests" >}}
|
||||
|
||||
### Logs query builder (public preview)
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The Logs query builder is a [public preview feature](/docs/release-life-cycle/). It may not be enabled in all Grafana environments.
|
||||
{{< /admonition >}}
|
||||
|
||||
The Logs query builder provides a visual interface for building Azure Monitor Logs queries without writing KQL. This is helpful if you're new to KQL or want to quickly build simple queries.
|
||||
|
||||
**To enable the Logs query builder:**
|
||||
|
||||
1. Enable the `azureMonitorLogsBuilderEditor` [feature toggle](ref:configure-grafana-feature-toggles) in your Grafana configuration.
|
||||
1. Restart Grafana for the change to take effect.
|
||||
|
||||
**To switch between Builder and Code modes:**
|
||||
|
||||
When the feature is enabled, a **Builder / Code** toggle appears in the Logs query editor:
|
||||
|
||||
- **Builder**: Use the visual interface to select tables, columns, filters, and aggregations. The builder generates the KQL query for you.
|
||||
- **Code**: Write KQL queries directly. Use this mode for complex queries that require full KQL capabilities.
|
||||
|
||||
New queries default to Builder mode. Existing queries that were created with raw KQL remain in Code mode.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
You can switch from Builder to Code mode at any time to view or edit the generated KQL. However, switching from Code to Builder mode may not preserve complex queries that can't be represented in the builder interface.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Create a Logs query
|
||||
|
||||
**To create a Logs query:**
|
||||
@@ -140,13 +222,13 @@ The Azure Monitor data source also supports querying of [Basic Logs](https://lea
|
||||
|
||||
Alternatively, you can dynamically query all resources under a single resource group or subscription.
|
||||
{{< admonition type="note" >}}
|
||||
If a timespan is specified in the query, the overlap of the timespan between the query and the dashboard will be used as the query timespan. See the [API documentation for
|
||||
If a time span is specified in the query, the overlap between the query time span and the dashboard time range will be used. See the [API documentation for
|
||||
details.](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters)
|
||||
{{< /admonition >}}
|
||||
|
||||
1. Enter your KQL query.
|
||||
|
||||
You can also augment queries by using [template variables](../template-variables/).
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
|
||||
**To create a Basic Logs query:**
|
||||
|
||||
@@ -161,7 +243,7 @@ You can also augment queries by using [template variables](../template-variables
|
||||
{{< /admonition >}}
|
||||
1. Enter your KQL query.
|
||||
|
||||
You can also augment queries by using [template variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/).
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
|
||||
### Logs query examples
|
||||
|
||||
@@ -174,24 +256,28 @@ The Azure documentation includes resources to help you learn KQL:
|
||||
- [Tutorial: Use Kusto queries in Azure Monitor](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/tutorial?pivots=azuremonitor)
|
||||
- [SQL to Kusto cheat sheet](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/sqlcheatsheet)
|
||||
|
||||
> **Time-range:** The time-range that will be used for the query can be modified via the time-range switch. Selecting `Query` will only make use of time-ranges specified within the query.
|
||||
> Specifying `Dashboard` will only make use of the Grafana time-range.
|
||||
> If there are no time-ranges specified within the query, the default Log Analytics time-range will apply.
|
||||
> For more details on this change, refer to the [Azure Monitor Logs API documentation](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters).
|
||||
> If the `Intersection` option was previously chosen it will be migrated by default to `Dashboard`.
|
||||
{{< admonition type="note" >}}
|
||||
**Time-range:** The time-range used for the query can be modified via the time-range switch:
|
||||
|
||||
This example query returns a virtual machine's CPU performance, averaged over 5ms time grains:
|
||||
- Selecting **Query** uses only time-ranges specified within the query.
|
||||
- Selecting **Dashboard** uses only the Grafana dashboard time-range.
|
||||
- If no time-range is specified in the query, the default Log Analytics time-range applies.
|
||||
|
||||
For more details, refer to the [Azure Monitor Logs API documentation](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters). If you previously used the `Intersection` option, it has been migrated to `Dashboard`.
|
||||
{{< /admonition >}}
|
||||
|
||||
This example query returns a virtual machine's CPU performance, averaged over 5-minute time grains:
|
||||
|
||||
```kusto
|
||||
Perf
|
||||
# $__timeFilter is a special Grafana macro that filters the results to the time span of the dashboard
|
||||
// $__timeFilter is a special Grafana macro that filters the results to the time span of the dashboard
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where CounterName == "% Processor Time"
|
||||
| summarize avg(CounterValue) by bin(TimeGenerated, 5m), Computer
|
||||
| order by TimeGenerated asc
|
||||
```
|
||||
|
||||
Use time series queries for values that change over time, usually for graph visualisations such as the Time series panel.
|
||||
Use time series queries for values that change over time, usually for graph visualizations such as the Time series panel.
|
||||
Each query should return at least a datetime column and numeric value column.
|
||||
The result must also be sorted in ascending order by the datetime column.
|
||||
|
||||
@@ -357,21 +443,33 @@ Application Insights stores trace data in an underlying Log Analytics workspace
|
||||
This query type only supports Application Insights resources.
|
||||
{{< /admonition >}}
|
||||
|
||||
Running a query of this kind will return all trace data within the timespan specified by the panel/dashboard.
|
||||
1. (Optional) Specify an **Operation ID** value to filter traces.
|
||||
1. (Optional) Specify **event types** to filter by.
|
||||
1. (Optional) Specify **event properties** to filter by.
|
||||
1. (Optional) Change the **Result format** to switch between tabular format and trace format.
|
||||
|
||||
Optionally, you can apply further filtering or select a specific Operation ID to query. The result format can also be switched between a tabular format or the trace format which will return the data in a format that can be used with the Trace visualization.
|
||||
{{< admonition type="note" >}}
|
||||
Selecting the trace format filters events to only the `trace` type. Use this format with the Trace visualization.
|
||||
{{< /admonition >}}
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Selecting the trace format will filter events with the `trace` type.
|
||||
{{< /admonition >}}
|
||||
Running a query returns all trace data within the time range specified by the panel or dashboard.
|
||||
|
||||
1. Specify an Operation ID value.
|
||||
1. Specify event types to filter by.
|
||||
1. Specify event properties to filter by.
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
|
||||
You can also augment queries by using [template variables](../template-variables/).
|
||||
## Use queries for alerting and recording rules
|
||||
|
||||
## Working with large Azure resource data sets
|
||||
All Azure Monitor query types (Metrics, Logs, Azure Resource Graph, and Traces) can be used with Grafana Alerting and recording rules.
|
||||
|
||||
For detailed information about creating alert rules, supported query types, authentication requirements, and examples, refer to [Azure Monitor alerting](ref:alerting-azure-monitor).
|
||||
|
||||
## Work with large Azure resource datasets
|
||||
|
||||
If a request exceeds the [maximum allowed value of records](https://docs.microsoft.com/en-us/azure/governance/resource-graph/concepts/work-with-data#paging-results), the result is paginated and only the first page of results are returned.
|
||||
You can use filters to reduce the amount of records returned under that value.
|
||||
|
||||
## Next steps
|
||||
|
||||
- [Use template variables](ref:template-variables) to create dynamic, reusable dashboards
|
||||
- [Add annotations](ref:annotations-azure-monitor) to overlay events on your graphs
|
||||
- [Set up alerting](ref:alerting-azure-monitor) to create alert rules based on Azure Monitor data
|
||||
- [Troubleshoot](ref:troubleshoot-azure-monitor) common query and configuration issues
|
||||
|
||||
@@ -23,6 +23,7 @@ labels:
|
||||
menuTitle: Template variables
|
||||
title: Azure Monitor template variables
|
||||
weight: 400
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
variables:
|
||||
- pattern: /docs/grafana/
|
||||
@@ -34,6 +35,11 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
---
|
||||
|
||||
# Azure Monitor template variables
|
||||
@@ -42,58 +48,173 @@ Instead of hard-coding details such as resource group or resource name values in
|
||||
This helps you create more interactive, dynamic, and reusable dashboards.
|
||||
Grafana refers to such variables as template variables.
|
||||
|
||||
For an introduction to templating and template variables, refer to the [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables) documentation.
|
||||
For an introduction to templating and template variables, refer to [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables).
|
||||
|
||||
## Use query variables
|
||||
## Before you begin
|
||||
|
||||
You can specify these Azure Monitor data source queries in the Variable edit view's **Query Type** field.
|
||||
- Ensure you have [configured the Azure Monitor data source](ref:configure-azure-monitor).
|
||||
- If you want template variables to auto-populate subscriptions, set a **Default Subscription** in the data source configuration.
|
||||
|
||||
| Name | Description |
|
||||
| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Subscriptions** | Returns subscriptions. |
|
||||
| **Resource Groups** | Returns resource groups for a specified subscription. Supports multi-value. |
|
||||
| **Namespaces** | Returns metric namespaces for the specified subscription. If a resource group is provided, only the namespaces within that group are returned. |
|
||||
| **Regions** | Returns regions for the specified subscription |
|
||||
| **Resource Names** | Returns a list of resource names for a specified subscription, resource group and namespace. Supports multi-value. |
|
||||
| **Metric Names** | Returns a list of metric names for a resource. |
|
||||
| **Workspaces** | Returns a list of workspaces for the specified subscription. |
|
||||
| **Logs** | Use a KQL query to return values. |
|
||||
| **Custom Namespaces** | Returns metric namespaces for the specified resource. |
|
||||
| **Custom Metric Names** | Returns a list of custom metric names for the specified resource. |
|
||||
## Create a template variable
|
||||
|
||||
To create a template variable for Azure Monitor:
|
||||
|
||||
1. Open the dashboard where you want to add the variable.
|
||||
1. Click **Dashboard settings** (gear icon) in the top navigation.
|
||||
1. Select **Variables** in the left menu.
|
||||
1. Click **Add variable**.
|
||||
1. Enter a **Name** for your variable (e.g., `subscription`, `resourceGroup`, `resource`).
|
||||
1. In the **Type** dropdown, select **Query**.
|
||||
1. In the **Data source** dropdown, select your Azure Monitor data source.
|
||||
1. In the **Query Type** dropdown, select the appropriate query type (see [Available query types](#available-query-types)).
|
||||
1. Configure any additional fields required by the selected query type.
|
||||
1. Click **Run query** to preview the variable values.
|
||||
1. Configure display options such as **Multi-value** or **Include All option** as needed.
|
||||
1. Click **Apply** to save the variable.
|
||||
|
||||
## Available query types
|
||||
|
||||
The Azure Monitor data source provides the following query types for template variables:
|
||||
|
||||
| Query type | Description |
|
||||
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Subscriptions** | Returns a list of Azure subscriptions accessible to the configured credentials. |
|
||||
| **Resource Groups** | Returns resource groups for a specified subscription. Supports multi-value selection. |
|
||||
| **Namespaces** | Returns metric namespaces for the specified subscription. If a resource group is specified, returns only namespaces within that group. |
|
||||
| **Regions** | Returns Azure regions available for the specified subscription. |
|
||||
| **Resource Names** | Returns resource names for a specified subscription, resource group, and namespace. Supports multi-value selection. |
|
||||
| **Metric Names** | Returns available metric names for a specified resource. |
|
||||
| **Workspaces** | Returns Log Analytics workspaces for the specified subscription. |
|
||||
| **Logs** | Executes a KQL query and returns the results as variable values. See [Create a Logs variable](#create-a-logs-variable). |
|
||||
| **Custom Namespaces** | Returns custom metric namespaces for a specified resource. |
|
||||
| **Custom Metric Names** | Returns custom metric names for a specified resource. |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Custom metrics cannot be emitted against a subscription or resource group. Select resources only when you need to retrieve custom metric namespaces or custom metric names associated with a specific resource.
|
||||
Custom metrics cannot be emitted against a subscription or resource group. Select specific resources when retrieving custom metric namespaces or custom metric names.
|
||||
{{< /admonition >}}
|
||||
|
||||
You can use any Log Analytics Kusto Query Language (KQL) query that returns a single list of values in the `Query` field.
|
||||
For example:
|
||||
## Create cascading variables
|
||||
|
||||
| Query | List of values returned |
|
||||
| ----------------------------------------------------------------------------------------- | --------------------------------------- |
|
||||
| `workspace("myWorkspace").Heartbeat \| distinct Computer` | Virtual machines |
|
||||
| `workspace("$workspace").Heartbeat \| distinct Computer` | Virtual machines with template variable |
|
||||
| `workspace("$workspace").Perf \| distinct ObjectName` | Objects from the Perf table |
|
||||
| `workspace("$workspace").Perf \| where ObjectName == "$object"` `\| distinct CounterName` | Metric names from the Perf table |
|
||||
Cascading variables (also called dependent or chained variables) allow you to create dropdown menus that filter based on previous selections. This is useful for drilling down from subscription to resource group to specific resource.
|
||||
|
||||
### Query variable example
|
||||
### Example: Subscription → Resource Group → Resource Name
|
||||
|
||||
This time series query uses query variables:
|
||||
**Step 1: Create a Subscription variable**
|
||||
|
||||
1. Create a variable named `subscription`.
|
||||
1. Set **Query Type** to **Subscriptions**.
|
||||
|
||||
**Step 2: Create a Resource Group variable**
|
||||
|
||||
1. Create a variable named `resourceGroup`.
|
||||
1. Set **Query Type** to **Resource Groups**.
|
||||
1. In the **Subscription** field, select `$subscription`.
|
||||
|
||||
**Step 3: Create a Resource Name variable**
|
||||
|
||||
1. Create a variable named `resource`.
|
||||
1. Set **Query Type** to **Resource Names**.
|
||||
1. In the **Subscription** field, select `$subscription`.
|
||||
1. In the **Resource Group** field, select `$resourceGroup`.
|
||||
1. Select the appropriate **Namespace** for your resources (e.g., `Microsoft.Compute/virtualMachines`).
|
||||
|
||||
Now when you change the subscription, the resource group dropdown updates automatically, and when you change the resource group, the resource name dropdown updates.
|
||||
|
||||
## Create a Logs variable
|
||||
|
||||
The **Logs** query type lets you use a KQL query to populate variable values. The query must return a single column of values.
|
||||
|
||||
**To create a Logs variable:**
|
||||
|
||||
1. Create a new variable with **Query Type** set to **Logs**.
|
||||
1. Select a **Resource** (Log Analytics workspace or Application Insights resource).
|
||||
1. Enter a KQL query that returns a single column.
|
||||
|
||||
### Logs variable query examples
|
||||
|
||||
| Query | Returns |
|
||||
| ----------------------------------------- | ------------------------------------- |
|
||||
| `Heartbeat \| distinct Computer` | List of virtual machine names |
|
||||
| `Perf \| distinct ObjectName` | List of performance object names |
|
||||
| `AzureActivity \| distinct ResourceGroup` | List of resource groups with activity |
|
||||
| `AppRequests \| distinct Name` | List of application request names |
|
||||
|
||||
You can reference other variables in your Logs query:
|
||||
|
||||
```kusto
|
||||
workspace("$workspace").Heartbeat | distinct Computer
|
||||
```
|
||||
|
||||
```kusto
|
||||
workspace("$workspace").Perf
|
||||
| where ObjectName == "$object"
|
||||
| distinct CounterName
|
||||
```
|
||||
|
||||
## Variable refresh options
|
||||
|
||||
Control when your variables refresh by setting the **Refresh** option:
|
||||
|
||||
| Option | Behavior |
|
||||
| ------------------------ | ----------------------------------------------------------------------------------------- |
|
||||
| **On dashboard load** | Variables refresh each time the dashboard loads. Best for data that changes infrequently. |
|
||||
| **On time range change** | Variables refresh when the dashboard time range changes. Use for time-sensitive queries. |
|
||||
|
||||
For dashboards with many variables or complex queries, use **On dashboard load** to improve performance.
|
||||
|
||||
## Use variables in queries
|
||||
|
||||
After you create template variables, you can use them in your Azure Monitor queries by referencing them with the `$` prefix.
|
||||
|
||||
### Metrics query example
|
||||
|
||||
In a Metrics query, select your variables in the resource picker fields:
|
||||
|
||||
- **Subscription**: `$subscription`
|
||||
- **Resource Group**: `$resourceGroup`
|
||||
- **Resource Name**: `$resource`
|
||||
|
||||
### Logs query example
|
||||
|
||||
Reference variables directly in your KQL queries:
|
||||
|
||||
```kusto
|
||||
Perf
|
||||
| where ObjectName == "$object" and CounterName == "$metric"
|
||||
| where TimeGenerated >= $__timeFrom() and TimeGenerated <= $__timeTo()
|
||||
| where $__contains(Computer, $computer)
|
||||
| summarize avg(CounterValue) by bin(TimeGenerated, $__interval), Computer
|
||||
| order by TimeGenerated asc
|
||||
```
|
||||
|
||||
### Multi-value variables
|
||||
## Multi-value variables
|
||||
|
||||
It is possible to select multiple values for **Resource Groups** and **Resource Names** and use a single metrics query pointing to those values as long as they:
|
||||
You can enable **Multi-value** selection for **Resource Groups** and **Resource Names** variables. When using multi-value variables in a Metrics query, all selected resources must:
|
||||
|
||||
- Belong to the same subscription.
|
||||
- Are in the same region.
|
||||
- Are of the same type (namespace).
|
||||
- Belong to the same subscription
|
||||
- Be in the same Azure region
|
||||
- Be of the same resource type (namespace)
|
||||
|
||||
Also, note that if a template variable pointing to multiple resource groups or names is used in another template variable as a parameter (e.g. to retrieve metric names), only the first value will be used. This means that the combination of the first resource group and name selected should be valid.
|
||||
{{< admonition type="note" >}}
|
||||
When a multi-value variable is used as a parameter in another variable query (for example, to retrieve metric names), only the first selected value is used. Ensure the first resource group and resource name combination is valid.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Troubleshoot template variables
|
||||
|
||||
If you encounter issues with template variables, try the following solutions.
|
||||
|
||||
### Variable returns no values
|
||||
|
||||
- Verify the Azure Monitor data source is configured correctly and can connect to Azure.
|
||||
- Check that the credentials have appropriate permissions to list the requested resources.
|
||||
- For cascading variables, ensure parent variables have valid selections.
|
||||
|
||||
### Variable values are outdated
|
||||
|
||||
- Check the **Refresh** setting and adjust if needed.
|
||||
- Click the refresh icon next to the variable dropdown to manually refresh.
|
||||
|
||||
### Multi-value selection not working in queries
|
||||
|
||||
- Ensure the resources meet the requirements (same subscription, region, and type).
|
||||
- For Logs queries, use the `$__contains()` macro to handle multi-value variables properly.
|
||||
|
||||
@@ -0,0 +1,320 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/troubleshooting/
|
||||
description: Troubleshooting guide for the Azure Monitor data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- azure
|
||||
- monitor
|
||||
- troubleshooting
|
||||
- errors
|
||||
- authentication
|
||||
- query
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Troubleshoot
|
||||
title: Troubleshoot Azure Monitor data source issues
|
||||
weight: 500
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
---
|
||||
|
||||
# Troubleshoot Azure Monitor data source issues
|
||||
|
||||
This document provides solutions to common issues you may encounter when configuring or using the Azure Monitor data source.
|
||||
|
||||
## Configuration and authentication errors
|
||||
|
||||
These errors typically occur when setting up the data source or when authentication credentials are invalid.
|
||||
|
||||
### "Authorization failed" or "Access denied"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Save & test fails with "Authorization failed"
|
||||
- Queries return "Access denied" errors
|
||||
- Subscriptions don't load when clicking **Load Subscriptions**
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| -------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| App registration doesn't have required permissions | Assign the `Reader` role to the app registration on the subscription or resource group you want to monitor. Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current). |
|
||||
| Incorrect tenant ID, client ID, or client secret | Verify the credentials in the Azure Portal under **App registrations** > your app > **Overview** (for IDs) and **Certificates & secrets** (for secret). |
|
||||
| Client secret has expired | Create a new client secret in Azure and update the data source configuration. |
|
||||
| Managed Identity not enabled on the Azure resource | For VMs, enable managed identity in the Azure Portal under **Identity**. For App Service, enable it under **Identity** in the app settings. |
|
||||
| Managed Identity not assigned the Reader role | Assign the `Reader` role to the managed identity on the target subscription or resources. |
|
||||
|
||||
### "Invalid client secret" or "Client secret not found"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Authentication fails immediately after configuration
|
||||
- Error message references invalid credentials
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Ensure you copied the client secret **value**, not the secret ID. In Azure Portal under **Certificates & secrets**, the secret value is only shown once when created. The secret ID is a different identifier and won't work for authentication.
|
||||
2. Verify the client secret was copied correctly (no extra spaces or truncation).
|
||||
3. Check if the secret has expired in Azure Portal under **App registrations** > your app > **Certificates & secrets**.
|
||||
4. Create a new secret and update the data source configuration.
|
||||
|
||||
### "Tenant not found" or "Invalid tenant ID"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Data source test fails with tenant-related errors
|
||||
- Unable to authenticate
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the Directory (tenant) ID in Azure Portal under **Microsoft Entra ID** > **Overview**.
|
||||
2. Ensure you're using the correct Azure cloud setting (Azure, Azure Government, or Azure China).
|
||||
3. Check that the tenant ID is a valid GUID format.
|
||||
|
||||
### Managed Identity not working
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Managed Identity option is available but authentication fails
|
||||
- Error: "Managed identity authentication is not available"
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify `managed_identity_enabled = true` is set in the Grafana server configuration under `[azure]`.
|
||||
2. Confirm the Azure resource hosting Grafana has managed identity enabled.
|
||||
3. For user-assigned managed identity, ensure `managed_identity_client_id` is set correctly.
|
||||
4. Verify the managed identity has the `Reader` role on the target resources.
|
||||
5. Restart Grafana after changing server configuration.
|
||||
|
||||
### Workload Identity not working
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Workload Identity authentication fails in Kubernetes/AKS environment
|
||||
- Token file errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify `workload_identity_enabled = true` is set in the Grafana server configuration.
|
||||
2. Check that the service account is correctly annotated for workload identity.
|
||||
3. Verify the federated credential is configured in Azure.
|
||||
4. Ensure the token path is accessible to the Grafana pod.
|
||||
5. Check the workload identity webhook is running in the cluster.
|
||||
|
||||
## Query errors
|
||||
|
||||
These errors occur when executing queries against Azure Monitor services.
|
||||
|
||||
### "No data" or empty results
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query executes without error but returns no data
|
||||
- Charts show "No data" message
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Time range doesn't contain data | Expand the dashboard time range or verify data exists in Azure Portal. |
|
||||
| Wrong resource selected | Verify you've selected the correct subscription, resource group, and resource. |
|
||||
| Metric not available for resource | Not all metrics are available for all resources. Check available metrics in Azure Portal under the resource's **Metrics** blade. |
|
||||
| Metric has no values | Some metrics only populate under certain conditions (e.g., error counts when errors occur). |
|
||||
| Permissions issue | Verify the identity has read access to the specific resource. |
|
||||
|
||||
### "Bad request" or "Invalid query"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query fails with 400 error
|
||||
- Error message indicates query syntax issues
|
||||
|
||||
**Solutions for Logs queries:**
|
||||
|
||||
1. Validate your KQL syntax in the Azure Portal Log Analytics query editor.
|
||||
2. Check for typos in table names or column names.
|
||||
3. Ensure referenced tables exist in the selected workspace.
|
||||
4. Verify the time range is valid (not in the future, not too far in the past for data retention).
|
||||
|
||||
**Solutions for Metrics queries:**
|
||||
|
||||
1. Verify the metric name is valid for the selected resource type.
|
||||
2. Check that dimension filters use valid dimension names and values.
|
||||
3. Ensure the aggregation type is supported for the selected metric.
|
||||
|
||||
### "Resource not found"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query fails with 404 error
|
||||
- Resource picker shows resources that can't be queried
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the resource still exists in Azure (it may have been deleted or moved).
|
||||
2. Check that the subscription is correct.
|
||||
3. Refresh the resource picker by re-selecting the subscription.
|
||||
4. Verify the identity has access to the resource's resource group.
|
||||
|
||||
### Logs query timeout
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query runs for a long time then fails
|
||||
- Error mentions timeout or query limits
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Narrow the time range to reduce data volume.
|
||||
2. Add filters to reduce the result set.
|
||||
3. Use `summarize` to aggregate data instead of returning raw rows.
|
||||
4. Consider using Basic Logs for large datasets (if enabled).
|
||||
5. Break complex queries into smaller parts.
|
||||
|
||||
### "Metrics not available" for a resource
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Resource appears in picker but no metrics are listed
|
||||
- Metric dropdown is empty
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the resource type supports Azure Monitor metrics.
|
||||
2. Check if the resource is in a region that supports metrics.
|
||||
3. Some resources require diagnostic settings to emit metrics—configure these in Azure Portal.
|
||||
4. Try selecting a different namespace for the resource.
|
||||
|
||||
## Azure Resource Graph errors
|
||||
|
||||
These errors are specific to Azure Resource Graph (ARG) queries.
|
||||
|
||||
### "Query execution failed"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- ARG query fails with execution errors
|
||||
- Results don't match expected resources
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Validate query syntax in Azure Portal Resource Graph Explorer.
|
||||
2. Check that you have access to the subscriptions being queried.
|
||||
3. Verify table names are correct (e.g., `Resources`, `ResourceContainers`).
|
||||
4. Some ARG features require specific permissions; check the [ARG documentation](https://docs.microsoft.com/en-us/azure/governance/resource-graph/).
|
||||
|
||||
### Query returns incomplete results
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Not all expected resources appear in results
|
||||
- Results seem truncated
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. ARG queries are paginated. The data source handles pagination automatically, but very large result sets may be limited.
|
||||
2. Add filters to reduce result set size.
|
||||
3. Verify you have access to all subscriptions containing the resources.
|
||||
|
||||
## Application Insights Traces errors
|
||||
|
||||
These errors are specific to the Traces query type.
|
||||
|
||||
### "No traces found"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Trace query returns empty results
|
||||
- Operation ID search finds nothing
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the Application Insights resource is collecting trace data.
|
||||
2. Check that the time range includes when the traces were generated.
|
||||
3. Ensure the Operation ID is correct (copy directly from another trace or log).
|
||||
4. Verify the identity has access to the Application Insights resource.
|
||||
|
||||
## Template variable errors
|
||||
|
||||
For detailed troubleshooting of template variables, refer to the [template variables troubleshooting section](ref:template-variables).
|
||||
|
||||
### Variables return no values
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the data source connection is working (test it in the data source settings).
|
||||
2. Check that parent variables (for cascading variables) have valid selections.
|
||||
3. Verify the identity has permissions to list the requested resources.
|
||||
4. For Logs variables, ensure the KQL query returns a single column.
|
||||
|
||||
### Variables are slow to load
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Set variable refresh to **On dashboard load** instead of **On time range change**.
|
||||
2. Reduce the scope of variable queries (e.g., filter by resource group instead of entire subscription).
|
||||
3. For Logs variables, optimize the KQL query to return results faster.
|
||||
|
||||
## Connection and network errors
|
||||
|
||||
These errors indicate problems with network connectivity between Grafana and Azure services.
|
||||
|
||||
### "Connection refused" or timeout errors
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Data source test fails with network errors
|
||||
- Queries timeout without returning results
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify network connectivity from Grafana to Azure endpoints.
|
||||
2. Check firewall rules allow outbound HTTPS (port 443) to Azure services.
|
||||
3. For private networks, ensure Private Link or VPN is configured correctly.
|
||||
4. For Grafana Cloud, configure [Private Data Source Connect](ref:configure-azure-monitor) if accessing private resources.
|
||||
|
||||
### SSL/TLS certificate errors
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Certificate validation failures
|
||||
- SSL handshake errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Ensure the system time is correct (certificate validation fails with incorrect time).
|
||||
2. Verify corporate proxy isn't intercepting HTTPS traffic.
|
||||
3. Check that required CA certificates are installed on the Grafana server.
|
||||
|
||||
## Get additional help
|
||||
|
||||
If you've tried the solutions above and still encounter issues:
|
||||
|
||||
1. Check the [Grafana community forums](https://community.grafana.com/) for similar issues.
|
||||
1. Review the [Azure Monitor data source GitHub issues](https://github.com/grafana/grafana/issues) for known bugs.
|
||||
1. Enable debug logging in Grafana to capture detailed error information.
|
||||
1. Contact Grafana Support if you're an Enterprise, Cloud Pro, or Cloud Contracted user.
|
||||
1. When reporting issues, include:
|
||||
- Grafana version
|
||||
- Error messages (redact sensitive information)
|
||||
- Steps to reproduce
|
||||
- Relevant configuration (redact credentials)
|
||||
@@ -52,6 +52,7 @@ The following documents will help you get started with the InfluxDB data source
|
||||
- [Configure the InfluxDB data source](./configure-influxdb-data-source/)
|
||||
- [InfluxDB query editor](./query-editor/)
|
||||
- [InfluxDB templates and variables](./template-variables/)
|
||||
- [Troubleshoot issues with the InfluxDB data source](./troubleshooting/)
|
||||
|
||||
Once you have configured the data source you can:
|
||||
|
||||
|
||||
@@ -0,0 +1,291 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/influxdb/troubleshooting/
|
||||
description: Troubleshooting the InfluxDB data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- influxdb
|
||||
- troubleshooting
|
||||
- errors
|
||||
- flux
|
||||
- influxql
|
||||
- sql
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Troubleshooting
|
||||
title: Troubleshoot issues with the InfluxDB data source
|
||||
weight: 600
|
||||
---
|
||||
|
||||
# Troubleshoot issues with the InfluxDB data source
|
||||
|
||||
This document provides troubleshooting information for common errors you may encounter when using the InfluxDB data source in Grafana.
|
||||
|
||||
## Connection errors
|
||||
|
||||
The following errors occur when Grafana cannot establish or maintain a connection to InfluxDB.
|
||||
|
||||
### Failed to connect to InfluxDB
|
||||
|
||||
**Error message:** "error performing influxQL query" or "error performing flux query" or "error performing sql query"
|
||||
|
||||
**Cause:** Grafana cannot establish a network connection to the InfluxDB server.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify that the InfluxDB URL is correct in the data source configuration.
|
||||
1. Check that InfluxDB is running and accessible from the Grafana server.
|
||||
1. Ensure the URL includes the protocol (`http://` or `https://`).
|
||||
1. Verify the port is correct (the InfluxDB default API port is `8086`).
|
||||
1. Ensure there are no firewall rules blocking the connection.
|
||||
1. For Grafana Cloud, ensure you have configured [Private data source connect](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) if your InfluxDB instance is not publicly accessible.
|
||||
|
||||
### Request timed out
|
||||
|
||||
**Error message:** "context deadline exceeded" or "request timeout"
|
||||
|
||||
**Cause:** The connection to InfluxDB timed out before receiving a response.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check the network latency between Grafana and InfluxDB.
|
||||
1. Verify that InfluxDB is not overloaded or experiencing performance issues.
|
||||
1. Increase the timeout setting in the data source configuration under **Advanced HTTP Settings**.
|
||||
1. Reduce the time range or complexity of your query.
|
||||
|
||||
## Authentication errors
|
||||
|
||||
The following errors occur when there are issues with authentication credentials or permissions.
|
||||
|
||||
### Unauthorized (401)
|
||||
|
||||
**Error message:** "401 Unauthorized" or "authorization failed"
|
||||
|
||||
**Cause:** The authentication credentials are invalid or missing.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify that the token or password is correct in the data source configuration.
|
||||
1. For Flux and SQL, ensure the token has not expired.
|
||||
1. For InfluxQL with InfluxDB 2.x, verify the token is set as an `Authorization` header with the value `Token <your-token>`.
|
||||
1. For InfluxDB 1.x, verify the username and password are correct.
|
||||
1. Check that the token has the required permissions to access the specified bucket or database.
|
||||
|
||||
### Forbidden (403)
|
||||
|
||||
**Error message:** "403 Forbidden" or "access denied"
|
||||
|
||||
**Cause:** The authenticated user or token does not have permission to access the requested resource.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the token has read access to the specified bucket or database.
|
||||
1. Check the token's permissions in the InfluxDB UI under **API Tokens**.
|
||||
1. Ensure the organization ID is correct for Flux queries.
|
||||
1. For InfluxQL with InfluxDB 2.x, verify the DBRP mapping is configured correctly.
|
||||
|
||||
## Configuration errors
|
||||
|
||||
The following errors occur when the data source is not configured correctly.
|
||||
|
||||
### Unknown influx version
|
||||
|
||||
**Error message:** "unknown influx version"
|
||||
|
||||
**Cause:** The query language is not properly configured in the data source settings.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Open the data source configuration in Grafana.
|
||||
1. Verify that a valid query language is selected: **Flux**, **InfluxQL**, or **SQL**.
|
||||
1. Ensure the selected query language matches your InfluxDB version:
|
||||
- Flux: InfluxDB 1.8+ and 2.x
|
||||
- InfluxQL: InfluxDB 1.x and 2.x (with DBRP mapping)
|
||||
- SQL: InfluxDB 3.x only
|
||||
|
||||
### Invalid data source info received
|
||||
|
||||
**Error message:** "invalid data source info received"
|
||||
|
||||
**Cause:** The data source configuration is incomplete or corrupted.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Delete and recreate the data source.
|
||||
1. Ensure all required fields are populated based on your query language:
|
||||
- **Flux:** URL, Organization, Token, Default Bucket
|
||||
- **InfluxQL:** URL, Database, User, Password
|
||||
- **SQL:** URL, Database, Token
|
||||
|
||||
### DBRP mapping required
|
||||
|
||||
**Error message:** "database not found" or queries return no data with InfluxQL on InfluxDB 2.x
|
||||
|
||||
**Cause:** InfluxQL queries on InfluxDB 2.x require a Database and Retention Policy (DBRP) mapping.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Create a DBRP mapping in InfluxDB using the CLI or API.
|
||||
1. Refer to [Manage DBRP Mappings](https://docs.influxdata.com/influxdb/cloud/query-data/influxql/dbrp/) for guidance.
|
||||
1. Verify the database name in Grafana matches the DBRP mapping.
|
||||
|
||||
## Query errors
|
||||
|
||||
The following errors occur when there are issues with query syntax or execution.
|
||||
|
||||
### Query syntax error
|
||||
|
||||
**Error message:** "error parsing query: found THING" or "failed to parse query: found WERE, expected ; at line 1, char 38"
|
||||
|
||||
**Cause:** The query contains invalid syntax.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check your query syntax for typos or invalid keywords.
|
||||
1. For InfluxQL, verify the query follows the correct syntax:
|
||||
|
||||
```sql
|
||||
SELECT <field> FROM <measurement> WHERE <condition>
|
||||
```
|
||||
|
||||
1. For Flux, ensure proper pipe-forward syntax and function calls.
|
||||
1. Use the InfluxDB UI or CLI to test your query directly.
|
||||
|
||||
### Query timeout limit exceeded
|
||||
|
||||
**Error message:** "query-timeout limit exceeded"
|
||||
|
||||
**Cause:** The query took longer than the configured timeout limit in InfluxDB.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Reduce the time range of your query.
|
||||
1. Add more specific filters to limit the data scanned.
|
||||
1. Increase the query timeout setting in InfluxDB if you have admin access.
|
||||
1. Optimize your query to reduce complexity.
|
||||
|
||||
### Too many series or data points
|
||||
|
||||
**Error message:** "max-series-per-database limit exceeded" or "A query returned too many data points and the results have been truncated"
|
||||
|
||||
**Cause:** The query is returning more data than the configured limits allow.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Reduce the time range of your query.
|
||||
1. Add filters to limit the number of series returned.
|
||||
1. Increase the **Max series** setting in the data source configuration under **Advanced Database Settings**.
|
||||
1. Use aggregation functions to reduce the number of data points.
|
||||
1. For Flux, use `aggregateWindow()` to downsample data.
|
||||
|
||||
### No time column found
|
||||
|
||||
**Error message:** "no time column found"
|
||||
|
||||
**Cause:** The query result does not include a time column, which is required for time series visualization.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Ensure your query includes a time field.
|
||||
1. For Flux, verify the query includes `_time` in the output.
|
||||
1. For SQL, ensure the query returns a timestamp column.
|
||||
1. Check that the time field is not being filtered out or excluded.
|
||||
|
||||
## Health check errors
|
||||
|
||||
The following errors occur when testing the data source connection.
|
||||
|
||||
### Error getting flux query buckets
|
||||
|
||||
**Error message:** "error getting flux query buckets"
|
||||
|
||||
**Cause:** The health check query `buckets()` failed to return results.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the token has permission to list buckets.
|
||||
1. Check that the organization ID is correct.
|
||||
1. Ensure InfluxDB is running and accessible.
|
||||
|
||||
### Error connecting InfluxDB influxQL
|
||||
|
||||
**Error message:** "error connecting InfluxDB influxQL"
|
||||
|
||||
**Cause:** The health check query `SHOW MEASUREMENTS` failed.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the database name is correct.
|
||||
1. Check that the user has permission to run `SHOW MEASUREMENTS`.
|
||||
1. Ensure the database exists and contains measurements.
|
||||
1. For InfluxDB 2.x, verify DBRP mapping is configured.
|
||||
|
||||
### 0 measurements found
|
||||
|
||||
**Error message:** "data source is working. 0 measurements found"
|
||||
|
||||
**Cause:** The connection is successful, but the database contains no measurements.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify you are connecting to the correct database.
|
||||
1. Check that data has been written to the database.
|
||||
1. If the database is new, add some test data to verify the connection.
|
||||
|
||||
## Other common issues
|
||||
|
||||
The following issues don't produce specific error messages but are commonly encountered.
|
||||
|
||||
### Empty query results
|
||||
|
||||
**Cause:** The query returns no data.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the time range includes data in your database.
|
||||
1. Check that the measurement and field names are correct.
|
||||
1. Test the query directly in the InfluxDB UI or CLI.
|
||||
1. Ensure filters are not excluding all data.
|
||||
1. For InfluxQL, verify the retention policy contains data for the selected time range.
|
||||
|
||||
### Slow query performance
|
||||
|
||||
**Cause:** Queries take a long time to execute.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Reduce the time range of your query.
|
||||
1. Add more specific filters to limit the data scanned.
|
||||
1. Increase the **Min time interval** setting to reduce the number of data points.
|
||||
1. Check InfluxDB server performance and resource utilization.
|
||||
1. For Flux, use `aggregateWindow()` to downsample data before visualization.
|
||||
1. Consider using continuous queries or tasks to pre-aggregate data.
|
||||
|
||||
### Data appears delayed or missing recent points
|
||||
|
||||
**Cause:** The visualization doesn't show the most recent data.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check the dashboard time range and refresh settings.
|
||||
1. Verify the **Min time interval** is not set too high.
|
||||
1. Ensure InfluxDB has finished writing the data.
|
||||
1. Check for clock synchronization issues between Grafana and InfluxDB.
|
||||
|
||||
## Get additional help
|
||||
|
||||
If you continue to experience issues after following this troubleshooting guide:
|
||||
|
||||
1. Check the [InfluxDB documentation](https://docs.influxdata.com/) for API-specific guidance.
|
||||
1. Review the [Grafana community forums](https://community.grafana.com/) for similar issues.
|
||||
1. Contact Grafana Support if you're an Enterprise, Cloud Pro, or Cloud Contracted user.
|
||||
1. When reporting issues, include:
|
||||
- Grafana version
|
||||
- InfluxDB version and product (OSS, Cloud, Enterprise)
|
||||
- Query language (Flux, InfluxQL, or SQL)
|
||||
- Error messages (redact sensitive information)
|
||||
- Steps to reproduce
|
||||
- Relevant configuration such as data source settings, HTTP method, and TLS settings (redact tokens, passwords, and other credentials)
|
||||
@@ -223,17 +223,25 @@ To export a dashboard in its current state as a PDF, follow these steps:
|
||||
|
||||
1. Click the **X** at the top-right corner to close the share drawer.
|
||||
|
||||
### Export a dashboard as JSON
|
||||
### Export a dashboard as code
|
||||
|
||||
Export a Grafana JSON file that contains everything you need, including layout, variables, styles, data sources, queries, and so on, so that you can later import the dashboard. To export a JSON file, follow these steps:
|
||||
|
||||
1. Click **Dashboards** in the main menu.
|
||||
1. Open the dashboard you want to export.
|
||||
1. Click the **Export** drop-down list in the top-right corner and select **Export as JSON**.
|
||||
1. Click the **Export** drop-down list in the top-right corner and select **Export as code**.
|
||||
|
||||
The **Export dashboard JSON** drawer opens.
|
||||
The **Export dashboard** drawer opens.
|
||||
|
||||
1. Select the dashboard JSON model that you want to export:
|
||||
- **Classic** - Export dashboards created using the [current dashboard schema](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/visualizations/dashboards/build-dashboards/view-dashboard-json-model/).
|
||||
- **V1 Resource** - Export dashboards created using the [current dashboard schema](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/visualizations/dashboards/build-dashboards/view-dashboard-json-model/) wrapped in the `spec` property of the [V1 Kubernetes-style resource](https://play.grafana.org/swagger?api=dashboard.grafana.app-v2alpha1). Choose between **JSON** and **YAML** format.
|
||||
- **V2 Resource** - Export dashboards created using the [V2 Resource schema](https://play.grafana.org/swagger?api=dashboard.grafana.app-v2beta1). Choose between **JSON** and **YAML** format.
|
||||
|
||||
1. Do one of the following:
|
||||
- Toggle the **Export for sharing externally** switch to generate the JSON with a different data source UID.
|
||||
- Toggle the **Remove deployment details** switch to make the dashboard externally shareable.
|
||||
|
||||
1. Toggle the **Export the dashboard to use in another instance** switch to generate the JSON with a different data source UID.
|
||||
1. Click **Download file** or **Copy to clipboard**.
|
||||
1. Click the **X** at the top-right corner to close the share drawer.
|
||||
|
||||
|
||||
@@ -343,6 +343,33 @@ test.describe('Panels test: Table - Kitchen Sink', { tag: ['@panels', '@table']
|
||||
// TODO -- saving for another day.
|
||||
});
|
||||
|
||||
test('Tests nested table expansion', async ({ gotoDashboardPage, selectors, page }) => {
|
||||
const dashboardPage = await gotoDashboardPage({
|
||||
uid: DASHBOARD_UID,
|
||||
queryParams: new URLSearchParams({ editPanel: '4' }),
|
||||
});
|
||||
|
||||
await expect(
|
||||
dashboardPage.getByGrafanaSelector(selectors.components.Panels.Panel.title('Nested tables'))
|
||||
).toBeVisible();
|
||||
|
||||
await waitForTableLoad(page);
|
||||
|
||||
await expect(page.locator('[role="row"]')).toHaveCount(3); // header + 2 rows
|
||||
|
||||
const firstRowExpander = dashboardPage
|
||||
.getByGrafanaSelector(selectors.components.Panels.Visualization.TableNG.RowExpander)
|
||||
.first();
|
||||
|
||||
await firstRowExpander.click();
|
||||
await expect(page.locator('[role="row"]')).not.toHaveCount(3); // more rows are present now, it is dynamic tho.
|
||||
|
||||
// TODO: test sorting
|
||||
|
||||
await firstRowExpander.click();
|
||||
await expect(page.locator('[role="row"]')).toHaveCount(3); // back to original state
|
||||
});
|
||||
|
||||
test('Tests tooltip interactions', async ({ gotoDashboardPage, selectors }) => {
|
||||
const dashboardPage = await gotoDashboardPage({
|
||||
uid: DASHBOARD_UID,
|
||||
|
||||
@@ -804,11 +804,6 @@
|
||||
"count": 2
|
||||
}
|
||||
},
|
||||
"packages/grafana-ui/src/components/Table/TableNG/utils.ts": {
|
||||
"@typescript-eslint/consistent-type-assertions": {
|
||||
"count": 1
|
||||
}
|
||||
},
|
||||
"packages/grafana-ui/src/components/Table/TableRT/Filter.tsx": {
|
||||
"@typescript-eslint/no-explicit-any": {
|
||||
"count": 1
|
||||
|
||||
@@ -575,6 +575,42 @@ module.exports = [
|
||||
"Property[key.name='a11y'][value.type='ObjectExpression'] Property[key.name='test'][value.value='off']",
|
||||
message: 'Skipping a11y tests is not allowed. Please fix the component or story instead.',
|
||||
},
|
||||
{
|
||||
selector: 'MemberExpression[object.name="config"][property.name="apps"]',
|
||||
message:
|
||||
'Usage of config.apps is not allowed. Use the function getAppPluginMetas() from @grafana/runtime instead',
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
files: [...commonTestIgnores],
|
||||
ignores: [
|
||||
// FIXME: Remove once all enterprise issues are fixed -
|
||||
// we don't have a suppressions file/approach for enterprise code yet
|
||||
...enterpriseIgnores,
|
||||
],
|
||||
rules: {
|
||||
'no-restricted-syntax': [
|
||||
'error',
|
||||
{
|
||||
selector: 'MemberExpression[object.name="config"][property.name="apps"]',
|
||||
message:
|
||||
'Usage of config.apps is not allowed. Use the function getAppPluginMetas() from @grafana/runtime instead',
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
files: [...enterpriseIgnores],
|
||||
rules: {
|
||||
'no-restricted-syntax': [
|
||||
'error',
|
||||
{
|
||||
selector: 'MemberExpression[object.name="config"][property.name="apps"]',
|
||||
message:
|
||||
'Usage of config.apps is not allowed. Use the function getAppPluginMetas() from @grafana/runtime instead',
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
|
||||
+1
-1
@@ -289,7 +289,7 @@
|
||||
"@grafana/google-sdk": "0.3.5",
|
||||
"@grafana/i18n": "workspace:*",
|
||||
"@grafana/lezer-logql": "0.2.9",
|
||||
"@grafana/llm": "0.22.1",
|
||||
"@grafana/llm": "1.0.1",
|
||||
"@grafana/monaco-logql": "^0.0.8",
|
||||
"@grafana/o11y-ds-frontend": "workspace:*",
|
||||
"@grafana/plugin-ui": "^0.11.1",
|
||||
|
||||
+5
-1
@@ -1138,7 +1138,7 @@ export type JobResourceSummary = {
|
||||
delete?: number;
|
||||
/** Create or update (export) */
|
||||
error?: number;
|
||||
/** Report errors for this resource type This may not be an exhaustive list and recommend looking at the logs for more info */
|
||||
/** Report errors/warnings for this resource type This may not be an exhaustive list and recommend looking at the logs for more info */
|
||||
errors?: string[];
|
||||
group?: string;
|
||||
kind?: string;
|
||||
@@ -1146,6 +1146,9 @@ export type JobResourceSummary = {
|
||||
noop?: number;
|
||||
total?: number;
|
||||
update?: number;
|
||||
/** The error count */
|
||||
warning?: number;
|
||||
warnings?: string[];
|
||||
write?: number;
|
||||
};
|
||||
export type RepositoryUrLs = {
|
||||
@@ -1176,6 +1179,7 @@ export type JobStatus = {
|
||||
summary?: JobResourceSummary[];
|
||||
/** URLs contains URLs for the reference branch or commit if applicable. */
|
||||
url?: RepositoryUrLs;
|
||||
warnings?: string[];
|
||||
};
|
||||
export type Job = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
|
||||
@@ -499,6 +499,9 @@ export const versionedComponents = {
|
||||
},
|
||||
},
|
||||
TableNG: {
|
||||
RowExpander: {
|
||||
'12.4.0': 'data-testid tableng row expander',
|
||||
},
|
||||
Filters: {
|
||||
HeaderButton: {
|
||||
'12.1.0': 'data-testid tableng header filter',
|
||||
|
||||
@@ -86,6 +86,7 @@ export class GrafanaBootConfig {
|
||||
snapshotEnabled = true;
|
||||
datasources: { [str: string]: DataSourceInstanceSettings } = {};
|
||||
panels: { [key: string]: PanelPluginMeta } = {};
|
||||
/** @deprecated it will be removed in a future release, use getAppPluginMetas function or useAppPluginMetas hook instead */
|
||||
apps: Record<string, AppPluginConfigGrafanaData> = {};
|
||||
auth: AuthSettings = {};
|
||||
minRefreshInterval = '';
|
||||
|
||||
@@ -29,3 +29,4 @@ export {
|
||||
export { UserStorage } from '../utils/userStorage';
|
||||
|
||||
export { initOpenFeature, evaluateBooleanFlag } from './openFeature';
|
||||
export { setAppPluginMetas } from '../services/plugins';
|
||||
|
||||
@@ -0,0 +1,90 @@
|
||||
import { cloneDeep } from 'lodash';
|
||||
import { useAsync } from 'react-use';
|
||||
|
||||
import { AppPluginConfig } from '@grafana/data';
|
||||
|
||||
import { config } from '../config';
|
||||
|
||||
export type AppPluginMetas = Record<string, AppPluginConfig>;
|
||||
|
||||
let apps: AppPluginMetas = {};
|
||||
let appsPromise: Promise<void> | undefined = undefined;
|
||||
|
||||
function areAppsInitialized(): boolean {
|
||||
return Boolean(Object.keys(apps).length);
|
||||
}
|
||||
|
||||
async function initPluginMetas(): Promise<void> {
|
||||
if (appsPromise) {
|
||||
return appsPromise;
|
||||
}
|
||||
|
||||
appsPromise = new Promise((resolve) => {
|
||||
if (config.featureToggles.useMTPlugins) {
|
||||
// add loading app configs from MT API here
|
||||
apps = {};
|
||||
resolve();
|
||||
return;
|
||||
}
|
||||
|
||||
// eslint-disable-next-line no-restricted-syntax
|
||||
apps = config.apps;
|
||||
resolve();
|
||||
return;
|
||||
});
|
||||
|
||||
return appsPromise;
|
||||
}
|
||||
|
||||
export async function getAppPluginMetas(): Promise<AppPluginConfig[]> {
|
||||
if (!areAppsInitialized()) {
|
||||
await initPluginMetas();
|
||||
}
|
||||
|
||||
return Object.values(cloneDeep(apps));
|
||||
}
|
||||
|
||||
export async function getAppPluginMeta(id: string): Promise<AppPluginConfig | undefined> {
|
||||
if (!areAppsInitialized()) {
|
||||
await initPluginMetas();
|
||||
}
|
||||
|
||||
if (!apps[id]) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
return cloneDeep(apps[id]);
|
||||
}
|
||||
|
||||
export function setAppPluginMetas(override: AppPluginMetas) {
|
||||
// We allow overriding apps in tests
|
||||
if (override && process.env.NODE_ENV !== 'test') {
|
||||
throw new Error('setAppPluginMetas() function can only be called from tests.');
|
||||
}
|
||||
|
||||
apps = { ...override };
|
||||
}
|
||||
|
||||
export interface UseAppPluginMetasResult {
|
||||
isAppPluginMetasLoading: boolean;
|
||||
error: Error | undefined;
|
||||
apps: AppPluginConfig[];
|
||||
}
|
||||
|
||||
export function useAppPluginMetas(filterByIds: string[] = []): UseAppPluginMetasResult {
|
||||
const { loading, error, value: apps = [] } = useAsync(getAppPluginMetas);
|
||||
const filtered = apps.filter((app) => filterByIds.includes(app.id));
|
||||
|
||||
return { isAppPluginMetasLoading: loading, error, apps: filtered };
|
||||
}
|
||||
|
||||
export interface UseAppPluginMetaResult {
|
||||
isAppPluginMetaLoading: boolean;
|
||||
error: Error | undefined;
|
||||
app: AppPluginConfig | undefined;
|
||||
}
|
||||
|
||||
export function useAppPluginMeta(filterById: string): UseAppPluginMetaResult {
|
||||
const { loading, error, value: app } = useAsync(() => getAppPluginMeta(filterById));
|
||||
return { isAppPluginMetaLoading: loading, error, app };
|
||||
}
|
||||
@@ -11,3 +11,13 @@
|
||||
|
||||
// This is a dummy export so typescript doesn't error importing an "empty module"
|
||||
export const unstable = {};
|
||||
|
||||
export {
|
||||
type AppPluginMetas,
|
||||
type UseAppPluginMetaResult,
|
||||
type UseAppPluginMetasResult,
|
||||
getAppPluginMeta,
|
||||
getAppPluginMetas,
|
||||
useAppPluginMeta,
|
||||
useAppPluginMetas,
|
||||
} from './services/plugins';
|
||||
|
||||
@@ -106,6 +106,11 @@ export function RadialGauge(props: RadialGaugeProps) {
|
||||
const gaugeId = useId();
|
||||
const styles = useStyles2(getStyles);
|
||||
|
||||
let effectiveTextMode = textMode;
|
||||
if (effectiveTextMode === 'auto') {
|
||||
effectiveTextMode = vizCount === 1 ? 'value' : 'value_and_name';
|
||||
}
|
||||
|
||||
const startAngle = shape === 'gauge' ? 250 : 0;
|
||||
const endAngle = shape === 'gauge' ? 110 : 360;
|
||||
|
||||
@@ -188,7 +193,7 @@ export function RadialGauge(props: RadialGaugeProps) {
|
||||
// These elements are only added for first value / bar
|
||||
if (barIndex === 0) {
|
||||
if (glowBar) {
|
||||
defs.push(<GlowGradient key="glow-filter" id={glowFilterId} radius={dimensions.radius} />);
|
||||
defs.push(<GlowGradient key="glow-filter" id={glowFilterId} barWidth={dimensions.barWidth} />);
|
||||
}
|
||||
|
||||
if (glowCenter) {
|
||||
@@ -198,14 +203,14 @@ export function RadialGauge(props: RadialGaugeProps) {
|
||||
graphics.push(
|
||||
<RadialText
|
||||
key="radial-text"
|
||||
vizCount={vizCount}
|
||||
textMode={textMode}
|
||||
textMode={effectiveTextMode}
|
||||
displayValue={displayValue.display}
|
||||
dimensions={dimensions}
|
||||
theme={theme}
|
||||
valueManualFontSize={props.valueManualFontSize}
|
||||
nameManualFontSize={props.nameManualFontSize}
|
||||
shape={shape}
|
||||
sparkline={displayValue.sparkline}
|
||||
/>
|
||||
);
|
||||
|
||||
@@ -254,6 +259,7 @@ export function RadialGauge(props: RadialGaugeProps) {
|
||||
theme={theme}
|
||||
color={color}
|
||||
shape={shape}
|
||||
textMode={effectiveTextMode}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
import { css } from '@emotion/css';
|
||||
|
||||
import { FieldDisplay, GrafanaTheme2, FieldConfig } from '@grafana/data';
|
||||
import { GraphFieldConfig, GraphGradientMode, LineInterpolation } from '@grafana/schema';
|
||||
|
||||
import { Sparkline } from '../Sparkline/Sparkline';
|
||||
|
||||
import { RadialShape } from './RadialGauge';
|
||||
import { RadialShape, RadialTextMode } from './RadialGauge';
|
||||
import { GaugeDimensions } from './utils';
|
||||
|
||||
interface RadialSparklineProps {
|
||||
@@ -14,23 +12,22 @@ interface RadialSparklineProps {
|
||||
theme: GrafanaTheme2;
|
||||
color?: string;
|
||||
shape?: RadialShape;
|
||||
textMode: Exclude<RadialTextMode, 'auto'>;
|
||||
}
|
||||
export function RadialSparkline({ sparkline, dimensions, theme, color, shape }: RadialSparklineProps) {
|
||||
export function RadialSparkline({ sparkline, dimensions, theme, color, shape, textMode }: RadialSparklineProps) {
|
||||
const { radius, barWidth } = dimensions;
|
||||
|
||||
if (!sparkline) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const { radius, barWidth } = dimensions;
|
||||
|
||||
const height = radius / 4;
|
||||
const widthFactor = shape === 'gauge' ? 1.6 : 1.4;
|
||||
const width = radius * widthFactor - barWidth;
|
||||
const topPos = shape === 'gauge' ? `${dimensions.gaugeBottomY - height}px` : `calc(50% + ${radius / 2.8}px)`;
|
||||
|
||||
const styles = css({
|
||||
position: 'absolute',
|
||||
top: topPos,
|
||||
});
|
||||
const showNameAndValue = textMode === 'value_and_name';
|
||||
const height = radius / (showNameAndValue ? 4 : 3);
|
||||
const width = radius * (shape === 'gauge' ? 1.6 : 1.4) - barWidth;
|
||||
const topPos =
|
||||
shape === 'gauge'
|
||||
? `${dimensions.gaugeBottomY - height}px`
|
||||
: `calc(50% + ${radius / (showNameAndValue ? 3.3 : 4)}px)`;
|
||||
|
||||
const config: FieldConfig<GraphFieldConfig> = {
|
||||
color: {
|
||||
@@ -45,7 +42,7 @@ export function RadialSparkline({ sparkline, dimensions, theme, color, shape }:
|
||||
};
|
||||
|
||||
return (
|
||||
<div className={styles}>
|
||||
<div style={{ position: 'absolute', top: topPos }}>
|
||||
<Sparkline height={height} width={width} sparkline={sparkline} theme={theme} config={config} />
|
||||
</div>
|
||||
);
|
||||
|
||||
@@ -1,6 +1,12 @@
|
||||
import { css } from '@emotion/css';
|
||||
|
||||
import { DisplayValue, DisplayValueAlignmentFactors, formattedValueToString, GrafanaTheme2 } from '@grafana/data';
|
||||
import {
|
||||
DisplayValue,
|
||||
DisplayValueAlignmentFactors,
|
||||
FieldSparkline,
|
||||
formattedValueToString,
|
||||
GrafanaTheme2,
|
||||
} from '@grafana/data';
|
||||
|
||||
import { useStyles2 } from '../../themes/ThemeContext';
|
||||
import { calculateFontSize } from '../../utils/measureText';
|
||||
@@ -8,21 +14,13 @@ import { calculateFontSize } from '../../utils/measureText';
|
||||
import { RadialShape, RadialTextMode } from './RadialGauge';
|
||||
import { GaugeDimensions } from './utils';
|
||||
|
||||
// function toCartesian(centerX: number, centerY: number, radius: number, angleInDegrees: number) {
|
||||
// let radian = ((angleInDegrees - 90) * Math.PI) / 180.0;
|
||||
// return {
|
||||
// x: centerX + radius * Math.cos(radian),
|
||||
// y: centerY + radius * Math.sin(radian),
|
||||
// };
|
||||
// }
|
||||
|
||||
interface RadialTextProps {
|
||||
displayValue: DisplayValue;
|
||||
theme: GrafanaTheme2;
|
||||
dimensions: GaugeDimensions;
|
||||
textMode: RadialTextMode;
|
||||
vizCount: number;
|
||||
textMode: Exclude<RadialTextMode, 'auto'>;
|
||||
shape: RadialShape;
|
||||
sparkline?: FieldSparkline;
|
||||
alignmentFactors?: DisplayValueAlignmentFactors;
|
||||
valueManualFontSize?: number;
|
||||
nameManualFontSize?: number;
|
||||
@@ -33,8 +31,8 @@ export function RadialText({
|
||||
theme,
|
||||
dimensions,
|
||||
textMode,
|
||||
vizCount,
|
||||
shape,
|
||||
sparkline,
|
||||
alignmentFactors,
|
||||
valueManualFontSize,
|
||||
nameManualFontSize,
|
||||
@@ -46,10 +44,6 @@ export function RadialText({
|
||||
return null;
|
||||
}
|
||||
|
||||
if (textMode === 'auto') {
|
||||
textMode = vizCount === 1 ? 'value' : 'value_and_name';
|
||||
}
|
||||
|
||||
const nameToAlignTo = (alignmentFactors ? alignmentFactors.title : displayValue.title) ?? '';
|
||||
const valueToAlignTo = formattedValueToString(alignmentFactors ? alignmentFactors : displayValue);
|
||||
|
||||
@@ -59,7 +53,7 @@ export function RadialText({
|
||||
|
||||
// Not sure where this comes from but svg text is not using body line-height
|
||||
const lineHeight = 1.21;
|
||||
const valueWidthToRadiusFactor = 0.85;
|
||||
const valueWidthToRadiusFactor = 0.82;
|
||||
const nameToHeightFactor = 0.45;
|
||||
const largeRadiusScalingDecay = 0.86;
|
||||
|
||||
@@ -98,18 +92,23 @@ export function RadialText({
|
||||
const valueHeight = valueFontSize * lineHeight;
|
||||
const nameHeight = nameFontSize * lineHeight;
|
||||
|
||||
const valueY = showName ? centerY - nameHeight / 2 : centerY;
|
||||
const valueNameSpacing = valueHeight / 3.5;
|
||||
const nameY = showValue ? valueY + valueHeight / 2 + valueNameSpacing : centerY;
|
||||
const valueY = showName ? centerY - nameHeight * 0.3 : centerY;
|
||||
const nameY = showValue ? valueY + valueHeight * 0.7 : centerY;
|
||||
const nameColor = showValue ? theme.colors.text.secondary : theme.colors.text.primary;
|
||||
const suffixShift = (valueFontSize - unitFontSize * 1.2) / 2;
|
||||
|
||||
// For gauge shape we shift text up a bit
|
||||
const valueDy = shape === 'gauge' ? -valueFontSize * 0.3 : 0;
|
||||
const nameDy = shape === 'gauge' ? -nameFontSize * 0.7 : 0;
|
||||
// adjust the text up on gauges and when sparklines are present
|
||||
let yOffset = 0;
|
||||
if (shape === 'gauge') {
|
||||
// we render from the center of the gauge, so move up by half of half of the total height
|
||||
yOffset -= (valueHeight + nameHeight) / 4;
|
||||
}
|
||||
if (sparkline) {
|
||||
yOffset -= 8;
|
||||
}
|
||||
|
||||
return (
|
||||
<g>
|
||||
<g transform={`translate(0, ${yOffset})`}>
|
||||
{showValue && (
|
||||
<text
|
||||
x={centerX}
|
||||
@@ -119,7 +118,6 @@ export function RadialText({
|
||||
className={styles.text}
|
||||
textAnchor="middle"
|
||||
dominantBaseline="middle"
|
||||
dy={valueDy}
|
||||
>
|
||||
<tspan fontSize={unitFontSize}>{displayValue.prefix ?? ''}</tspan>
|
||||
<tspan>{displayValue.text}</tspan>
|
||||
@@ -133,7 +131,6 @@ export function RadialText({
|
||||
fontSize={nameFontSize}
|
||||
x={centerX}
|
||||
y={nameY}
|
||||
dy={nameDy}
|
||||
textAnchor="middle"
|
||||
dominantBaseline="middle"
|
||||
fill={nameColor}
|
||||
|
||||
@@ -4,11 +4,12 @@ import { GaugeDimensions } from './utils';
|
||||
|
||||
export interface GlowGradientProps {
|
||||
id: string;
|
||||
radius: number;
|
||||
barWidth: number;
|
||||
}
|
||||
|
||||
export function GlowGradient({ id, radius }: GlowGradientProps) {
|
||||
const glowSize = 0.02 * radius;
|
||||
export function GlowGradient({ id, barWidth }: GlowGradientProps) {
|
||||
// 0.75 is the minimum glow size, and it scales with bar width
|
||||
const glowSize = 0.75 + barWidth * 0.08;
|
||||
|
||||
return (
|
||||
<filter id={id} filterUnits="userSpaceOnUse">
|
||||
@@ -82,7 +83,7 @@ export function MiddleCircleGlow({ dimensions, gaugeId, color }: CenterGlowProps
|
||||
<>
|
||||
<defs>
|
||||
<radialGradient id={gradientId} r={'50%'} fr={'0%'}>
|
||||
<stop offset="0%" stopColor={color} stopOpacity={0.2} />
|
||||
<stop offset="0%" stopColor={color} stopOpacity={0.15} />
|
||||
<stop offset="90%" stopColor={color} stopOpacity={0} />
|
||||
</radialGradient>
|
||||
</defs>
|
||||
|
||||
@@ -16,7 +16,7 @@ export interface SparklineProps extends Themeable2 {
|
||||
sparkline: FieldSparkline;
|
||||
}
|
||||
|
||||
export const Sparkline: React.FC<SparklineProps> = memo((props) => {
|
||||
const SparklineFn: React.FC<SparklineProps> = memo((props) => {
|
||||
const { sparkline, config: fieldConfig, theme, width, height } = props;
|
||||
|
||||
const { frame: alignedDataFrame, warning } = prepareSeries(sparkline, fieldConfig);
|
||||
@@ -30,4 +30,14 @@ export const Sparkline: React.FC<SparklineProps> = memo((props) => {
|
||||
return <UPlotChart data={data} config={configBuilder} width={width} height={height} />;
|
||||
});
|
||||
|
||||
Sparkline.displayName = 'Sparkline';
|
||||
SparklineFn.displayName = 'Sparkline';
|
||||
|
||||
// we converted to function component above, but some apps extend Sparkline, so we need
|
||||
// to keep exporting a class component until those apps are all rolled out.
|
||||
// see https://github.com/grafana/app-observability-plugin/pull/2079
|
||||
// eslint-disable-next-line react-prefer-function-component/react-prefer-function-component
|
||||
export class Sparkline extends React.PureComponent<SparklineProps> {
|
||||
render() {
|
||||
return <SparklineFn {...this.props} />;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -119,7 +119,14 @@ describe('Get y range', () => {
|
||||
values: [2, 1.999999999999999, 2.000000000000001, 2, 2],
|
||||
type: FieldType.number,
|
||||
config: {},
|
||||
state: { range: { min: 1.999999999999999, max: 2.000000000000001, delta: 0 } },
|
||||
state: { range: { min: 1.9999999999999999999, max: 2.000000000000000001, delta: 0 } },
|
||||
};
|
||||
const decimalsNotCloseYField: Field = {
|
||||
name: 'y',
|
||||
values: [2, 0.0094, 0.0053, 0.0078, 0.0061],
|
||||
type: FieldType.number,
|
||||
config: {},
|
||||
state: { range: { min: 0.0053, max: 0.0094, delta: 0.0041 } },
|
||||
};
|
||||
const xField: Field = {
|
||||
name: 'x',
|
||||
@@ -183,6 +190,11 @@ describe('Get y range', () => {
|
||||
field: decimalsCloseYField,
|
||||
expected: [2, 4],
|
||||
},
|
||||
{
|
||||
description: 'decimal values which are not close to equal should not be rounded out',
|
||||
field: decimalsNotCloseYField,
|
||||
expected: [0.0053, 0.0094],
|
||||
},
|
||||
])(`should return correct range for $description`, ({ field, expected }) => {
|
||||
const actual = getYRange(getAlignedFrame(field));
|
||||
expect(actual).toEqual(expected);
|
||||
|
||||
@@ -8,6 +8,7 @@ import {
|
||||
FieldType,
|
||||
getFieldColorModeForField,
|
||||
GrafanaTheme2,
|
||||
guessDecimals,
|
||||
isLikelyAscendingVector,
|
||||
nullToValue,
|
||||
roundDecimals,
|
||||
@@ -76,8 +77,6 @@ export function getYRange(alignedFrame: DataFrame): Range.MinMax {
|
||||
min = Math.min(min!, field.config.min ?? Infinity);
|
||||
max = Math.max(max!, field.config.max ?? -Infinity);
|
||||
|
||||
// console.log({ min, max });
|
||||
|
||||
// if noValue is set, ensure that it is included in the range as well
|
||||
const noValue = +field.config?.noValue!;
|
||||
if (!Number.isNaN(noValue)) {
|
||||
@@ -85,9 +84,11 @@ export function getYRange(alignedFrame: DataFrame): Range.MinMax {
|
||||
max = Math.max(max, noValue);
|
||||
}
|
||||
|
||||
const decimals = field.config.decimals ?? Math.max(guessDecimals(min), guessDecimals(max));
|
||||
|
||||
// call roundDecimals to mirror what is going to eventually happen in uplot
|
||||
let roundedMin = roundDecimals(min, field.config.decimals ?? 0);
|
||||
let roundedMax = roundDecimals(max, field.config.decimals ?? 0);
|
||||
let roundedMin = roundDecimals(min, decimals);
|
||||
let roundedMax = roundDecimals(max, decimals);
|
||||
|
||||
// if the rounded min and max are different,
|
||||
// we can return the real min and max.
|
||||
@@ -102,11 +103,9 @@ export function getYRange(alignedFrame: DataFrame): Range.MinMax {
|
||||
roundedMax = 1;
|
||||
} else if (roundedMin < 0) {
|
||||
// both are negative
|
||||
// max = 0;
|
||||
roundedMin *= 2;
|
||||
} else {
|
||||
// both are positive
|
||||
// min = 0;
|
||||
roundedMax *= 2;
|
||||
}
|
||||
|
||||
|
||||
@@ -154,8 +154,18 @@ export function TableNG(props: TableNGProps) {
|
||||
|
||||
const resizeHandler = useColumnResize(onColumnResize);
|
||||
|
||||
const rows = useMemo(() => frameToRecords(data), [data]);
|
||||
const hasNestedFrames = useMemo(() => getIsNestedTable(data.fields), [data]);
|
||||
const nestedFramesFieldName = useMemo(() => {
|
||||
if (!hasNestedFrames) {
|
||||
return;
|
||||
}
|
||||
const firstNestedField = data.fields.find((f) => f.type === FieldType.nestedFrames);
|
||||
if (!firstNestedField) {
|
||||
return;
|
||||
}
|
||||
return getDisplayName(firstNestedField);
|
||||
}, [data, hasNestedFrames]);
|
||||
const rows = useMemo(() => frameToRecords(data, nestedFramesFieldName), [data, nestedFramesFieldName]);
|
||||
const getTextColorForBackground = useMemo(() => memoize(_getTextColorForBackground, { maxSize: 1000 }), []);
|
||||
|
||||
const {
|
||||
@@ -374,7 +384,11 @@ export function TableNG(props: TableNGProps) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const expandedRecords = applySort(frameToRecords(nestedData), nestedData.fields, sortColumns);
|
||||
const expandedRecords = applySort(
|
||||
frameToRecords(nestedData, nestedFramesFieldName),
|
||||
nestedData.fields,
|
||||
sortColumns
|
||||
);
|
||||
if (!expandedRecords.length) {
|
||||
return (
|
||||
<div className={styles.noDataNested}>
|
||||
@@ -398,7 +412,7 @@ export function TableNG(props: TableNGProps) {
|
||||
width: COLUMN.EXPANDER_WIDTH,
|
||||
minWidth: COLUMN.EXPANDER_WIDTH,
|
||||
}),
|
||||
[commonDataGridProps, data.fields.length, expandedRows, sortColumns, styles]
|
||||
[commonDataGridProps, data.fields.length, expandedRows, sortColumns, styles, nestedFramesFieldName]
|
||||
);
|
||||
|
||||
const fromFields = useCallback(
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { css } from '@emotion/css';
|
||||
|
||||
import { GrafanaTheme2 } from '@grafana/data';
|
||||
import { selectors } from '@grafana/e2e-selectors';
|
||||
import { t } from '@grafana/i18n';
|
||||
|
||||
import { useStyles2 } from '../../../../themes/ThemeContext';
|
||||
@@ -16,13 +17,21 @@ export function RowExpander({ onCellExpand, isExpanded }: RowExpanderNGProps) {
|
||||
}
|
||||
}
|
||||
return (
|
||||
<div role="button" tabIndex={0} className={styles.expanderCell} onClick={onCellExpand} onKeyDown={handleKeyDown}>
|
||||
<div
|
||||
role="button"
|
||||
tabIndex={0}
|
||||
className={styles.expanderCell}
|
||||
onClick={onCellExpand}
|
||||
onKeyDown={handleKeyDown}
|
||||
data-testid={selectors.components.Panels.Visualization.TableNG.RowExpander}
|
||||
>
|
||||
<Icon
|
||||
aria-label={
|
||||
isExpanded
|
||||
? t('grafana-ui.row-expander-ng.aria-label-collapse', 'Collapse row')
|
||||
: t('grafana-ui.row-expander.aria-label-expand', 'Expand row')
|
||||
}
|
||||
aria-expanded={isExpanded}
|
||||
name={isExpanded ? 'angle-down' : 'angle-right'}
|
||||
size="lg"
|
||||
/>
|
||||
|
||||
@@ -79,7 +79,6 @@ export interface TableRow {
|
||||
|
||||
// Nested table properties
|
||||
data?: DataFrame;
|
||||
__nestedFrames?: DataFrame[];
|
||||
__expanded?: boolean; // For row expansion state
|
||||
|
||||
// Generic typing for column values
|
||||
@@ -262,7 +261,7 @@ export type TableCellStyles = (theme: GrafanaTheme2, options: TableCellStyleOpti
|
||||
export type Comparator = (a: TableCellValue, b: TableCellValue) => number;
|
||||
|
||||
// Type for converting a DataFrame into an array of TableRows
|
||||
export type FrameToRowsConverter = (frame: DataFrame) => TableRow[];
|
||||
export type FrameToRowsConverter = (frame: DataFrame, nestedFramesFieldName?: string) => TableRow[];
|
||||
|
||||
// Type for mapping column names to their field types
|
||||
export type ColumnTypes = Record<string, FieldType>;
|
||||
|
||||
@@ -675,10 +675,12 @@ export function applySort(
|
||||
/**
|
||||
* @internal
|
||||
*/
|
||||
export const frameToRecords = (frame: DataFrame): TableRow[] => {
|
||||
export const frameToRecords = (frame: DataFrame, nestedFramesFieldName?: string): TableRow[] => {
|
||||
const fnBody = `
|
||||
const rows = Array(frame.length);
|
||||
const values = frame.fields.map(f => f.values);
|
||||
const hasNestedFrames = '${nestedFramesFieldName ?? ''}'.length > 0;
|
||||
|
||||
let rowCount = 0;
|
||||
for (let i = 0; i < frame.length; i++) {
|
||||
rows[rowCount] = {
|
||||
@@ -686,11 +688,14 @@ export const frameToRecords = (frame: DataFrame): TableRow[] => {
|
||||
__index: i,
|
||||
${frame.fields.map((field, fieldIdx) => `${JSON.stringify(getDisplayName(field))}: values[${fieldIdx}][i]`).join(',')}
|
||||
};
|
||||
rowCount += 1;
|
||||
if (rows[rowCount-1]['__nestedFrames']){
|
||||
const childFrame = rows[rowCount-1]['__nestedFrames'];
|
||||
rows[rowCount] = {__depth: 1, __index: i, data: childFrame[0]}
|
||||
rowCount += 1;
|
||||
rowCount++;
|
||||
|
||||
if (hasNestedFrames) {
|
||||
const childFrame = rows[rowCount-1][${JSON.stringify(nestedFramesFieldName)}];
|
||||
if (childFrame){
|
||||
rows[rowCount] = {__depth: 1, __index: i, data: childFrame[0]}
|
||||
rowCount++;
|
||||
}
|
||||
}
|
||||
}
|
||||
return rows;
|
||||
@@ -698,8 +703,9 @@ export const frameToRecords = (frame: DataFrame): TableRow[] => {
|
||||
|
||||
// Creates a function that converts a DataFrame into an array of TableRows
|
||||
// Uses new Function() for performance as it's faster than creating rows using loops
|
||||
const convert = new Function('frame', fnBody) as FrameToRowsConverter;
|
||||
return convert(frame);
|
||||
// eslint-disable-next-line @typescript-eslint/consistent-type-assertions
|
||||
const convert = new Function('frame', 'nestedFramesFieldName', fnBody) as FrameToRowsConverter;
|
||||
return convert(frame, nestedFramesFieldName);
|
||||
};
|
||||
|
||||
/* ----------------------------- Data grid comparator ---------------------------- */
|
||||
|
||||
@@ -0,0 +1,29 @@
|
||||
package auditing
|
||||
|
||||
import (
|
||||
auditinternal "k8s.io/apiserver/pkg/apis/audit"
|
||||
"k8s.io/apiserver/pkg/audit"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
)
|
||||
|
||||
// NoopBackend is a no-op implementation of audit.Backend
|
||||
type NoopBackend struct{}
|
||||
|
||||
func ProvideNoopBackend() audit.Backend { return &NoopBackend{} }
|
||||
|
||||
func (b *NoopBackend) ProcessEvents(k8sEvents ...*auditinternal.Event) bool { return false }
|
||||
|
||||
func (NoopBackend) Run(stopCh <-chan struct{}) error { return nil }
|
||||
|
||||
func (NoopBackend) Shutdown() {}
|
||||
|
||||
func (NoopBackend) String() string { return "" }
|
||||
|
||||
// NoopPolicyRuleEvaluator is a no-op implementation of audit.PolicyRuleEvaluator
|
||||
type NoopPolicyRuleEvaluator struct{}
|
||||
|
||||
func ProvideNoopPolicyRuleEvaluator() audit.PolicyRuleEvaluator { return &NoopPolicyRuleEvaluator{} }
|
||||
|
||||
func (NoopPolicyRuleEvaluator) EvaluatePolicyRule(authorizer.Attributes) audit.RequestAuditConfig {
|
||||
return audit.RequestAuditConfig{Level: auditinternal.LevelNone}
|
||||
}
|
||||
@@ -61,20 +61,24 @@ func (s *legacyStorage) List(ctx context.Context, options *internalversion.ListO
|
||||
}
|
||||
|
||||
func (s *legacyStorage) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Get"), time.Since(start).Seconds())
|
||||
}()
|
||||
if s.dsConfigHandlerRequestsDuration != nil {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Get"), time.Since(start).Seconds())
|
||||
}()
|
||||
}
|
||||
|
||||
return s.datasources.GetDataSource(ctx, name)
|
||||
}
|
||||
|
||||
// Create implements rest.Creater.
|
||||
func (s *legacyStorage) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
|
||||
}()
|
||||
if s.dsConfigHandlerRequestsDuration != nil {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
|
||||
}()
|
||||
}
|
||||
|
||||
ds, ok := obj.(*v0alpha1.DataSource)
|
||||
if !ok {
|
||||
@@ -85,10 +89,12 @@ func (s *legacyStorage) Create(ctx context.Context, obj runtime.Object, createVa
|
||||
|
||||
// Update implements rest.Updater.
|
||||
func (s *legacyStorage) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
|
||||
}()
|
||||
if s.dsConfigHandlerRequestsDuration != nil {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
|
||||
}()
|
||||
}
|
||||
|
||||
old, err := s.Get(ctx, name, &metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@@ -126,10 +132,12 @@ func (s *legacyStorage) Update(ctx context.Context, name string, objInfo rest.Up
|
||||
|
||||
// Delete implements rest.GracefulDeleter.
|
||||
func (s *legacyStorage) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
|
||||
}()
|
||||
if s.dsConfigHandlerRequestsDuration != nil {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
|
||||
}()
|
||||
}
|
||||
|
||||
err := s.datasources.DeleteDataSource(ctx, name)
|
||||
return nil, false, err
|
||||
|
||||
@@ -3,6 +3,7 @@ package datasource
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"maps"
|
||||
|
||||
@@ -38,14 +39,14 @@ var (
|
||||
// DataSourceAPIBuilder is used just so wire has something unique to return
|
||||
type DataSourceAPIBuilder struct {
|
||||
datasourceResourceInfo utils.ResourceInfo
|
||||
|
||||
pluginJSON plugins.JSONData
|
||||
client PluginClient // will only ever be called with the same plugin id!
|
||||
datasources PluginDatasourceProvider
|
||||
contextProvider PluginContextWrapper
|
||||
accessControl accesscontrol.AccessControl
|
||||
queryTypes *queryV0.QueryTypeDefinitionList
|
||||
configCrudUseNewApis bool
|
||||
pluginJSON plugins.JSONData
|
||||
client PluginClient // will only ever be called with the same plugin id!
|
||||
datasources PluginDatasourceProvider
|
||||
contextProvider PluginContextWrapper
|
||||
accessControl accesscontrol.AccessControl
|
||||
queryTypes *queryV0.QueryTypeDefinitionList
|
||||
configCrudUseNewApis bool
|
||||
dataSourceCRUDMetric *prometheus.HistogramVec
|
||||
}
|
||||
|
||||
func RegisterAPIService(
|
||||
@@ -66,6 +67,16 @@ func RegisterAPIService(
|
||||
var err error
|
||||
var builder *DataSourceAPIBuilder
|
||||
|
||||
dataSourceCRUDMetric := metricutil.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Namespace: "grafana",
|
||||
Name: "ds_config_handler_requests_duration_seconds",
|
||||
Help: "Duration of requests handled by datasource configuration handlers",
|
||||
}, []string{"code_path", "handler"})
|
||||
regErr := reg.Register(dataSourceCRUDMetric)
|
||||
if regErr != nil && !errors.As(regErr, &prometheus.AlreadyRegisteredError{}) {
|
||||
return nil, regErr
|
||||
}
|
||||
|
||||
pluginJSONs, err := getDatasourcePlugins(pluginSources)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting list of datasource plugins: %s", err)
|
||||
@@ -91,6 +102,7 @@ func RegisterAPIService(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
builder.SetDataSourceCRUDMetrics(dataSourceCRUDMetric)
|
||||
|
||||
apiRegistrar.RegisterAPI(builder)
|
||||
}
|
||||
@@ -161,6 +173,10 @@ func (b *DataSourceAPIBuilder) GetGroupVersion() schema.GroupVersion {
|
||||
return b.datasourceResourceInfo.GroupVersion()
|
||||
}
|
||||
|
||||
func (b *DataSourceAPIBuilder) SetDataSourceCRUDMetrics(datasourceCRUDMetric *prometheus.HistogramVec) {
|
||||
b.dataSourceCRUDMetric = datasourceCRUDMetric
|
||||
}
|
||||
|
||||
func addKnownTypes(scheme *runtime.Scheme, gv schema.GroupVersion) {
|
||||
scheme.AddKnownTypes(gv,
|
||||
&datasourceV0.DataSource{},
|
||||
@@ -218,13 +234,9 @@ func (b *DataSourceAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver
|
||||
|
||||
if b.configCrudUseNewApis {
|
||||
legacyStore := &legacyStorage{
|
||||
datasources: b.datasources,
|
||||
resourceInfo: &ds,
|
||||
dsConfigHandlerRequestsDuration: metricutil.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Namespace: "grafana",
|
||||
Name: "ds_config_handler_requests_duration_seconds",
|
||||
Help: "Duration of requests handled by datasource configuration handlers",
|
||||
}, []string{"code_path", "handler"}),
|
||||
datasources: b.datasources,
|
||||
resourceInfo: &ds,
|
||||
dsConfigHandlerRequestsDuration: b.dataSourceCRUDMetric,
|
||||
}
|
||||
unified, err := grafanaregistry.NewRegistryStore(opts.Scheme, ds, opts.OptsGetter)
|
||||
if err != nil {
|
||||
|
||||
@@ -0,0 +1,164 @@
|
||||
package authorizer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"github.com/grafana/authlib/authn"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
dashboardv1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1"
|
||||
folderv1 "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1"
|
||||
"github.com/grafana/grafana/apps/provisioning/pkg/auth"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNoConfigProvider = errors.New("no config provider for group resource")
|
||||
ErrNoVersionInfo = errors.New("no version info for group resource")
|
||||
|
||||
Versions = map[schema.GroupResource]string{
|
||||
{Group: folderv1.GROUP, Resource: folderv1.RESOURCE}: folderv1.VERSION,
|
||||
{Group: dashboardv1.GROUP, Resource: dashboardv1.DASHBOARD_RESOURCE}: dashboardv1.VERSION,
|
||||
}
|
||||
)
|
||||
|
||||
// ConfigProvider is a function that provides a rest.Config for a given context.
|
||||
type ConfigProvider func(ctx context.Context) (*rest.Config, error)
|
||||
|
||||
// DynamicClientFactory is a function that creates a dynamic.Interface from a rest.Config.
|
||||
// This can be overridden in tests.
|
||||
type DynamicClientFactory func(config *rest.Config) (dynamic.Interface, error)
|
||||
|
||||
// ParentProvider implementation that fetches the parent folder information from remote API servers.
|
||||
type ParentProviderImpl struct {
|
||||
configProviders map[schema.GroupResource]ConfigProvider
|
||||
versions map[schema.GroupResource]string
|
||||
dynamicClientFactory DynamicClientFactory
|
||||
|
||||
// Cache of dynamic clients for each group resource
|
||||
// This is used to avoid creating a new dynamic client for each request
|
||||
// and to reuse the same client for the same group resource.
|
||||
clients map[schema.GroupResource]dynamic.Interface
|
||||
clientsMu sync.Mutex
|
||||
}
|
||||
|
||||
// DialConfig holds the configuration for dialing a remote API server.
|
||||
type DialConfig struct {
|
||||
Host string
|
||||
Insecure bool
|
||||
CAFile string
|
||||
Audience string
|
||||
}
|
||||
|
||||
// NewLocalConfigProvider creates a map of ConfigProviders that return the same given config for local API servers.
|
||||
func NewLocalConfigProvider(
|
||||
configProvider ConfigProvider,
|
||||
) map[schema.GroupResource]ConfigProvider {
|
||||
return map[schema.GroupResource]ConfigProvider{
|
||||
{Group: folderv1.GROUP, Resource: folderv1.RESOURCE}: configProvider,
|
||||
{Group: dashboardv1.GROUP, Resource: dashboardv1.DASHBOARD_RESOURCE}: configProvider,
|
||||
}
|
||||
}
|
||||
|
||||
// NewRemoteConfigProvider creates a map of ConfigProviders for remote API servers based on the given DialConfig.
|
||||
func NewRemoteConfigProvider(cfg map[schema.GroupResource]DialConfig, exchangeClient authn.TokenExchanger) map[schema.GroupResource]ConfigProvider {
|
||||
configProviders := make(map[schema.GroupResource]ConfigProvider, len(cfg))
|
||||
for gr, dialConfig := range cfg {
|
||||
configProviders[gr] = func(ctx context.Context) (*rest.Config, error) {
|
||||
return &rest.Config{
|
||||
Host: dialConfig.Host,
|
||||
WrapTransport: func(rt http.RoundTripper) http.RoundTripper {
|
||||
return auth.NewRoundTripper(exchangeClient, rt, dialConfig.Audience)
|
||||
},
|
||||
TLSClientConfig: rest.TLSClientConfig{
|
||||
Insecure: dialConfig.Insecure,
|
||||
CAFile: dialConfig.CAFile,
|
||||
},
|
||||
QPS: 50,
|
||||
Burst: 100,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
return configProviders
|
||||
}
|
||||
|
||||
// NewApiParentProvider creates a new ParentProviderImpl with the given config providers and version info.
|
||||
func NewApiParentProvider(
|
||||
configProviders map[schema.GroupResource]ConfigProvider,
|
||||
version map[schema.GroupResource]string,
|
||||
) *ParentProviderImpl {
|
||||
return &ParentProviderImpl{
|
||||
configProviders: configProviders,
|
||||
versions: version,
|
||||
dynamicClientFactory: func(config *rest.Config) (dynamic.Interface, error) {
|
||||
return dynamic.NewForConfig(config)
|
||||
},
|
||||
clients: make(map[schema.GroupResource]dynamic.Interface),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *ParentProviderImpl) HasParent(gr schema.GroupResource) bool {
|
||||
_, ok := p.configProviders[gr]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (p *ParentProviderImpl) getClient(ctx context.Context, gr schema.GroupResource) (dynamic.Interface, error) {
|
||||
p.clientsMu.Lock()
|
||||
client, ok := p.clients[gr]
|
||||
p.clientsMu.Unlock()
|
||||
|
||||
if ok {
|
||||
return client, nil
|
||||
}
|
||||
|
||||
provider, ok := p.configProviders[gr]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%w: %s", ErrNoConfigProvider, gr.String())
|
||||
}
|
||||
restConfig, err := provider(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client, err = p.dynamicClientFactory(restConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p.clientsMu.Lock()
|
||||
p.clients[gr] = client
|
||||
p.clientsMu.Unlock()
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (p *ParentProviderImpl) GetParent(ctx context.Context, gr schema.GroupResource, namespace, name string) (string, error) {
|
||||
client, err := p.getClient(ctx, gr)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
version, ok := p.versions[gr]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("%w: %s", ErrNoVersionInfo, gr.String())
|
||||
}
|
||||
resourceClient := client.Resource(schema.GroupVersionResource{
|
||||
Group: gr.Group,
|
||||
Resource: gr.Resource,
|
||||
Version: version,
|
||||
}).Namespace(namespace)
|
||||
|
||||
unstructObj, err := resourceClient.Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return unstructObj.GetAnnotations()[utils.AnnoKeyFolder], nil
|
||||
}
|
||||
@@ -0,0 +1,198 @@
|
||||
package authorizer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
folderv1 "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
)
|
||||
|
||||
var configProvider = func(ctx context.Context) (*rest.Config, error) {
|
||||
return &rest.Config{}, nil
|
||||
}
|
||||
|
||||
func TestParentProviderImpl_GetParent(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
gr schema.GroupResource
|
||||
namespace string
|
||||
resourceName string
|
||||
parentFolder string
|
||||
setupFake func(*fakeDynamicClient, *fakeResourceInterface)
|
||||
configProviders map[schema.GroupResource]ConfigProvider
|
||||
versions map[schema.GroupResource]string
|
||||
expectedError string
|
||||
expectedParent string
|
||||
}{
|
||||
{
|
||||
name: "successfully get parent folder",
|
||||
gr: schema.GroupResource{Group: folderv1.GROUP, Resource: folderv1.RESOURCE},
|
||||
namespace: "org-1",
|
||||
resourceName: "dash1",
|
||||
parentFolder: "fold1",
|
||||
setupFake: func(fakeClient *fakeDynamicClient, fakeResource *fakeResourceInterface) {
|
||||
fakeClient.resourceInterface = fakeResource
|
||||
fakeResource.getFunc = func(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {
|
||||
obj := &unstructured.Unstructured{}
|
||||
obj.SetAnnotations(map[string]string{utils.AnnoKeyFolder: "fold1"})
|
||||
return obj, nil
|
||||
}
|
||||
},
|
||||
configProviders: map[schema.GroupResource]ConfigProvider{
|
||||
{Group: folderv1.GROUP, Resource: folderv1.RESOURCE}: configProvider,
|
||||
},
|
||||
versions: Versions,
|
||||
expectedParent: "fold1",
|
||||
},
|
||||
{
|
||||
name: "resource without parent annotation returns empty",
|
||||
gr: schema.GroupResource{Group: folderv1.GROUP, Resource: folderv1.RESOURCE},
|
||||
namespace: "org-1",
|
||||
resourceName: "dash1",
|
||||
setupFake: func(fakeClient *fakeDynamicClient, fakeResource *fakeResourceInterface) {
|
||||
fakeClient.resourceInterface = fakeResource
|
||||
fakeResource.getFunc = func(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {
|
||||
obj := &unstructured.Unstructured{}
|
||||
obj.SetAnnotations(map[string]string{})
|
||||
return obj, nil
|
||||
}
|
||||
},
|
||||
configProviders: map[schema.GroupResource]ConfigProvider{
|
||||
{Group: folderv1.GROUP, Resource: folderv1.RESOURCE}: configProvider,
|
||||
},
|
||||
versions: Versions,
|
||||
expectedParent: "",
|
||||
},
|
||||
{
|
||||
name: "no config provider returns error",
|
||||
gr: schema.GroupResource{Group: "unknown.group", Resource: "unknown"},
|
||||
namespace: "org-1",
|
||||
resourceName: "resource-1",
|
||||
configProviders: map[schema.GroupResource]ConfigProvider{},
|
||||
versions: Versions,
|
||||
expectedError: ErrNoConfigProvider.Error(),
|
||||
},
|
||||
{
|
||||
name: "config provider returns error",
|
||||
gr: schema.GroupResource{Group: folderv1.GROUP, Resource: folderv1.RESOURCE},
|
||||
namespace: "org-1",
|
||||
resourceName: "resource-1",
|
||||
configProviders: map[schema.GroupResource]ConfigProvider{
|
||||
{Group: folderv1.GROUP, Resource: folderv1.RESOURCE}: func(ctx context.Context) (*rest.Config, error) {
|
||||
return nil, errors.New("config provider error")
|
||||
},
|
||||
},
|
||||
versions: Versions,
|
||||
expectedError: "config provider error",
|
||||
},
|
||||
{
|
||||
name: "no version info returns error",
|
||||
gr: schema.GroupResource{Group: folderv1.GROUP, Resource: folderv1.RESOURCE},
|
||||
namespace: "org-1",
|
||||
resourceName: "resource-1",
|
||||
configProviders: map[schema.GroupResource]ConfigProvider{
|
||||
{Group: folderv1.GROUP, Resource: folderv1.RESOURCE}: func(ctx context.Context) (*rest.Config, error) {
|
||||
return &rest.Config{}, nil
|
||||
},
|
||||
},
|
||||
versions: map[schema.GroupResource]string{},
|
||||
expectedError: ErrNoVersionInfo.Error(),
|
||||
},
|
||||
{
|
||||
name: "resource get returns error",
|
||||
gr: schema.GroupResource{Group: folderv1.GROUP, Resource: folderv1.RESOURCE},
|
||||
namespace: "org-1",
|
||||
resourceName: "resource-1",
|
||||
setupFake: func(fakeClient *fakeDynamicClient, fakeResource *fakeResourceInterface) {
|
||||
fakeClient.resourceInterface = fakeResource
|
||||
fakeResource.getFunc = func(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {
|
||||
return nil, errors.New("resource not found")
|
||||
}
|
||||
},
|
||||
configProviders: map[schema.GroupResource]ConfigProvider{
|
||||
{Group: folderv1.GROUP, Resource: folderv1.RESOURCE}: func(ctx context.Context) (*rest.Config, error) {
|
||||
return &rest.Config{}, nil
|
||||
},
|
||||
},
|
||||
versions: Versions,
|
||||
expectedError: "resource not found",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fakeClient := &fakeDynamicClient{}
|
||||
fakeResource := &fakeResourceInterface{}
|
||||
if tt.setupFake != nil {
|
||||
tt.setupFake(fakeClient, fakeResource)
|
||||
}
|
||||
|
||||
provider := &ParentProviderImpl{
|
||||
configProviders: tt.configProviders,
|
||||
versions: tt.versions,
|
||||
dynamicClientFactory: func(config *rest.Config) (dynamic.Interface, error) {
|
||||
return fakeClient, nil
|
||||
},
|
||||
clients: make(map[schema.GroupResource]dynamic.Interface),
|
||||
}
|
||||
|
||||
parent, err := provider.GetParent(context.Background(), tt.gr, tt.namespace, tt.resourceName)
|
||||
|
||||
if tt.expectedError != "" {
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), tt.expectedError)
|
||||
assert.Empty(t, parent)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.expectedParent, parent)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// fakeDynamicClient is a fake implementation of dynamic.Interface
|
||||
type fakeDynamicClient struct {
|
||||
resourceInterface dynamic.ResourceInterface
|
||||
}
|
||||
|
||||
func (f *fakeDynamicClient) Resource(resource schema.GroupVersionResource) dynamic.NamespaceableResourceInterface {
|
||||
return &fakeNamespaceableResourceInterface{
|
||||
resourceInterface: f.resourceInterface,
|
||||
}
|
||||
}
|
||||
|
||||
// fakeNamespaceableResourceInterface is a fake implementation of dynamic.NamespaceableResourceInterface
|
||||
type fakeNamespaceableResourceInterface struct {
|
||||
dynamic.NamespaceableResourceInterface
|
||||
resourceInterface dynamic.ResourceInterface
|
||||
}
|
||||
|
||||
func (f *fakeNamespaceableResourceInterface) Namespace(namespace string) dynamic.ResourceInterface {
|
||||
if f.resourceInterface != nil {
|
||||
return f.resourceInterface
|
||||
}
|
||||
return &fakeResourceInterface{}
|
||||
}
|
||||
|
||||
// fakeResourceInterface is a fake implementation of dynamic.ResourceInterface
|
||||
type fakeResourceInterface struct {
|
||||
dynamic.ResourceInterface
|
||||
getFunc func(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error)
|
||||
}
|
||||
|
||||
func (f *fakeResourceInterface) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {
|
||||
if f.getFunc != nil {
|
||||
return f.getFunc(ctx, name, opts, subresources...)
|
||||
}
|
||||
return &unstructured.Unstructured{}, nil
|
||||
}
|
||||
@@ -10,24 +10,44 @@ import (
|
||||
|
||||
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver/auth/authorizer/storewrapper"
|
||||
)
|
||||
|
||||
// TODO: Logs, Metrics, Traces?
|
||||
|
||||
// ParentProvider interface for fetching parent information of resources
|
||||
type ParentProvider interface {
|
||||
// HasParent checks if the given GroupResource has a parent folder
|
||||
HasParent(gr schema.GroupResource) bool
|
||||
// GetParent fetches the parent folder name for the given resource
|
||||
GetParent(ctx context.Context, gr schema.GroupResource, namespace, name string) (string, error)
|
||||
}
|
||||
|
||||
// ResourcePermissionsAuthorizer
|
||||
type ResourcePermissionsAuthorizer struct {
|
||||
accessClient types.AccessClient
|
||||
accessClient types.AccessClient
|
||||
parentProvider ParentProvider
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
var _ storewrapper.ResourceStorageAuthorizer = (*ResourcePermissionsAuthorizer)(nil)
|
||||
|
||||
func NewResourcePermissionsAuthorizer(accessClient types.AccessClient) *ResourcePermissionsAuthorizer {
|
||||
func NewResourcePermissionsAuthorizer(
|
||||
accessClient types.AccessClient,
|
||||
parentProvider ParentProvider,
|
||||
) *ResourcePermissionsAuthorizer {
|
||||
return &ResourcePermissionsAuthorizer{
|
||||
accessClient: accessClient,
|
||||
accessClient: accessClient,
|
||||
parentProvider: parentProvider,
|
||||
logger: log.New("iam.resource-permissions-authorizer"),
|
||||
}
|
||||
}
|
||||
|
||||
func isAccessPolicy(authInfo types.AuthInfo) bool {
|
||||
return types.IsIdentityType(authInfo.GetIdentityType(), types.TypeAccessPolicy)
|
||||
}
|
||||
|
||||
// AfterGet implements ResourceStorageAuthorizer.
|
||||
func (r *ResourcePermissionsAuthorizer) AfterGet(ctx context.Context, obj runtime.Object) error {
|
||||
authInfo, ok := types.AuthInfoFrom(ctx)
|
||||
@@ -37,9 +57,24 @@ func (r *ResourcePermissionsAuthorizer) AfterGet(ctx context.Context, obj runtim
|
||||
switch o := obj.(type) {
|
||||
case *iamv0.ResourcePermission:
|
||||
target := o.Spec.Resource
|
||||
targetGR := schema.GroupResource{Group: target.ApiGroup, Resource: target.Resource}
|
||||
|
||||
// TODO: Fetch the resource to retrieve its parent folder.
|
||||
parent := ""
|
||||
// Fetch the parent of the resource
|
||||
// Access Policies have global scope, so no parent check needed
|
||||
if !isAccessPolicy(authInfo) && r.parentProvider.HasParent(targetGR) {
|
||||
p, err := r.parentProvider.GetParent(ctx, targetGR, o.Namespace, target.Name)
|
||||
if err != nil {
|
||||
r.logger.Error("after get: error fetching parent", "error", err.Error(),
|
||||
"namespace", o.Namespace,
|
||||
"group", target.ApiGroup,
|
||||
"resource", target.Resource,
|
||||
"name", target.Name,
|
||||
)
|
||||
return err
|
||||
}
|
||||
parent = p
|
||||
}
|
||||
|
||||
checkReq := types.CheckRequest{
|
||||
Namespace: o.Namespace,
|
||||
@@ -72,9 +107,24 @@ func (r *ResourcePermissionsAuthorizer) beforeWrite(ctx context.Context, obj run
|
||||
switch o := obj.(type) {
|
||||
case *iamv0.ResourcePermission:
|
||||
target := o.Spec.Resource
|
||||
targetGR := schema.GroupResource{Group: target.ApiGroup, Resource: target.Resource}
|
||||
|
||||
// TODO: Fetch the resource to retrieve its parent folder.
|
||||
parent := ""
|
||||
// Fetch the parent of the resource
|
||||
// Access Policies have global scope, so no parent check needed
|
||||
if !isAccessPolicy(authInfo) && r.parentProvider.HasParent(targetGR) {
|
||||
p, err := r.parentProvider.GetParent(ctx, targetGR, o.Namespace, target.Name)
|
||||
if err != nil {
|
||||
r.logger.Error("before write: error fetching parent", "error", err.Error(),
|
||||
"namespace", o.Namespace,
|
||||
"group", target.ApiGroup,
|
||||
"resource", target.Resource,
|
||||
"name", target.Name,
|
||||
)
|
||||
return err
|
||||
}
|
||||
parent = p
|
||||
}
|
||||
|
||||
checkReq := types.CheckRequest{
|
||||
Namespace: o.Namespace,
|
||||
@@ -153,8 +203,29 @@ func (r *ResourcePermissionsAuthorizer) FilterList(ctx context.Context, list run
|
||||
canViewFuncs[gr] = canView
|
||||
}
|
||||
|
||||
// TODO : Fetch the resource to retrieve its parent folder.
|
||||
target := item.Spec.Resource
|
||||
targetGR := schema.GroupResource{Group: target.ApiGroup, Resource: target.Resource}
|
||||
|
||||
parent := ""
|
||||
// Fetch the parent of the resource
|
||||
// It's not efficient to do for every item in the list, but it's a good starting point.
|
||||
// Access Policies have global scope, so no parent check needed
|
||||
if !isAccessPolicy(authInfo) && r.parentProvider.HasParent(targetGR) {
|
||||
p, err := r.parentProvider.GetParent(ctx, targetGR, item.Namespace, target.Name)
|
||||
if err != nil {
|
||||
// Skip item on error fetching parent
|
||||
r.logger.Warn("filter list: error fetching parent, skipping item",
|
||||
"error", err.Error(),
|
||||
"namespace",
|
||||
item.Namespace,
|
||||
"group", target.ApiGroup,
|
||||
"resource", target.Resource,
|
||||
"name", target.Name,
|
||||
)
|
||||
continue
|
||||
}
|
||||
parent = p
|
||||
}
|
||||
|
||||
allowed := canView(item.Spec.Resource.Name, parent)
|
||||
if allowed {
|
||||
|
||||
@@ -5,13 +5,15 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/go-jose/go-jose/v4/jwt"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
"github.com/grafana/authlib/authn"
|
||||
"github.com/grafana/authlib/types"
|
||||
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/identity"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -63,6 +65,7 @@ func TestResourcePermissions_AfterGet(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
parent := "fold-1"
|
||||
checkFunc := func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error) {
|
||||
require.NotNil(t, id)
|
||||
// Check is called with the user's identity
|
||||
@@ -74,12 +77,18 @@ func TestResourcePermissions_AfterGet(t *testing.T) {
|
||||
require.Equal(t, fold1.Spec.Resource.Resource, req.Resource)
|
||||
require.Equal(t, fold1.Spec.Resource.Name, req.Name)
|
||||
require.Equal(t, utils.VerbGetPermissions, req.Verb)
|
||||
require.Equal(t, parent, folder)
|
||||
|
||||
return types.CheckResponse{Allowed: tt.shouldAllow}, nil
|
||||
}
|
||||
getParentFunc := func(ctx context.Context, gr schema.GroupResource, namespace, name string) (string, error) {
|
||||
// For this test, we can return a fixed parent folder ID
|
||||
return parent, nil
|
||||
}
|
||||
|
||||
accessClient := &fakeAccessClient{checkFunc: checkFunc}
|
||||
resPermAuthz := NewResourcePermissionsAuthorizer(accessClient)
|
||||
fakeParentProvider := &fakeParentProvider{hasParent: true, getParentFunc: getParentFunc}
|
||||
resPermAuthz := NewResourcePermissionsAuthorizer(accessClient, fakeParentProvider)
|
||||
ctx := types.WithAuthInfo(context.Background(), user)
|
||||
|
||||
err := resPermAuthz.AfterGet(ctx, fold1)
|
||||
@@ -89,6 +98,7 @@ func TestResourcePermissions_AfterGet(t *testing.T) {
|
||||
require.Error(t, err, "expected error for denied access")
|
||||
}
|
||||
require.True(t, accessClient.checkCalled, "accessClient.Check should be called")
|
||||
require.True(t, fakeParentProvider.getParentCalled, "parentProvider.GetParent should be called")
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -121,23 +131,32 @@ func TestResourcePermissions_FilterList(t *testing.T) {
|
||||
require.Equal(t, "dashboards", req.Resource)
|
||||
}
|
||||
|
||||
// Return a checker that allows only specific resources: fold-1 and dash-2
|
||||
// Return a checker that allows access to fold-1 and its content
|
||||
return func(name, folder string) bool {
|
||||
if name == "fold-1" || name == "dash-2" {
|
||||
if name == "fold-1" || folder == "fold-1" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}, &types.NoopZookie{}, nil
|
||||
}
|
||||
|
||||
getParentFunc := func(ctx context.Context, gr schema.GroupResource, namespace, name string) (string, error) {
|
||||
if name == "dash-2" {
|
||||
return "fold-1", nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
accessClient := &fakeAccessClient{compileFunc: compileFunc}
|
||||
resPermAuthz := NewResourcePermissionsAuthorizer(accessClient)
|
||||
fakeParentProvider := &fakeParentProvider{hasParent: true, getParentFunc: getParentFunc}
|
||||
resPermAuthz := NewResourcePermissionsAuthorizer(accessClient, fakeParentProvider)
|
||||
ctx := types.WithAuthInfo(context.Background(), user)
|
||||
|
||||
obj, err := resPermAuthz.FilterList(ctx, list)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, list)
|
||||
require.True(t, accessClient.compileCalled, "accessClient.Compile should be called")
|
||||
require.True(t, fakeParentProvider.getParentCalled, "parentProvider.GetParent should be called")
|
||||
|
||||
filtered, ok := obj.(*iamv0.ResourcePermissionList)
|
||||
require.True(t, ok, "response should be of type ResourcePermissionList")
|
||||
@@ -165,6 +184,7 @@ func TestResourcePermissions_beforeWrite(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
parent := "fold-1"
|
||||
checkFunc := func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error) {
|
||||
require.NotNil(t, id)
|
||||
// Check is called with the user's identity
|
||||
@@ -176,12 +196,18 @@ func TestResourcePermissions_beforeWrite(t *testing.T) {
|
||||
require.Equal(t, fold1.Spec.Resource.Resource, req.Resource)
|
||||
require.Equal(t, fold1.Spec.Resource.Name, req.Name)
|
||||
require.Equal(t, utils.VerbSetPermissions, req.Verb)
|
||||
require.Equal(t, parent, folder)
|
||||
|
||||
return types.CheckResponse{Allowed: tt.shouldAllow}, nil
|
||||
}
|
||||
|
||||
getParentFunc := func(ctx context.Context, gr schema.GroupResource, namespace, name string) (string, error) {
|
||||
return parent, nil
|
||||
}
|
||||
|
||||
accessClient := &fakeAccessClient{checkFunc: checkFunc}
|
||||
resPermAuthz := NewResourcePermissionsAuthorizer(accessClient)
|
||||
fakeParentProvider := &fakeParentProvider{hasParent: true, getParentFunc: getParentFunc}
|
||||
resPermAuthz := NewResourcePermissionsAuthorizer(accessClient, fakeParentProvider)
|
||||
ctx := types.WithAuthInfo(context.Background(), user)
|
||||
|
||||
err := resPermAuthz.beforeWrite(ctx, fold1)
|
||||
@@ -191,6 +217,7 @@ func TestResourcePermissions_beforeWrite(t *testing.T) {
|
||||
require.Error(t, err, "expected error for denied delete")
|
||||
}
|
||||
require.True(t, accessClient.checkCalled, "accessClient.Check should be called")
|
||||
require.True(t, fakeParentProvider.getParentCalled, "parentProvider.GetParent should be called")
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -214,3 +241,18 @@ func (m *fakeAccessClient) Compile(ctx context.Context, id types.AuthInfo, req t
|
||||
}
|
||||
|
||||
var _ types.AccessClient = (*fakeAccessClient)(nil)
|
||||
|
||||
type fakeParentProvider struct {
|
||||
hasParent bool
|
||||
getParentCalled bool
|
||||
getParentFunc func(ctx context.Context, gr schema.GroupResource, namespace, name string) (string, error)
|
||||
}
|
||||
|
||||
func (f *fakeParentProvider) HasParent(gr schema.GroupResource) bool {
|
||||
return f.hasParent
|
||||
}
|
||||
|
||||
func (f *fakeParentProvider) GetParent(ctx context.Context, gr schema.GroupResource, namespace, name string) (string, error) {
|
||||
f.getParentCalled = true
|
||||
return f.getParentFunc(ctx, gr, namespace, name)
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/grafana/authlib/types"
|
||||
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
iamauthorizer "github.com/grafana/grafana/pkg/registry/apis/iam/authorizer"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/iam/externalgroupmapping"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/iam/legacy"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/iam/serviceaccount"
|
||||
@@ -60,6 +61,10 @@ type IdentityAccessManagementAPIBuilder struct {
|
||||
roleBindingsStorage RoleBindingStorageBackend
|
||||
externalGroupMappingStorage ExternalGroupMappingStorageBackend
|
||||
|
||||
// Required for resource permissions authorization
|
||||
// fetches resources parent folders
|
||||
resourceParentProvider iamauthorizer.ParentProvider
|
||||
|
||||
// Access Control
|
||||
authorizer authorizer.Authorizer
|
||||
// legacyAccessClient is used for the identity apis, we need to migrate to the access client
|
||||
|
||||
@@ -41,6 +41,7 @@ import (
|
||||
"github.com/grafana/grafana/pkg/registry/apis/iam/teambinding"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/iam/user"
|
||||
"github.com/grafana/grafana/pkg/services/accesscontrol"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver"
|
||||
gfauthorizer "github.com/grafana/grafana/pkg/services/apiserver/auth/authorizer"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver/auth/authorizer/storewrapper"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver/builder"
|
||||
@@ -80,6 +81,7 @@ func RegisterAPIService(
|
||||
orgService org.Service,
|
||||
userService legacyuser.Service,
|
||||
teamService teamservice.Service,
|
||||
restConfig apiserver.RestConfigProvider,
|
||||
) (*IdentityAccessManagementAPIBuilder, error) {
|
||||
dbProvider := legacysql.NewDatabaseProvider(sql)
|
||||
store := legacy.NewLegacySQLStores(dbProvider)
|
||||
@@ -90,6 +92,11 @@ func RegisterAPIService(
|
||||
//nolint:staticcheck // not yet migrated to OpenFeature
|
||||
enableAuthnMutation := features.IsEnabledGlobally(featuremgmt.FlagKubernetesAuthnMutation)
|
||||
|
||||
resourceParentProvider := iamauthorizer.NewApiParentProvider(
|
||||
iamauthorizer.NewLocalConfigProvider(restConfig.GetRestConfig),
|
||||
iamauthorizer.Versions,
|
||||
)
|
||||
|
||||
builder := &IdentityAccessManagementAPIBuilder{
|
||||
store: store,
|
||||
userLegacyStore: user.NewLegacyStore(store, accessClient, enableAuthnMutation, tracing),
|
||||
@@ -104,6 +111,7 @@ func RegisterAPIService(
|
||||
externalGroupMappingStorage: externalGroupMappingStorageBackend,
|
||||
teamGroupsHandler: teamGroupsHandlerImpl,
|
||||
sso: ssoService,
|
||||
resourceParentProvider: resourceParentProvider,
|
||||
authorizer: authorizer,
|
||||
legacyAccessClient: legacyAccessClient,
|
||||
accessClient: accessClient,
|
||||
@@ -142,6 +150,12 @@ func NewAPIService(
|
||||
resourceAuthorizer := gfauthorizer.NewResourceAuthorizer(accessClient)
|
||||
coreRoleAuthorizer := iamauthorizer.NewCoreRoleAuthorizer(accessClient)
|
||||
|
||||
// TODO: in a follow up PR, make this configurable
|
||||
resourceParentProvider := iamauthorizer.NewApiParentProvider(
|
||||
iamauthorizer.NewRemoteConfigProvider(map[schema.GroupResource]iamauthorizer.DialConfig{}, nil),
|
||||
iamauthorizer.Versions,
|
||||
)
|
||||
|
||||
return &IdentityAccessManagementAPIBuilder{
|
||||
store: store,
|
||||
display: user.NewLegacyDisplayREST(store),
|
||||
@@ -152,6 +166,7 @@ func NewAPIService(
|
||||
logger: log.New("iam.apis"),
|
||||
features: features,
|
||||
accessClient: accessClient,
|
||||
resourceParentProvider: resourceParentProvider,
|
||||
zClient: zClient,
|
||||
zTickets: make(chan bool, MaxConcurrentZanzanaWrites),
|
||||
reg: reg,
|
||||
@@ -444,7 +459,7 @@ func (b *IdentityAccessManagementAPIBuilder) UpdateResourcePermissionsAPIGroup(
|
||||
return fmt.Errorf("expected RegistryStoreDualWrite, got %T", dw)
|
||||
}
|
||||
|
||||
authzWrapper := storewrapper.New(regStoreDW, iamauthorizer.NewResourcePermissionsAuthorizer(b.accessClient))
|
||||
authzWrapper := storewrapper.New(regStoreDW, iamauthorizer.NewResourcePermissionsAuthorizer(b.accessClient, b.resourceParentProvider))
|
||||
|
||||
storage[iamv0.ResourcePermissionInfo.StoragePath()] = authzWrapper
|
||||
return nil
|
||||
|
||||
@@ -26,6 +26,18 @@ type StatusPatcher interface {
|
||||
Patch(ctx context.Context, repo *provisioning.Repository, patchOperations ...map[string]interface{}) error
|
||||
}
|
||||
|
||||
// HealthCheckerInterface defines the interface for health checking operations
|
||||
//
|
||||
//go:generate mockery --name=HealthCheckerInterface --structname=MockHealthChecker
|
||||
type HealthCheckerInterface interface {
|
||||
ShouldCheckHealth(repo *provisioning.Repository) bool
|
||||
RefreshHealth(ctx context.Context, repo repository.Repository) (*provisioning.TestResults, provisioning.HealthStatus, error)
|
||||
RefreshHealthWithPatchOps(ctx context.Context, repo repository.Repository) (*provisioning.TestResults, provisioning.HealthStatus, []map[string]interface{}, error)
|
||||
RefreshTimestamp(ctx context.Context, repo *provisioning.Repository) error
|
||||
RecordFailure(ctx context.Context, failureType provisioning.HealthFailureType, err error, repo *provisioning.Repository) error
|
||||
HasRecentFailure(healthStatus provisioning.HealthStatus, failureType provisioning.HealthFailureType) bool
|
||||
}
|
||||
|
||||
// HealthChecker provides unified health checking for repositories
|
||||
type HealthChecker struct {
|
||||
statusPatcher StatusPatcher
|
||||
@@ -162,6 +174,33 @@ func (hc *HealthChecker) RefreshHealth(ctx context.Context, repo repository.Repo
|
||||
return testResults, newHealthStatus, nil
|
||||
}
|
||||
|
||||
// RefreshHealthWithPatchOps performs a health check on an existing repository
|
||||
// and returns the test results, health status, and patch operations to apply.
|
||||
// This method does NOT apply the patch itself, allowing the caller to batch
|
||||
// multiple status updates together to avoid race conditions.
|
||||
func (hc *HealthChecker) RefreshHealthWithPatchOps(ctx context.Context, repo repository.Repository) (*provisioning.TestResults, provisioning.HealthStatus, []map[string]interface{}, error) {
|
||||
cfg := repo.Config()
|
||||
|
||||
// Use health checker to perform comprehensive health check with existing status
|
||||
testResults, newHealthStatus, err := hc.refreshHealth(ctx, repo, cfg.Status.Health)
|
||||
if err != nil {
|
||||
return nil, provisioning.HealthStatus{}, nil, fmt.Errorf("health check failed: %w", err)
|
||||
}
|
||||
|
||||
var patchOps []map[string]interface{}
|
||||
|
||||
// Only return patch operation if health status actually changed
|
||||
if hc.hasHealthStatusChanged(cfg.Status.Health, newHealthStatus) {
|
||||
patchOps = append(patchOps, map[string]interface{}{
|
||||
"op": "replace",
|
||||
"path": "/status/health",
|
||||
"value": newHealthStatus,
|
||||
})
|
||||
}
|
||||
|
||||
return testResults, newHealthStatus, patchOps, nil
|
||||
}
|
||||
|
||||
// RefreshTimestamp updates the health status timestamp without changing other fields
|
||||
func (hc *HealthChecker) RefreshTimestamp(ctx context.Context, repo *provisioning.Repository) error {
|
||||
// Update the timestamp on the existing health status
|
||||
|
||||
@@ -532,6 +532,136 @@ func TestRefreshHealth(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestRefreshHealthWithPatchOps(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
testResult *provisioning.TestResults
|
||||
testError error
|
||||
existingStatus provisioning.HealthStatus
|
||||
expectError bool
|
||||
expectedHealth bool
|
||||
expectPatchOps bool
|
||||
expectedPatchPath string
|
||||
}{
|
||||
{
|
||||
name: "successful health check with status change",
|
||||
testResult: &provisioning.TestResults{
|
||||
Success: true,
|
||||
Code: 200,
|
||||
},
|
||||
testError: nil,
|
||||
existingStatus: provisioning.HealthStatus{
|
||||
Healthy: false,
|
||||
Error: provisioning.HealthFailureHealth,
|
||||
Checked: time.Now().Add(-time.Hour).UnixMilli(),
|
||||
},
|
||||
expectError: false,
|
||||
expectedHealth: true,
|
||||
expectPatchOps: true,
|
||||
expectedPatchPath: "/status/health",
|
||||
},
|
||||
{
|
||||
name: "failed health check with status change",
|
||||
testResult: &provisioning.TestResults{
|
||||
Success: false,
|
||||
Code: 500,
|
||||
Errors: []provisioning.ErrorDetails{
|
||||
{Detail: "connection failed"},
|
||||
},
|
||||
},
|
||||
testError: nil,
|
||||
existingStatus: provisioning.HealthStatus{
|
||||
Healthy: true,
|
||||
Checked: time.Now().Add(-time.Hour).UnixMilli(),
|
||||
},
|
||||
expectError: false,
|
||||
expectedHealth: false,
|
||||
expectPatchOps: true,
|
||||
expectedPatchPath: "/status/health",
|
||||
},
|
||||
{
|
||||
name: "no status change - no patch ops returned",
|
||||
testResult: &provisioning.TestResults{
|
||||
Success: true,
|
||||
Code: 200,
|
||||
},
|
||||
testError: nil,
|
||||
existingStatus: provisioning.HealthStatus{
|
||||
Healthy: true,
|
||||
Checked: time.Now().Add(-15 * time.Second).UnixMilli(),
|
||||
},
|
||||
expectError: false,
|
||||
expectedHealth: true,
|
||||
expectPatchOps: false,
|
||||
},
|
||||
{
|
||||
name: "test repository error",
|
||||
testResult: nil,
|
||||
testError: errors.New("repository test failed"),
|
||||
existingStatus: provisioning.HealthStatus{
|
||||
Healthy: true,
|
||||
Checked: time.Now().Add(-time.Hour).UnixMilli(),
|
||||
},
|
||||
expectError: true,
|
||||
expectedHealth: false,
|
||||
expectPatchOps: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Create mock repository
|
||||
mockRepo := &mockRepository{
|
||||
config: &provisioning.Repository{
|
||||
Spec: provisioning.RepositorySpec{
|
||||
Title: "Test Repository",
|
||||
Type: provisioning.LocalRepositoryType,
|
||||
},
|
||||
Status: provisioning.RepositoryStatus{
|
||||
Health: tt.existingStatus,
|
||||
},
|
||||
},
|
||||
testResult: tt.testResult,
|
||||
testError: tt.testError,
|
||||
}
|
||||
|
||||
// Create health checker with validator and tester
|
||||
validator := repository.NewValidator(30*time.Second, []provisioning.SyncTargetType{provisioning.SyncTargetTypeFolder, provisioning.SyncTargetTypeInstance}, true)
|
||||
hc := NewHealthChecker(nil, prometheus.NewPedanticRegistry(), repository.NewSimpleRepositoryTester(validator))
|
||||
|
||||
// Call RefreshHealthWithPatchOps
|
||||
testResults, healthStatus, patchOps, err := hc.RefreshHealthWithPatchOps(context.Background(), mockRepo)
|
||||
|
||||
// Verify error
|
||||
if tt.expectError {
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, testResults)
|
||||
return
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Verify health status
|
||||
assert.Equal(t, tt.expectedHealth, healthStatus.Healthy)
|
||||
|
||||
// Verify patch operations
|
||||
if tt.expectPatchOps {
|
||||
assert.NotEmpty(t, patchOps, "expected patch operations to be returned")
|
||||
assert.Len(t, patchOps, 1)
|
||||
assert.Equal(t, "replace", patchOps[0]["op"])
|
||||
assert.Equal(t, tt.expectedPatchPath, patchOps[0]["path"])
|
||||
assert.Equal(t, healthStatus, patchOps[0]["value"])
|
||||
} else {
|
||||
assert.Empty(t, patchOps, "expected no patch operations to be returned")
|
||||
}
|
||||
|
||||
// Verify test results
|
||||
if tt.testResult != nil {
|
||||
assert.Equal(t, tt.testResult, testResults)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasHealthStatusChanged(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -0,0 +1,187 @@
|
||||
// Code generated by mockery v2.53.4. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
repository "github.com/grafana/grafana/apps/provisioning/pkg/repository"
|
||||
|
||||
v0alpha1 "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
|
||||
)
|
||||
|
||||
// MockHealthChecker is an autogenerated mock type for the HealthCheckerInterface type
|
||||
type MockHealthChecker struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// HasRecentFailure provides a mock function with given fields: healthStatus, failureType
|
||||
func (_m *MockHealthChecker) HasRecentFailure(healthStatus v0alpha1.HealthStatus, failureType v0alpha1.HealthFailureType) bool {
|
||||
ret := _m.Called(healthStatus, failureType)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for HasRecentFailure")
|
||||
}
|
||||
|
||||
var r0 bool
|
||||
if rf, ok := ret.Get(0).(func(v0alpha1.HealthStatus, v0alpha1.HealthFailureType) bool); ok {
|
||||
r0 = rf(healthStatus, failureType)
|
||||
} else {
|
||||
r0 = ret.Get(0).(bool)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// RecordFailure provides a mock function with given fields: ctx, failureType, err, repo
|
||||
func (_m *MockHealthChecker) RecordFailure(ctx context.Context, failureType v0alpha1.HealthFailureType, err error, repo *v0alpha1.Repository) error {
|
||||
ret := _m.Called(ctx, failureType, err, repo)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for RecordFailure")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, v0alpha1.HealthFailureType, error, *v0alpha1.Repository) error); ok {
|
||||
r0 = rf(ctx, failureType, err, repo)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// RefreshHealth provides a mock function with given fields: ctx, repo
|
||||
func (_m *MockHealthChecker) RefreshHealth(ctx context.Context, repo repository.Repository) (*v0alpha1.TestResults, v0alpha1.HealthStatus, error) {
|
||||
ret := _m.Called(ctx, repo)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for RefreshHealth")
|
||||
}
|
||||
|
||||
var r0 *v0alpha1.TestResults
|
||||
var r1 v0alpha1.HealthStatus
|
||||
var r2 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, repository.Repository) (*v0alpha1.TestResults, v0alpha1.HealthStatus, error)); ok {
|
||||
return rf(ctx, repo)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, repository.Repository) *v0alpha1.TestResults); ok {
|
||||
r0 = rf(ctx, repo)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*v0alpha1.TestResults)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, repository.Repository) v0alpha1.HealthStatus); ok {
|
||||
r1 = rf(ctx, repo)
|
||||
} else {
|
||||
r1 = ret.Get(1).(v0alpha1.HealthStatus)
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(2).(func(context.Context, repository.Repository) error); ok {
|
||||
r2 = rf(ctx, repo)
|
||||
} else {
|
||||
r2 = ret.Error(2)
|
||||
}
|
||||
|
||||
return r0, r1, r2
|
||||
}
|
||||
|
||||
// RefreshHealthWithPatchOps provides a mock function with given fields: ctx, repo
|
||||
func (_m *MockHealthChecker) RefreshHealthWithPatchOps(ctx context.Context, repo repository.Repository) (*v0alpha1.TestResults, v0alpha1.HealthStatus, []map[string]interface{}, error) {
|
||||
ret := _m.Called(ctx, repo)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for RefreshHealthWithPatchOps")
|
||||
}
|
||||
|
||||
var r0 *v0alpha1.TestResults
|
||||
var r1 v0alpha1.HealthStatus
|
||||
var r2 []map[string]interface{}
|
||||
var r3 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, repository.Repository) (*v0alpha1.TestResults, v0alpha1.HealthStatus, []map[string]interface{}, error)); ok {
|
||||
return rf(ctx, repo)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, repository.Repository) *v0alpha1.TestResults); ok {
|
||||
r0 = rf(ctx, repo)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*v0alpha1.TestResults)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, repository.Repository) v0alpha1.HealthStatus); ok {
|
||||
r1 = rf(ctx, repo)
|
||||
} else {
|
||||
r1 = ret.Get(1).(v0alpha1.HealthStatus)
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(2).(func(context.Context, repository.Repository) []map[string]interface{}); ok {
|
||||
r2 = rf(ctx, repo)
|
||||
} else {
|
||||
if ret.Get(2) != nil {
|
||||
r2 = ret.Get(2).([]map[string]interface{})
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(3).(func(context.Context, repository.Repository) error); ok {
|
||||
r3 = rf(ctx, repo)
|
||||
} else {
|
||||
r3 = ret.Error(3)
|
||||
}
|
||||
|
||||
return r0, r1, r2, r3
|
||||
}
|
||||
|
||||
// RefreshTimestamp provides a mock function with given fields: ctx, repo
|
||||
func (_m *MockHealthChecker) RefreshTimestamp(ctx context.Context, repo *v0alpha1.Repository) error {
|
||||
ret := _m.Called(ctx, repo)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for RefreshTimestamp")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *v0alpha1.Repository) error); ok {
|
||||
r0 = rf(ctx, repo)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// ShouldCheckHealth provides a mock function with given fields: repo
|
||||
func (_m *MockHealthChecker) ShouldCheckHealth(repo *v0alpha1.Repository) bool {
|
||||
ret := _m.Called(repo)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for ShouldCheckHealth")
|
||||
}
|
||||
|
||||
var r0 bool
|
||||
if rf, ok := ret.Get(0).(func(*v0alpha1.Repository) bool); ok {
|
||||
r0 = rf(repo)
|
||||
} else {
|
||||
r0 = ret.Get(0).(bool)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// NewMockHealthChecker creates a new instance of MockHealthChecker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockHealthChecker(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockHealthChecker {
|
||||
mock := &MockHealthChecker{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// Code generated by mockery v2.52.4. DO NOT EDIT.
|
||||
// Code generated by mockery v2.53.4. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
|
||||
@@ -561,11 +561,16 @@ func (rc *RepositoryController) process(item *queueItem) error {
|
||||
}
|
||||
|
||||
// Handle health checks using the health checker
|
||||
_, healthStatus, err := rc.healthChecker.RefreshHealth(ctx, repo)
|
||||
_, healthStatus, healthPatchOps, err := rc.healthChecker.RefreshHealthWithPatchOps(ctx, repo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("update health status: %w", err)
|
||||
}
|
||||
|
||||
// Add health patch operations first
|
||||
if len(healthPatchOps) > 0 {
|
||||
patchOperations = append(patchOperations, healthPatchOps...)
|
||||
}
|
||||
|
||||
// determine the sync strategy and sync status to apply
|
||||
syncOptions := rc.determineSyncStrategy(ctx, obj, repo, shouldResync, healthStatus)
|
||||
patchOperations = append(patchOperations, rc.determineSyncStatusOps(obj, syncOptions, healthStatus)...)
|
||||
|
||||
@@ -350,6 +350,161 @@ type mockJobsQueueStore struct {
|
||||
*jobs.MockStore
|
||||
}
|
||||
|
||||
func TestRepositoryController_process_UnhealthyRepositoryStatusUpdate(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
repo *provisioning.Repository
|
||||
healthStatus provisioning.HealthStatus
|
||||
hasHealthStatusChanged bool
|
||||
expectedUnhealthyMessage bool
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "unhealthy repository should set unhealthy message in sync status",
|
||||
repo: &provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-repo",
|
||||
Namespace: "default",
|
||||
Generation: 1,
|
||||
},
|
||||
Spec: provisioning.RepositorySpec{
|
||||
Sync: provisioning.SyncOptions{
|
||||
Enabled: true,
|
||||
IntervalSeconds: 300,
|
||||
},
|
||||
},
|
||||
Status: provisioning.RepositoryStatus{
|
||||
ObservedGeneration: 1,
|
||||
Health: provisioning.HealthStatus{
|
||||
Healthy: true,
|
||||
Checked: time.Now().Add(-10 * time.Minute).UnixMilli(),
|
||||
},
|
||||
Sync: provisioning.SyncStatus{
|
||||
State: provisioning.JobStateSuccess,
|
||||
Finished: time.Now().Add(-1 * time.Minute).UnixMilli(),
|
||||
Message: []string{},
|
||||
},
|
||||
},
|
||||
},
|
||||
healthStatus: provisioning.HealthStatus{
|
||||
Healthy: false,
|
||||
Error: provisioning.HealthFailureHealth,
|
||||
Checked: time.Now().UnixMilli(),
|
||||
Message: []string{"connection failed"},
|
||||
},
|
||||
hasHealthStatusChanged: true,
|
||||
expectedUnhealthyMessage: true,
|
||||
description: "should set unhealthy message when repository becomes unhealthy",
|
||||
},
|
||||
{
|
||||
name: "unhealthy repository should not duplicate unhealthy message",
|
||||
repo: &provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-repo",
|
||||
Namespace: "default",
|
||||
Generation: 1,
|
||||
},
|
||||
Spec: provisioning.RepositorySpec{
|
||||
Sync: provisioning.SyncOptions{
|
||||
Enabled: true,
|
||||
IntervalSeconds: 300,
|
||||
},
|
||||
},
|
||||
Status: provisioning.RepositoryStatus{
|
||||
ObservedGeneration: 1,
|
||||
Health: provisioning.HealthStatus{
|
||||
Healthy: false,
|
||||
Checked: time.Now().Add(-2 * time.Minute).UnixMilli(),
|
||||
},
|
||||
Sync: provisioning.SyncStatus{
|
||||
State: provisioning.JobStateError,
|
||||
Finished: time.Now().Add(-1 * time.Minute).UnixMilli(),
|
||||
Message: []string{"Repository is unhealthy"},
|
||||
},
|
||||
},
|
||||
},
|
||||
healthStatus: provisioning.HealthStatus{
|
||||
Healthy: false,
|
||||
Error: provisioning.HealthFailureHealth,
|
||||
Checked: time.Now().UnixMilli(),
|
||||
Message: []string{"connection failed"},
|
||||
},
|
||||
hasHealthStatusChanged: false,
|
||||
expectedUnhealthyMessage: false,
|
||||
description: "should not set unhealthy message when it already exists",
|
||||
},
|
||||
{
|
||||
name: "healthy repository should clear unhealthy message",
|
||||
repo: &provisioning.Repository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-repo",
|
||||
Namespace: "default",
|
||||
Generation: 1,
|
||||
},
|
||||
Spec: provisioning.RepositorySpec{
|
||||
Sync: provisioning.SyncOptions{
|
||||
Enabled: true,
|
||||
IntervalSeconds: 300,
|
||||
},
|
||||
},
|
||||
Status: provisioning.RepositoryStatus{
|
||||
ObservedGeneration: 1,
|
||||
Health: provisioning.HealthStatus{
|
||||
Healthy: false,
|
||||
Checked: time.Now().Add(-2 * time.Minute).UnixMilli(),
|
||||
},
|
||||
Sync: provisioning.SyncStatus{
|
||||
State: provisioning.JobStateError,
|
||||
Finished: time.Now().Add(-1 * time.Minute).UnixMilli(),
|
||||
Message: []string{"Repository is unhealthy"},
|
||||
},
|
||||
},
|
||||
},
|
||||
healthStatus: provisioning.HealthStatus{
|
||||
Healthy: true,
|
||||
Checked: time.Now().UnixMilli(),
|
||||
Message: []string{},
|
||||
},
|
||||
hasHealthStatusChanged: true,
|
||||
expectedUnhealthyMessage: false,
|
||||
description: "should clear unhealthy message when repository becomes healthy",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create controller
|
||||
rc := &RepositoryController{}
|
||||
|
||||
// Determine sync status ops (this is a pure function, no mocks needed)
|
||||
syncOps := rc.determineSyncStatusOps(tc.repo, nil, tc.healthStatus)
|
||||
|
||||
// Verify expectations
|
||||
hasUnhealthyOp := false
|
||||
hasClearUnhealthyOp := false
|
||||
for _, op := range syncOps {
|
||||
if path, ok := op["path"].(string); ok {
|
||||
if path == "/status/sync/message" {
|
||||
if messages, ok := op["value"].([]string); ok {
|
||||
if len(messages) > 0 && messages[0] == "Repository is unhealthy" {
|
||||
hasUnhealthyOp = true
|
||||
} else if len(messages) == 0 {
|
||||
hasClearUnhealthyOp = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if tc.expectedUnhealthyMessage {
|
||||
assert.True(t, hasUnhealthyOp, tc.description+": expected unhealthy message operation")
|
||||
} else if len(tc.repo.Status.Sync.Message) > 0 && tc.healthStatus.Healthy {
|
||||
assert.True(t, hasClearUnhealthyOp, tc.description+": expected clear unhealthy message operation")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepositoryController_shouldResync_StaleSyncStatus(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
|
||||
@@ -35,12 +35,13 @@ func maybeNotifyProgress(threshold time.Duration, fn ProgressFn) ProgressFn {
|
||||
|
||||
// FIXME: ProgressRecorder should be initialized in the queue
|
||||
type JobResourceResult struct {
|
||||
Name string
|
||||
Group string
|
||||
Kind string
|
||||
Path string
|
||||
Action repository.FileAction
|
||||
Error error
|
||||
Name string
|
||||
Group string
|
||||
Kind string
|
||||
Path string
|
||||
Action repository.FileAction
|
||||
Error error
|
||||
Warning error
|
||||
}
|
||||
|
||||
type jobProgressRecorder struct {
|
||||
@@ -193,6 +194,10 @@ func (r *jobProgressRecorder) updateSummary(result JobResourceResult) {
|
||||
errorMsg := fmt.Sprintf("%s (file: %s, name: %s, action: %s)", result.Error.Error(), result.Path, result.Name, result.Action)
|
||||
summary.Errors = append(summary.Errors, errorMsg)
|
||||
summary.Error++
|
||||
} else if result.Warning != nil {
|
||||
warningMsg := fmt.Sprintf("%s (file: %s, name: %s, action: %s)", result.Warning.Error(), result.Path, result.Name, result.Action)
|
||||
summary.Warnings = append(summary.Warnings, warningMsg)
|
||||
summary.Warning++
|
||||
} else {
|
||||
switch result.Action {
|
||||
case repository.FileActionDeleted:
|
||||
@@ -266,8 +271,17 @@ func (r *jobProgressRecorder) Complete(ctx context.Context, err error) provision
|
||||
jobStatus.Message = err.Error()
|
||||
}
|
||||
|
||||
jobStatus.Summary = r.summary()
|
||||
summaries := r.summary()
|
||||
jobStatus.Summary = summaries
|
||||
jobStatus.Errors = r.errors
|
||||
|
||||
// Extract warnings from summaries
|
||||
warnings := make([]string, 0)
|
||||
for _, summary := range summaries {
|
||||
warnings = append(warnings, summary.Warnings...)
|
||||
}
|
||||
jobStatus.Warnings = warnings
|
||||
|
||||
jobStatus.URLs = r.refURLs
|
||||
|
||||
tooManyErrors := r.maxErrors > 0 && r.errorCount >= r.maxErrors
|
||||
@@ -283,6 +297,9 @@ func (r *jobProgressRecorder) Complete(ctx context.Context, err error) provision
|
||||
jobStatus.Message = "completed with errors"
|
||||
jobStatus.State = provisioning.JobStateWarning
|
||||
}
|
||||
} else if len(jobStatus.Warnings) > 0 {
|
||||
jobStatus.State = provisioning.JobStateWarning
|
||||
jobStatus.Message = "completed with warnings"
|
||||
}
|
||||
|
||||
// Override message if progress have a more explicit message
|
||||
|
||||
@@ -2,9 +2,11 @@ package jobs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
|
||||
"github.com/grafana/grafana/apps/provisioning/pkg/repository"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@@ -83,3 +85,167 @@ func TestJobProgressRecorderCompleteIncludesRefURLs(t *testing.T) {
|
||||
assert.Equal(t, provisioning.JobStateSuccess, finalStatus.State)
|
||||
assert.Equal(t, "completed successfully", finalStatus.Message)
|
||||
}
|
||||
|
||||
func TestJobProgressRecorderWarningStatus(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a progress recorder
|
||||
mockProgressFn := func(ctx context.Context, status provisioning.JobStatus) error {
|
||||
return nil
|
||||
}
|
||||
recorder := newJobProgressRecorder(mockProgressFn).(*jobProgressRecorder)
|
||||
|
||||
// Record a result with a warning
|
||||
warningErr := errors.New("deprecated API used")
|
||||
result := JobResourceResult{
|
||||
Name: "test-resource",
|
||||
Group: "test.grafana.app",
|
||||
Kind: "Dashboard",
|
||||
Path: "dashboards/test.json",
|
||||
Action: repository.FileActionUpdated,
|
||||
Warning: warningErr,
|
||||
}
|
||||
recorder.Record(ctx, result)
|
||||
|
||||
// Record another result with a different warning
|
||||
warningErr2 := errors.New("missing optional field")
|
||||
result2 := JobResourceResult{
|
||||
Name: "test-resource-2",
|
||||
Group: "test.grafana.app",
|
||||
Kind: "Dashboard",
|
||||
Path: "dashboards/test2.json",
|
||||
Action: repository.FileActionCreated,
|
||||
Warning: warningErr2,
|
||||
}
|
||||
recorder.Record(ctx, result2)
|
||||
|
||||
// Record a result with a warning from a different resource type
|
||||
warningErr3 := errors.New("validation warning")
|
||||
result3 := JobResourceResult{
|
||||
Name: "test-resource-3",
|
||||
Group: "test.grafana.app",
|
||||
Kind: "DataSource",
|
||||
Path: "datasources/test.yaml",
|
||||
Action: repository.FileActionCreated,
|
||||
Warning: warningErr3,
|
||||
}
|
||||
recorder.Record(ctx, result3)
|
||||
|
||||
// Verify warnings are stored in summaries
|
||||
recorder.mu.RLock()
|
||||
require.Len(t, recorder.summaries, 2) // Dashboard and DataSource
|
||||
dashboardSummary := recorder.summaries["test.grafana.app:Dashboard"]
|
||||
require.NotNil(t, dashboardSummary)
|
||||
assert.Equal(t, int64(2), dashboardSummary.Warning)
|
||||
assert.Len(t, dashboardSummary.Warnings, 2)
|
||||
assert.Contains(t, dashboardSummary.Warnings[0], "deprecated API used")
|
||||
assert.Contains(t, dashboardSummary.Warnings[1], "missing optional field")
|
||||
|
||||
datasourceSummary := recorder.summaries["test.grafana.app:DataSource"]
|
||||
require.NotNil(t, datasourceSummary)
|
||||
assert.Equal(t, int64(1), datasourceSummary.Warning)
|
||||
assert.Len(t, datasourceSummary.Warnings, 1)
|
||||
assert.Contains(t, datasourceSummary.Warnings[0], "validation warning")
|
||||
recorder.mu.RUnlock()
|
||||
|
||||
// Complete the job
|
||||
finalStatus := recorder.Complete(ctx, nil)
|
||||
|
||||
// Verify the final status includes warnings
|
||||
require.NotNil(t, finalStatus.Warnings)
|
||||
assert.Len(t, finalStatus.Warnings, 3)
|
||||
assert.Contains(t, finalStatus.Warnings[0], "deprecated API used")
|
||||
assert.Contains(t, finalStatus.Warnings[1], "missing optional field")
|
||||
assert.Contains(t, finalStatus.Warnings[2], "validation warning")
|
||||
|
||||
// Verify the state is set to Warning
|
||||
assert.Equal(t, provisioning.JobStateWarning, finalStatus.State)
|
||||
assert.Equal(t, "completed with warnings", finalStatus.Message)
|
||||
|
||||
// Verify summaries are included
|
||||
require.Len(t, finalStatus.Summary, 2)
|
||||
|
||||
// Verify no errors were recorded
|
||||
assert.Empty(t, finalStatus.Errors)
|
||||
}
|
||||
|
||||
func TestJobProgressRecorderWarningWithErrors(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a progress recorder
|
||||
mockProgressFn := func(ctx context.Context, status provisioning.JobStatus) error {
|
||||
return nil
|
||||
}
|
||||
recorder := newJobProgressRecorder(mockProgressFn).(*jobProgressRecorder)
|
||||
|
||||
// Record a result with an error (errors take precedence)
|
||||
errorErr := errors.New("failed to process")
|
||||
result := JobResourceResult{
|
||||
Name: "test-resource",
|
||||
Group: "test.grafana.app",
|
||||
Kind: "Dashboard",
|
||||
Path: "dashboards/test.json",
|
||||
Action: repository.FileActionUpdated,
|
||||
Error: errorErr,
|
||||
}
|
||||
recorder.Record(ctx, result)
|
||||
|
||||
// Record a result with only a warning
|
||||
warningErr := errors.New("deprecated API used")
|
||||
result2 := JobResourceResult{
|
||||
Name: "test-resource-2",
|
||||
Group: "test.grafana.app",
|
||||
Kind: "Dashboard",
|
||||
Path: "dashboards/test2.json",
|
||||
Action: repository.FileActionCreated,
|
||||
Warning: warningErr,
|
||||
}
|
||||
recorder.Record(ctx, result2)
|
||||
|
||||
// Complete the job
|
||||
finalStatus := recorder.Complete(ctx, nil)
|
||||
|
||||
// When there are errors, the state should be Warning (not Error unless too many)
|
||||
// and warnings should still be included
|
||||
assert.Equal(t, provisioning.JobStateWarning, finalStatus.State)
|
||||
assert.Equal(t, "completed with errors", finalStatus.Message)
|
||||
assert.Len(t, finalStatus.Errors, 1)
|
||||
assert.Contains(t, finalStatus.Errors[0], "failed to process")
|
||||
|
||||
// Warnings should still be extracted from summaries
|
||||
require.NotNil(t, finalStatus.Warnings)
|
||||
assert.Len(t, finalStatus.Warnings, 1)
|
||||
assert.Contains(t, finalStatus.Warnings[0], "deprecated API used")
|
||||
}
|
||||
|
||||
func TestJobProgressRecorderWarningOnlyNoErrors(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a progress recorder
|
||||
mockProgressFn := func(ctx context.Context, status provisioning.JobStatus) error {
|
||||
return nil
|
||||
}
|
||||
recorder := newJobProgressRecorder(mockProgressFn).(*jobProgressRecorder)
|
||||
|
||||
// Record only warnings, no errors
|
||||
warningErr := errors.New("deprecated API used")
|
||||
result := JobResourceResult{
|
||||
Name: "test-resource",
|
||||
Group: "test.grafana.app",
|
||||
Kind: "Dashboard",
|
||||
Path: "dashboards/test.json",
|
||||
Action: repository.FileActionUpdated,
|
||||
Warning: warningErr,
|
||||
}
|
||||
recorder.Record(ctx, result)
|
||||
|
||||
// Complete the job
|
||||
finalStatus := recorder.Complete(ctx, nil)
|
||||
|
||||
// Verify the state is Warning (not Error) when only warnings exist
|
||||
assert.Equal(t, provisioning.JobStateWarning, finalStatus.State)
|
||||
assert.Equal(t, "completed with warnings", finalStatus.Message)
|
||||
assert.Empty(t, finalStatus.Errors)
|
||||
require.NotNil(t, finalStatus.Warnings)
|
||||
assert.Len(t, finalStatus.Warnings, 1)
|
||||
}
|
||||
|
||||
@@ -38,6 +38,7 @@ func RegisterAPIService(features featuremgmt.FeatureToggles, apiregistration bui
|
||||
}
|
||||
|
||||
func (b *ServiceAPIBuilder) GetAuthorizer() authorizer.Authorizer {
|
||||
//nolint:staticcheck // not yet migrated to Resource Authorizer
|
||||
return roleauthorizer.NewRoleAuthorizer()
|
||||
}
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ package apiregistry
|
||||
import (
|
||||
"github.com/google/wire"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apiserver/auditing"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/collections"
|
||||
dashboardinternal "github.com/grafana/grafana/pkg/registry/apis/dashboard"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/datasource"
|
||||
@@ -33,6 +34,10 @@ var WireSetExts = wire.NewSet(
|
||||
|
||||
externalgroupmapping.ProvideNoopTeamGroupsREST,
|
||||
wire.Bind(new(externalgroupmapping.TeamGroupsHandler), new(*externalgroupmapping.NoopTeamGroupsREST)),
|
||||
|
||||
// Auditing Options
|
||||
auditing.ProvideNoopBackend,
|
||||
auditing.ProvideNoopPolicyRuleEvaluator,
|
||||
)
|
||||
|
||||
var provisioningExtras = wire.NewSet(
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
|
||||
"github.com/grafana/grafana-app-sdk/app"
|
||||
@@ -16,6 +17,7 @@ import (
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/apiserver/rest"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver/appinstaller"
|
||||
roleauthorizer "github.com/grafana/grafana/pkg/services/apiserver/auth/authorizer"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
|
||||
"github.com/grafana/grafana/pkg/services/correlations"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
@@ -60,6 +62,11 @@ func RegisterAppInstaller(
|
||||
return installer, nil
|
||||
}
|
||||
|
||||
func (a *AppInstaller) GetAuthorizer() authorizer.Authorizer {
|
||||
//nolint:staticcheck // not yet migrated to Resource Authorizer
|
||||
return roleauthorizer.NewRoleAuthorizer()
|
||||
}
|
||||
|
||||
func (a *AppInstaller) GetLegacyStorage(requested schema.GroupVersionResource) rest.Storage {
|
||||
kind := correlationsV0.CorrelationKind()
|
||||
gvr := schema.GroupVersionResource{
|
||||
|
||||
@@ -6,17 +6,20 @@ import (
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
|
||||
"github.com/grafana/grafana-app-sdk/app"
|
||||
appsdkapiserver "github.com/grafana/grafana-app-sdk/k8s/apiserver"
|
||||
"github.com/grafana/grafana-app-sdk/simple"
|
||||
|
||||
"github.com/grafana/grafana/apps/playlist/pkg/apis"
|
||||
playlistv0alpha1 "github.com/grafana/grafana/apps/playlist/pkg/apis/playlist/v0alpha1"
|
||||
playlistapp "github.com/grafana/grafana/apps/playlist/pkg/app"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
grafanarest "github.com/grafana/grafana/pkg/apiserver/rest"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver/appinstaller"
|
||||
roleauthorizer "github.com/grafana/grafana/pkg/services/apiserver/auth/authorizer"
|
||||
"github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
playlistsvc "github.com/grafana/grafana/pkg/services/playlist"
|
||||
@@ -63,6 +66,11 @@ func RegisterAppInstaller(
|
||||
return installer, nil
|
||||
}
|
||||
|
||||
func (p *PlaylistAppInstaller) GetAuthorizer() authorizer.Authorizer {
|
||||
//nolint:staticcheck // not yet migrated to Resource Authorizer
|
||||
return roleauthorizer.NewRoleAuthorizer()
|
||||
}
|
||||
|
||||
// GetLegacyStorage returns the legacy storage for the playlist app.
|
||||
func (p *PlaylistAppInstaller) GetLegacyStorage(requested schema.GroupVersionResource) grafanarest.Storage {
|
||||
gvr := playlistv0alpha1.PlaylistKind().GroupVersionResource()
|
||||
|
||||
@@ -3,12 +3,14 @@ package quotas
|
||||
import (
|
||||
"github.com/grafana/grafana/apps/quotas/pkg/apis"
|
||||
"github.com/grafana/grafana/pkg/storage/unified/resource"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
|
||||
"github.com/grafana/grafana-app-sdk/app"
|
||||
appsdkapiserver "github.com/grafana/grafana-app-sdk/k8s/apiserver"
|
||||
"github.com/grafana/grafana-app-sdk/simple"
|
||||
quotasapp "github.com/grafana/grafana/apps/quotas/pkg/app"
|
||||
roleauthorizer "github.com/grafana/grafana/pkg/services/apiserver/auth/authorizer"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
"github.com/grafana/grafana/pkg/setting"
|
||||
)
|
||||
@@ -22,6 +24,11 @@ type QuotasAppInstaller struct {
|
||||
cfg *setting.Cfg
|
||||
}
|
||||
|
||||
func (a *QuotasAppInstaller) GetAuthorizer() authorizer.Authorizer {
|
||||
//nolint:staticcheck // not yet migrated to Resource Authorizer
|
||||
return roleauthorizer.NewRoleAuthorizer()
|
||||
}
|
||||
|
||||
func RegisterAppInstaller(
|
||||
cfg *setting.Cfg,
|
||||
features featuremgmt.FeatureToggles,
|
||||
|
||||
Generated
+9
-4
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/grafana/grafana/pkg/api"
|
||||
"github.com/grafana/grafana/pkg/api/avatar"
|
||||
"github.com/grafana/grafana/pkg/api/routing"
|
||||
"github.com/grafana/grafana/pkg/apiserver/auditing"
|
||||
"github.com/grafana/grafana/pkg/bus"
|
||||
"github.com/grafana/grafana/pkg/configprovider"
|
||||
"github.com/grafana/grafana/pkg/expr"
|
||||
@@ -831,7 +832,9 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api
|
||||
}
|
||||
v2 := appregistry.ProvideAppInstallers(featureToggles, playlistAppInstaller, appInstaller, shortURLAppInstaller, alertingRulesAppInstaller, correlationsAppInstaller, alertingNotificationsAppInstaller, logsDrilldownAppInstaller, annotationAppInstaller, exampleAppInstaller, advisorAppInstaller, alertingHistorianAppInstaller, quotasAppInstaller)
|
||||
builderMetrics := builder.ProvideBuilderMetrics(registerer)
|
||||
apiserverService, err := apiserver.ProvideService(cfg, featureToggles, routeRegisterImpl, tracingService, serverLockService, sqlStore, kvStore, middlewareHandler, scopedPluginDatasourceProvider, plugincontextProvider, pluginstoreService, dualwriteService, resourceClient, inlineSecureValueSupport, eventualRestConfigProvider, v, eventualRestConfigProvider, registerer, aggregatorRunner, v2, builderMetrics)
|
||||
backend := auditing.ProvideNoopBackend()
|
||||
policyRuleEvaluator := auditing.ProvideNoopPolicyRuleEvaluator()
|
||||
apiserverService, err := apiserver.ProvideService(cfg, featureToggles, routeRegisterImpl, tracingService, serverLockService, sqlStore, kvStore, middlewareHandler, scopedPluginDatasourceProvider, plugincontextProvider, pluginstoreService, dualwriteService, resourceClient, inlineSecureValueSupport, eventualRestConfigProvider, v, eventualRestConfigProvider, registerer, aggregatorRunner, v2, builderMetrics, backend, policyRuleEvaluator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -879,7 +882,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api
|
||||
folderAPIBuilder := folders.RegisterAPIService(cfg, featureToggles, apiserverService, folderimplService, folderPermissionsService, accessControl, acimplService, accessClient, registerer, resourceClient, zanzanaClient)
|
||||
storageBackendImpl := noopstorage.ProvideStorageBackend()
|
||||
noopTeamGroupsREST := externalgroupmapping.ProvideNoopTeamGroupsREST()
|
||||
identityAccessManagementAPIBuilder, err := iam.RegisterAPIService(cfg, featureToggles, apiserverService, ssosettingsimplService, sqlStore, accessControl, accessClient, zanzanaClient, registerer, storageBackendImpl, storageBackendImpl, tracingService, storageBackendImpl, storageBackendImpl, noopTeamGroupsREST, dualwriteService, resourceClient, orgService, userService, teamService)
|
||||
identityAccessManagementAPIBuilder, err := iam.RegisterAPIService(cfg, featureToggles, apiserverService, ssosettingsimplService, sqlStore, accessControl, accessClient, zanzanaClient, registerer, storageBackendImpl, storageBackendImpl, tracingService, storageBackendImpl, storageBackendImpl, noopTeamGroupsREST, dualwriteService, resourceClient, orgService, userService, teamService, eventualRestConfigProvider)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1489,7 +1492,9 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac
|
||||
}
|
||||
v2 := appregistry.ProvideAppInstallers(featureToggles, playlistAppInstaller, appInstaller, shortURLAppInstaller, alertingRulesAppInstaller, correlationsAppInstaller, alertingNotificationsAppInstaller, logsDrilldownAppInstaller, annotationAppInstaller, exampleAppInstaller, advisorAppInstaller, alertingHistorianAppInstaller, quotasAppInstaller)
|
||||
builderMetrics := builder.ProvideBuilderMetrics(registerer)
|
||||
apiserverService, err := apiserver.ProvideService(cfg, featureToggles, routeRegisterImpl, tracingService, serverLockService, sqlStore, kvStore, middlewareHandler, scopedPluginDatasourceProvider, plugincontextProvider, pluginstoreService, dualwriteService, resourceClient, inlineSecureValueSupport, eventualRestConfigProvider, v, eventualRestConfigProvider, registerer, aggregatorRunner, v2, builderMetrics)
|
||||
backend := auditing.ProvideNoopBackend()
|
||||
policyRuleEvaluator := auditing.ProvideNoopPolicyRuleEvaluator()
|
||||
apiserverService, err := apiserver.ProvideService(cfg, featureToggles, routeRegisterImpl, tracingService, serverLockService, sqlStore, kvStore, middlewareHandler, scopedPluginDatasourceProvider, plugincontextProvider, pluginstoreService, dualwriteService, resourceClient, inlineSecureValueSupport, eventualRestConfigProvider, v, eventualRestConfigProvider, registerer, aggregatorRunner, v2, builderMetrics, backend, policyRuleEvaluator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1537,7 +1542,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac
|
||||
folderAPIBuilder := folders.RegisterAPIService(cfg, featureToggles, apiserverService, folderimplService, folderPermissionsService, accessControl, acimplService, accessClient, registerer, resourceClient, zanzanaClient)
|
||||
storageBackendImpl := noopstorage.ProvideStorageBackend()
|
||||
noopTeamGroupsREST := externalgroupmapping.ProvideNoopTeamGroupsREST()
|
||||
identityAccessManagementAPIBuilder, err := iam.RegisterAPIService(cfg, featureToggles, apiserverService, ssosettingsimplService, sqlStore, accessControl, accessClient, zanzanaClient, registerer, storageBackendImpl, storageBackendImpl, tracingService, storageBackendImpl, storageBackendImpl, noopTeamGroupsREST, dualwriteService, resourceClient, orgService, userService, teamService)
|
||||
identityAccessManagementAPIBuilder, err := iam.RegisterAPIService(cfg, featureToggles, apiserverService, ssosettingsimplService, sqlStore, accessControl, accessClient, zanzanaClient, registerer, storageBackendImpl, storageBackendImpl, tracingService, storageBackendImpl, storageBackendImpl, noopTeamGroupsREST, dualwriteService, resourceClient, orgService, userService, teamService, eventualRestConfigProvider)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -114,6 +114,8 @@ func RegisterAuthorizers(
|
||||
registrar.Register(gv, authorizer)
|
||||
logger.Debug("Registered authorizer", "group", gv.Group, "version", gv.Version, "app")
|
||||
}
|
||||
} else {
|
||||
panic("authorizer cannot be nil for api group: " + installer.GroupVersions()[0].Group)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ func TestRegisterAuthorizers(t *testing.T) {
|
||||
name string
|
||||
appInstallers []appsdkapiserver.AppInstaller
|
||||
expectedRegisters int
|
||||
expectedPanic bool
|
||||
}{
|
||||
{
|
||||
name: "empty installers list",
|
||||
@@ -30,7 +31,7 @@ func TestRegisterAuthorizers(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRegisters: 0,
|
||||
expectedPanic: true,
|
||||
},
|
||||
{
|
||||
name: "single installer with authorizer provider",
|
||||
@@ -46,6 +47,20 @@ func TestRegisterAuthorizers(t *testing.T) {
|
||||
},
|
||||
expectedRegisters: 1,
|
||||
},
|
||||
{
|
||||
name: "single installer with invalid authorizer provider",
|
||||
appInstallers: []appsdkapiserver.AppInstaller{
|
||||
&mockAppInstallerWithAuth{
|
||||
mockAppInstaller: &mockAppInstaller{
|
||||
groupVersions: []schema.GroupVersion{
|
||||
{Group: "test.example.com", Version: "v1"},
|
||||
},
|
||||
},
|
||||
mockAuthorizer: nil,
|
||||
},
|
||||
},
|
||||
expectedPanic: true,
|
||||
},
|
||||
{
|
||||
name: "installer with multiple group versions",
|
||||
appInstallers: []appsdkapiserver.AppInstaller{
|
||||
@@ -63,7 +78,7 @@ func TestRegisterAuthorizers(t *testing.T) {
|
||||
expectedRegisters: 3,
|
||||
},
|
||||
{
|
||||
name: "multiple installers with mixed authorizer support",
|
||||
name: "multiple installers with authorizer support",
|
||||
appInstallers: []appsdkapiserver.AppInstaller{
|
||||
&mockAppInstallerWithAuth{
|
||||
mockAppInstaller: &mockAppInstaller{
|
||||
@@ -73,11 +88,6 @@ func TestRegisterAuthorizers(t *testing.T) {
|
||||
},
|
||||
mockAuthorizer: &mockAuthorizer{},
|
||||
},
|
||||
&mockAppInstaller{
|
||||
groupVersions: []schema.GroupVersion{
|
||||
{Group: "other.example.com", Version: "v1"},
|
||||
},
|
||||
},
|
||||
&mockAppInstallerWithAuth{
|
||||
mockAppInstaller: &mockAppInstaller{
|
||||
groupVersions: []schema.GroupVersion{
|
||||
@@ -88,7 +98,7 @@ func TestRegisterAuthorizers(t *testing.T) {
|
||||
mockAuthorizer: &mockAuthorizer{},
|
||||
},
|
||||
},
|
||||
expectedRegisters: 3, // 1 from first installer + 2 from third installer
|
||||
expectedRegisters: 3, // 1 from first installer + 2 from second installer
|
||||
},
|
||||
}
|
||||
|
||||
@@ -96,6 +106,13 @@ func TestRegisterAuthorizers(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
registrar := &mockAuthorizerRegistrar{}
|
||||
if tt.expectedPanic {
|
||||
defer func() {
|
||||
if r := recover(); r == nil {
|
||||
t.Errorf("%s case did not panic as expected", t.Name())
|
||||
}
|
||||
}()
|
||||
}
|
||||
RegisterAuthorizers(ctx, tt.appInstallers, registrar)
|
||||
require.Equal(t, tt.expectedRegisters, len(registrar.registrations))
|
||||
})
|
||||
|
||||
@@ -38,12 +38,12 @@ func NewGrafanaBuiltInSTAuthorizer(cfg *setting.Cfg) *GrafanaAuthorizer {
|
||||
|
||||
// Individual services may have explicit implementations
|
||||
apis := make(map[string]authorizer.Authorizer)
|
||||
// The apiVersion flavors will run first and can return early when FGAC has appropriate rules
|
||||
authorizers = append(authorizers, &authorizerForAPI{apis})
|
||||
|
||||
// org role is last -- and will return allow for verbs that match expectations
|
||||
// The apiVersion flavors will run first and can return early when FGAC has appropriate rules
|
||||
// NOTE: role authorizer is now used by some api groups as their specific authorizer
|
||||
// but there are still some apis not directly registered in the embedded delegate that benefit from including it here
|
||||
// org role authorizer is last -- and will return allow for verbs that match expectations
|
||||
// it is only helpful here for remote APIs in some cloud use-cases.
|
||||
//nolint:staticcheck // remove once build handler chains are untangled between local and remote APIs handling
|
||||
authorizers = append(authorizers, NewRoleAuthorizer())
|
||||
return &GrafanaAuthorizer{
|
||||
apis: apis,
|
||||
|
||||
@@ -19,6 +19,7 @@ var orgRoleNoneAsViewerAPIGroups = []string{
|
||||
|
||||
type roleAuthorizer struct{}
|
||||
|
||||
// Deprecated: NewRoleAuthorizer exists for apps that were launched with simplistic authorization requirements. Consider using NewResourceAuthorizer instead.
|
||||
func NewRoleAuthorizer() *roleAuthorizer {
|
||||
return &roleAuthorizer{}
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
"k8s.io/apiserver/pkg/audit"
|
||||
genericapifilters "k8s.io/apiserver/pkg/endpoints/filters"
|
||||
"k8s.io/apiserver/pkg/endpoints/responsewriter"
|
||||
genericapiserver "k8s.io/apiserver/pkg/server"
|
||||
@@ -113,6 +114,9 @@ type service struct {
|
||||
appInstallers []appsdkapiserver.AppInstaller
|
||||
builderMetrics *builder.BuilderMetrics
|
||||
dualWriterMetrics *grafanarest.DualWriterMetrics
|
||||
|
||||
auditBackend audit.Backend
|
||||
auditPolicyRuleEvaluator audit.PolicyRuleEvaluator
|
||||
}
|
||||
|
||||
func ProvideService(
|
||||
@@ -137,6 +141,8 @@ func ProvideService(
|
||||
aggregatorRunner aggregatorrunner.AggregatorRunner,
|
||||
appInstallers []appsdkapiserver.AppInstaller,
|
||||
builderMetrics *builder.BuilderMetrics,
|
||||
auditBackend audit.Backend,
|
||||
auditPolicyRuleEvaluator audit.PolicyRuleEvaluator,
|
||||
) (*service, error) {
|
||||
scheme := builder.ProvideScheme()
|
||||
codecs := builder.ProvideCodecFactory(scheme)
|
||||
@@ -167,6 +173,8 @@ func ProvideService(
|
||||
appInstallers: appInstallers,
|
||||
builderMetrics: builderMetrics,
|
||||
dualWriterMetrics: grafanarest.NewDualWriterMetrics(reg),
|
||||
auditBackend: auditBackend,
|
||||
auditPolicyRuleEvaluator: auditPolicyRuleEvaluator,
|
||||
}
|
||||
// This will be used when running as a dskit service
|
||||
s.NamedService = services.NewBasicService(s.start, s.running, nil).WithName(modules.GrafanaAPIServer)
|
||||
@@ -355,6 +363,10 @@ func (s *service) start(ctx context.Context) error {
|
||||
appinstaller.BuildOpenAPIDefGetter(s.appInstallers),
|
||||
}
|
||||
|
||||
// Auditing Options
|
||||
serverConfig.AuditBackend = s.auditBackend
|
||||
serverConfig.AuditPolicyRuleEvaluator = s.auditPolicyRuleEvaluator
|
||||
|
||||
// Add OpenAPI specs for each group+version (existing builders)
|
||||
err = builder.SetupConfig(
|
||||
s.scheme,
|
||||
|
||||
@@ -77,6 +77,10 @@ var (
|
||||
"user.sync.user-externalUID-mismatch",
|
||||
errutil.WithPublicMessage("User externalUID mismatch"),
|
||||
)
|
||||
errSCIMAuthModuleMismatch = errutil.Unauthorized(
|
||||
"user.sync.scim-auth-module-mismatch",
|
||||
errutil.WithPublicMessage("User was provisioned via SCIM and must login via SAML"),
|
||||
)
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -308,6 +312,21 @@ func (s *UserSync) SyncUserHook(ctx context.Context, id *authn.Identity, _ *auth
|
||||
// just try to fetch the user one more to make the other request work.
|
||||
if errors.Is(err, user.ErrUserAlreadyExists) {
|
||||
usr, _, err = s.getUser(ctx, id)
|
||||
|
||||
// Check if this is a SCIM-provisioned user trying to login via an auth module that is not SAML or GCOM
|
||||
if err == nil && usr != nil && usr.IsProvisioned && id.AuthenticatedBy != login.GrafanaComAuthModule {
|
||||
_, authErr := s.authInfoService.GetAuthInfo(ctx, &login.GetAuthInfoQuery{
|
||||
UserId: usr.ID,
|
||||
AuthModule: id.AuthenticatedBy,
|
||||
})
|
||||
if errors.Is(authErr, user.ErrUserNotFound) {
|
||||
s.log.FromContext(ctx).Error("SCIM-provisioned user attempted login via non-SAML auth module",
|
||||
"user_id", usr.ID,
|
||||
"attempted_module", id.AuthenticatedBy,
|
||||
)
|
||||
return errSCIMAuthModuleMismatch.Errorf("user was provisioned via SCIM but attempted login via %s", id.AuthenticatedBy)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
||||
@@ -1926,3 +1926,100 @@ func TestUserSync_SCIMLoginUsageStatSet(t *testing.T) {
|
||||
finalCount := finalStats["stats.features.scim.has_successful_login.count"].(int)
|
||||
require.Equal(t, int(1), finalCount)
|
||||
}
|
||||
|
||||
func TestUserSync_SyncUserHook_SCIMAuthModuleMismatch(t *testing.T) {
|
||||
userSrv := usertest.NewMockService(t)
|
||||
authInfoSrv := authinfotest.NewMockAuthInfoService(t)
|
||||
|
||||
userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(nil, user.ErrUserNotFound).Once()
|
||||
|
||||
userSrv.On("Create", mock.Anything, mock.Anything).Return(nil, user.ErrUserAlreadyExists).Once()
|
||||
|
||||
userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(&user.User{
|
||||
ID: 1,
|
||||
Email: "test@test.com",
|
||||
IsProvisioned: true,
|
||||
}, nil).Once()
|
||||
|
||||
authInfoSrv.On("GetAuthInfo", mock.Anything, mock.MatchedBy(func(q *login.GetAuthInfoQuery) bool {
|
||||
return q.AuthModule == "oauth_azuread"
|
||||
})).Return(nil, user.ErrUserNotFound).Once()
|
||||
|
||||
s := ProvideUserSync(
|
||||
userSrv,
|
||||
authinfoimpl.ProvideOSSUserProtectionService(),
|
||||
authInfoSrv,
|
||||
"atest.FakeQuotaService{},
|
||||
tracing.NewNoopTracerService(),
|
||||
featuremgmt.WithFeatures(),
|
||||
setting.NewCfg(),
|
||||
nil,
|
||||
)
|
||||
|
||||
email := "test@test.com"
|
||||
|
||||
err := s.SyncUserHook(context.Background(), &authn.Identity{
|
||||
AuthenticatedBy: "oauth_azuread",
|
||||
ClientParams: authn.ClientParams{
|
||||
SyncUser: true,
|
||||
AllowSignUp: true,
|
||||
LookUpParams: login.UserLookupParams{
|
||||
Email: &email,
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
|
||||
require.Error(t, err)
|
||||
assert.ErrorIs(t, err, errSCIMAuthModuleMismatch)
|
||||
assert.Contains(t, err.Error(), "SCIM")
|
||||
assert.Contains(t, err.Error(), "oauth_azuread")
|
||||
}
|
||||
|
||||
func TestUserSync_SyncUserHook_SCIMUserAllowsGCOMLogin(t *testing.T) {
|
||||
userSrv := usertest.NewMockService(t)
|
||||
authInfoSrv := authinfotest.NewMockAuthInfoService(t)
|
||||
|
||||
authInfoSrv.On("GetAuthInfo", mock.Anything, mock.MatchedBy(func(q *login.GetAuthInfoQuery) bool {
|
||||
return q.AuthModule == login.GrafanaComAuthModule && q.AuthId == "gcom-user-123"
|
||||
})).Return(nil, user.ErrUserNotFound).Once()
|
||||
|
||||
userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(nil, user.ErrUserNotFound).Once()
|
||||
userSrv.On("Create", mock.Anything, mock.Anything).Return(nil, user.ErrUserAlreadyExists).Once()
|
||||
|
||||
authInfoSrv.On("GetAuthInfo", mock.Anything, mock.MatchedBy(func(q *login.GetAuthInfoQuery) bool {
|
||||
return q.AuthModule == login.GrafanaComAuthModule && q.AuthId == "gcom-user-123"
|
||||
})).Return(nil, user.ErrUserNotFound).Once()
|
||||
|
||||
userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(&user.User{
|
||||
ID: 1,
|
||||
Email: "test@test.com",
|
||||
IsProvisioned: true,
|
||||
}, nil).Once()
|
||||
|
||||
s := ProvideUserSync(
|
||||
userSrv,
|
||||
authinfoimpl.ProvideOSSUserProtectionService(),
|
||||
authInfoSrv,
|
||||
"atest.FakeQuotaService{},
|
||||
tracing.NewNoopTracerService(),
|
||||
featuremgmt.WithFeatures(),
|
||||
setting.NewCfg(),
|
||||
nil,
|
||||
)
|
||||
|
||||
email := "test@test.com"
|
||||
|
||||
err := s.SyncUserHook(context.Background(), &authn.Identity{
|
||||
AuthenticatedBy: login.GrafanaComAuthModule,
|
||||
AuthID: "gcom-user-123",
|
||||
ClientParams: authn.ClientParams{
|
||||
SyncUser: true,
|
||||
AllowSignUp: true,
|
||||
LookUpParams: login.UserLookupParams{
|
||||
Email: &email,
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -4,8 +4,12 @@ import (
|
||||
"google.golang.org/protobuf/types/known/structpb"
|
||||
|
||||
authzv1 "github.com/grafana/authlib/authz/proto/v1"
|
||||
|
||||
dashboardV1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1"
|
||||
folders "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1"
|
||||
iamv0alpha1 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/services/accesscontrol"
|
||||
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
)
|
||||
|
||||
@@ -44,7 +48,8 @@ func getTypeInfo(group, resource string) (typeInfo, bool) {
|
||||
|
||||
func NewResourceInfoFromCheck(r *authzv1.CheckRequest) ResourceInfo {
|
||||
typ, relations := getTypeAndRelations(r.GetGroup(), r.GetResource())
|
||||
return newResource(
|
||||
|
||||
resource := newResource(
|
||||
typ,
|
||||
r.GetGroup(),
|
||||
r.GetResource(),
|
||||
@@ -53,6 +58,19 @@ func NewResourceInfoFromCheck(r *authzv1.CheckRequest) ResourceInfo {
|
||||
r.GetSubresource(),
|
||||
relations,
|
||||
)
|
||||
|
||||
// Special case for creating folders and resources in the root folder
|
||||
if r.GetVerb() == utils.VerbCreate {
|
||||
if resource.IsFolderResource() && resource.name == "" {
|
||||
resource.name = accesscontrol.GeneralFolderUID
|
||||
} else if resource.HasFolderSupport() && resource.folder == "" {
|
||||
resource.folder = accesscontrol.GeneralFolderUID
|
||||
}
|
||||
|
||||
return resource
|
||||
}
|
||||
|
||||
return resource
|
||||
}
|
||||
|
||||
func NewResourceInfoFromBatchItem(i *authzextv1.BatchCheckItem) ResourceInfo {
|
||||
@@ -164,3 +182,15 @@ func (r ResourceInfo) IsValidRelation(relation string) bool {
|
||||
func (r ResourceInfo) HasSubresource() bool {
|
||||
return r.subresource != ""
|
||||
}
|
||||
|
||||
var resourcesWithFolderSupport = map[string]bool{
|
||||
dashboardV1.DashboardResourceInfo.GroupResource().Group: true,
|
||||
}
|
||||
|
||||
func (r ResourceInfo) HasFolderSupport() bool {
|
||||
return resourcesWithFolderSupport[r.group]
|
||||
}
|
||||
|
||||
func (r ResourceInfo) IsFolderResource() bool {
|
||||
return r.group == folders.FolderResourceInfo.GroupResource().Group
|
||||
}
|
||||
|
||||
@@ -228,6 +228,9 @@ func TranslateToResourceTuple(subject string, action, kind, name string) (*openf
|
||||
}
|
||||
|
||||
if name == "*" {
|
||||
if m.group != "" && m.resource != "" {
|
||||
return NewGroupResourceTuple(subject, m.relation, m.group, m.resource, m.subresource), true
|
||||
}
|
||||
return NewGroupResourceTuple(subject, m.relation, translation.group, translation.resource, m.subresource), true
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,89 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
openfgav1 "github.com/openfga/api/proto/openfga/v1"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/types/known/structpb"
|
||||
)
|
||||
|
||||
type translationTestCase struct {
|
||||
testName string
|
||||
subject string
|
||||
action string
|
||||
kind string
|
||||
name string
|
||||
expected *openfgav1.TupleKey
|
||||
}
|
||||
|
||||
func TestTranslateToResourceTuple(t *testing.T) {
|
||||
tests := []translationTestCase{
|
||||
{
|
||||
testName: "dashboards:read in folders",
|
||||
subject: "user:1",
|
||||
action: "dashboards:read",
|
||||
kind: "folders",
|
||||
name: "*",
|
||||
expected: &openfgav1.TupleKey{
|
||||
User: "user:1",
|
||||
Relation: "get",
|
||||
Object: "group_resource:dashboard.grafana.app/dashboards",
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "dashboards:read for all dashboards",
|
||||
subject: "user:1",
|
||||
action: "dashboards:read",
|
||||
kind: "dashboards",
|
||||
name: "*",
|
||||
expected: &openfgav1.TupleKey{
|
||||
User: "user:1",
|
||||
Relation: "get",
|
||||
Object: "group_resource:dashboard.grafana.app/dashboards",
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "dashboards:read for general folder",
|
||||
subject: "user:1",
|
||||
action: "dashboards:read",
|
||||
kind: "folders",
|
||||
name: "general",
|
||||
expected: &openfgav1.TupleKey{
|
||||
User: "user:1",
|
||||
Relation: "resource_get",
|
||||
Object: "folder:general",
|
||||
Condition: &openfgav1.RelationshipCondition{
|
||||
Name: "subresource_filter",
|
||||
Context: &structpb.Struct{
|
||||
Fields: map[string]*structpb.Value{
|
||||
"subresources": structpb.NewListValue(&structpb.ListValue{
|
||||
Values: []*structpb.Value{structpb.NewStringValue("dashboard.grafana.app/dashboards")},
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "folders:read",
|
||||
subject: "user:1",
|
||||
action: "folders:read",
|
||||
kind: "folders",
|
||||
name: "*",
|
||||
expected: &openfgav1.TupleKey{
|
||||
User: "user:1",
|
||||
Relation: "get",
|
||||
Object: "group_resource:folder.grafana.app/folders",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.testName, func(t *testing.T) {
|
||||
tuple, ok := TranslateToResourceTuple(test.subject, test.action, test.kind, test.name)
|
||||
require.True(t, ok)
|
||||
require.EqualExportedValues(t, test.expected, tuple)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -212,4 +212,16 @@ func testCheck(t *testing.T, server *Server) {
|
||||
require.NoError(t, err)
|
||||
assert.True(t, res.GetAllowed(), "user should be able to view dashboards in folder 6")
|
||||
})
|
||||
|
||||
t.Run("user:18 should be able to create folder in root folder", func(t *testing.T) {
|
||||
res, err := server.Check(newContextWithNamespace(), newReq("user:18", utils.VerbCreate, folderGroup, folderResource, "", "", ""))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, res.GetAllowed())
|
||||
})
|
||||
|
||||
t.Run("user:18 should be able to create dashboard in root folder", func(t *testing.T) {
|
||||
res, err := server.Check(newContextWithNamespace(), newReq("user:18", utils.VerbCreate, dashboardGroup, dashboardResource, "", "", ""))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, res.GetAllowed())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -71,6 +71,8 @@ func setup(t *testing.T, srv *Server) *Server {
|
||||
common.NewTypedResourceTuple("user:15", common.RelationGet, common.TypeUser, userGroup, userResource, statusSubresource, "1"),
|
||||
common.NewTypedResourceTuple("user:16", common.RelationGet, common.TypeServiceAccount, serviceAccountGroup, serviceAccountResource, statusSubresource, "1"),
|
||||
common.NewFolderTuple("user:17", common.RelationSetView, "4"),
|
||||
common.NewFolderTuple("user:18", common.RelationCreate, "general"),
|
||||
common.NewFolderResourceTuple("user:18", common.RelationCreate, dashboardGroup, dashboardResource, "", "general"),
|
||||
}
|
||||
|
||||
return setupOpenFGADatabase(t, srv, tuples)
|
||||
|
||||
@@ -304,8 +304,15 @@ type DeleteDashboardCommand struct {
|
||||
RemovePermissions bool
|
||||
}
|
||||
|
||||
type ProvisioningConfig struct {
|
||||
Name string
|
||||
OrgID int64
|
||||
Folder string
|
||||
AllowUIUpdates bool
|
||||
}
|
||||
|
||||
type DeleteOrphanedProvisionedDashboardsCommand struct {
|
||||
ReaderNames []string
|
||||
Config []ProvisioningConfig
|
||||
}
|
||||
|
||||
type DashboardProvisioningSearchResults struct {
|
||||
@@ -405,6 +412,8 @@ type DashboardSearchProjection struct {
|
||||
FolderTitle string
|
||||
SortMeta int64
|
||||
Tags []string
|
||||
ManagedBy utils.ManagerKind
|
||||
ManagerId string
|
||||
Deleted *time.Time
|
||||
}
|
||||
|
||||
|
||||
@@ -877,24 +877,32 @@ func (dr *DashboardServiceImpl) waitForSearchQuery(ctx context.Context, query *d
|
||||
}
|
||||
|
||||
func (dr *DashboardServiceImpl) DeleteOrphanedProvisionedDashboards(ctx context.Context, cmd *dashboards.DeleteOrphanedProvisionedDashboardsCommand) error {
|
||||
// cleanup duplicate provisioned dashboards first (this will have the same name and external_id)
|
||||
// note: only works in modes 1-3
|
||||
if err := dr.DeleteDuplicateProvisionedDashboards(ctx); err != nil {
|
||||
dr.log.Error("Failed to delete duplicate provisioned dashboards", "error", err)
|
||||
}
|
||||
|
||||
// check each org for orphaned provisioned dashboards
|
||||
orgs, err := dr.orgService.Search(ctx, &org.SearchOrgsQuery{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
orgIDs := make([]int64, 0, len(orgs))
|
||||
for _, org := range orgs {
|
||||
orgIDs = append(orgIDs, org.ID)
|
||||
}
|
||||
|
||||
if err := dr.DeleteDuplicateProvisionedDashboards(ctx, orgIDs, cmd.Config); err != nil {
|
||||
dr.log.Error("Failed to delete duplicate provisioned dashboards", "error", err)
|
||||
}
|
||||
|
||||
currentNames := make([]string, 0, len(cmd.Config))
|
||||
for _, cfg := range cmd.Config {
|
||||
currentNames = append(currentNames, cfg.Name)
|
||||
}
|
||||
|
||||
for _, org := range orgs {
|
||||
ctx, _ := identity.WithServiceIdentity(ctx, org.ID)
|
||||
// find all dashboards in the org that have a file repo set that is not in the given readers list
|
||||
foundDashs, err := dr.searchProvisionedDashboardsThroughK8s(ctx, &dashboards.FindPersistedDashboardsQuery{
|
||||
ManagedBy: utils.ManagerKindClassicFP, //nolint:staticcheck
|
||||
ManagerIdentityNotIn: cmd.ReaderNames,
|
||||
ManagerIdentityNotIn: currentNames,
|
||||
OrgId: org.ID,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -921,7 +929,129 @@ func (dr *DashboardServiceImpl) DeleteOrphanedProvisionedDashboards(ctx context.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dr *DashboardServiceImpl) DeleteDuplicateProvisionedDashboards(ctx context.Context) error {
|
||||
// searchExistingProvisionedData fetches provisioned data for the purposes of
// duplication cleanup. Returns the set of folder UIDs for top-level folders
// with the given title, and the set of resources contained in those folders.
// Both return values are nil when no matching folder exists.
func (dr *DashboardServiceImpl) searchExistingProvisionedData(
	ctx context.Context, orgID int64, folderTitle string,
) ([]string, []dashboards.DashboardSearchProjection, error) {
	// Search with an elevated service identity so cleanup is not limited by
	// the permissions of any particular user.
	ctx, user := identity.WithServiceIdentity(ctx, orgID)
	cmd := folder.SearchFoldersQuery{
		OrgID:           orgID,
		SignedInUser:    user,
		Title:           folderTitle,
		TitleExactMatch: true,
	}

	searchResults, err := dr.folderService.SearchFolders(ctx, cmd)
	if err != nil {
		return nil, nil, fmt.Errorf("checking if provisioning reset is required: %w", err)
	}

	var matchingFolders []string //nolint:prealloc
	for _, result := range searchResults {
		// Fetch the full folder to learn its parent; the search result alone
		// does not tell us whether the folder sits at the top level.
		f, err := dr.folderService.Get(ctx, &folder.GetFolderQuery{
			OrgID:        orgID,
			UID:          &result.UID,
			SignedInUser: user,
		})
		if err != nil {
			return nil, nil, err
		}

		// We are only interested in folders at the top-level of the folder hierarchy.
		// Cleanup is not performed for provisioned folders that were moved to
		// a different location.
		if f.ParentUID != "" {
			continue
		}

		matchingFolders = append(matchingFolders, f.UID)
	}

	if len(matchingFolders) == 0 {
		// If there are no folders with the same title as the provisioned folder we
		// are looking for, there is nothing to be cleaned up.
		return nil, nil, nil
	}

	resources, err := dr.FindDashboards(ctx, &dashboards.FindPersistedDashboardsQuery{
		OrgId:        orgID,
		SignedInUser: user,
		FolderUIDs:   matchingFolders,
	})
	if err != nil {
		return nil, nil, err
	}

	return matchingFolders, resources, nil
}
|
||||
|
||||
// maybeResetProvisioning will check for duplicated provisioned dashboards in the database. These duplications
// happen when multiple provisioned dashboards of the same title are found, or multiple provisioned
// folders are found. In this case, provisioned resources are deleted, allowing the provisioning
// process to start from scratch after this function returns.
func (dr *DashboardServiceImpl) maybeResetProvisioning(ctx context.Context, orgs []int64, configs []dashboards.ProvisioningConfig) {
	// Only proceed when the configuration is simple enough for an automated
	// cleanup to be safe: one shared folder title and no allowUiUpdates.
	if skipReason := canBeAutomaticallyCleanedUp(configs); skipReason != "" {
		dr.log.Info("not eligible for automated cleanup", "reason", skipReason)
		return
	}

	// canBeAutomaticallyCleanedUp guarantees configs is non-empty and that
	// all entries share the same folder title.
	folderTitle := configs[0].Folder
	provisionedNames := map[string]bool{}
	for _, c := range configs {
		provisionedNames[c.Name] = true
	}

	// Errors are logged per org and never abort the loop: a failure in one
	// org must not prevent cleanup in the others.
	for _, orgID := range orgs {
		ctx, user := identity.WithServiceIdentity(ctx, orgID)
		provFolders, resources, err := dr.searchExistingProvisionedData(ctx, orgID, folderTitle)
		if err != nil {
			dr.log.Error("failed to search for provisioned data for cleanup", "org", orgID, "error", err)
			continue
		}

		steps, err := cleanupSteps(provFolders, resources, provisionedNames)
		if err != nil {
			dr.log.Warn("not possible to perform automated duplicate cleanup", "org", orgID, "error", err)
			continue
		}

		// Steps are executed in order (dashboards before their folders, per
		// cleanupSteps); each failure is logged but does not stop the rest.
		for _, step := range steps {
			var err error

			switch step.Type {
			case searchstore.TypeDashboard:
				err = dr.deleteDashboard(ctx, 0, step.UID, orgID, false)
			case searchstore.TypeFolder:
				err = dr.folderService.Delete(ctx, &folder.DeleteFolderCommand{
					OrgID:        orgID,
					SignedInUser: user,
					UID:          step.UID,
				})
			}

			if err == nil {
				dr.log.Info("deleted duplicated provisioned resource",
					"type", step.Type, "uid", step.UID,
				)
			} else {
				dr.log.Error("failed to delete duplicated provisioned resource",
					"type", step.Type, "uid", step.UID, "error", err,
				)
			}
		}
	}
}
|
||||
|
||||
func (dr *DashboardServiceImpl) DeleteDuplicateProvisionedDashboards(ctx context.Context, orgs []int64, configs []dashboards.ProvisioningConfig) error {
|
||||
// Start from scratch if duplications that cannot be fixed by the logic
|
||||
// below are found in the database.
|
||||
dr.maybeResetProvisioning(ctx, orgs, configs)
|
||||
|
||||
// cleanup duplicate provisioned dashboards (i.e., with the same name and external_id).
|
||||
// Note: only works in modes 1-3. This logic can be removed once mode5 is
|
||||
// enabled everywhere.
|
||||
duplicates, err := dr.dashboardStore.GetDuplicateProvisionedDashboards(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1511,6 +1641,8 @@ func (dr *DashboardServiceImpl) FindDashboards(ctx context.Context, query *dashb
|
||||
FolderTitle: folderTitle,
|
||||
FolderID: folderID,
|
||||
FolderSlug: slugify.Slugify(folderTitle),
|
||||
ManagedBy: hit.ManagedBy.Kind,
|
||||
ManagerId: hit.ManagedBy.ID,
|
||||
Tags: hit.Tags,
|
||||
}
|
||||
|
||||
|
||||
@@ -779,7 +779,7 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
|
||||
}, nil).Twice()
|
||||
|
||||
err := service.DeleteOrphanedProvisionedDashboards(context.Background(), &dashboards.DeleteOrphanedProvisionedDashboardsCommand{
|
||||
ReaderNames: []string{"test"},
|
||||
Config: []dashboards.ProvisioningConfig{{Name: "test"}},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
k8sCliMock.AssertExpectations(t)
|
||||
@@ -874,7 +874,7 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
|
||||
}, nil).Once()
|
||||
|
||||
err := singleOrgService.DeleteOrphanedProvisionedDashboards(ctx, &dashboards.DeleteOrphanedProvisionedDashboardsCommand{
|
||||
ReaderNames: []string{"test"},
|
||||
Config: []dashboards.ProvisioningConfig{{Name: "test"}},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
k8sCliMock.AssertExpectations(t)
|
||||
@@ -906,7 +906,7 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
|
||||
}, nil)
|
||||
|
||||
err := singleOrgService.DeleteOrphanedProvisionedDashboards(ctx, &dashboards.DeleteOrphanedProvisionedDashboardsCommand{
|
||||
ReaderNames: []string{"test"},
|
||||
Config: []dashboards.ProvisioningConfig{{Name: "test"}},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
k8sCliMock.AssertExpectations(t)
|
||||
|
||||
@@ -0,0 +1,107 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/services/dashboards"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/searchstore"
|
||||
)
|
||||
|
||||
// canBeAutomaticallyCleanedUp determines whether this instance can be automatically cleaned up
|
||||
// if duplicated provisioned resources are found. To ensure the process does not delete
|
||||
// resources it shouldn't, automatic cleanups only happen if all provisioned dashboards
|
||||
// are stored in the same folder (by title), and no dashboards allow UI updates.
|
||||
func canBeAutomaticallyCleanedUp(configs []dashboards.ProvisioningConfig) string {
|
||||
if len(configs) == 0 {
|
||||
return "no provisioned dashboards"
|
||||
}
|
||||
|
||||
folderTitle := configs[0].Folder
|
||||
if len(folderTitle) == 0 {
|
||||
return fmt.Sprintf("dashboard has no folder: %s", configs[0].Name)
|
||||
}
|
||||
|
||||
for _, cfg := range configs {
|
||||
if cfg.AllowUIUpdates {
|
||||
return "contains dashboards with allowUiUpdates"
|
||||
}
|
||||
|
||||
if cfg.Folder != folderTitle {
|
||||
return "dashboards provisioned across multiple folders"
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// deleteProvisionedResource is a single cleanup step: one resource scheduled
// for deletion during the automated duplicate cleanup.
type deleteProvisionedResource struct {
	// Type is the resource kind — searchstore.TypeDashboard or
	// searchstore.TypeFolder — and selects how the deletion is performed.
	Type string
	// UID identifies the resource to delete.
	UID string
}
|
||||
|
||||
// cleanupSteps computes the sequence of steps to be performed in order to cleanup the
|
||||
// provisioning resources and allow the process to start from scratch when duplication
|
||||
// is detected. The sequence of steps will dictate the order in which dashboards and folders
|
||||
// are to be deleted.
|
||||
func cleanupSteps(provFolders []string, resources []dashboards.DashboardSearchProjection, configDashboards map[string]bool) ([]deleteProvisionedResource, error) {
|
||||
var hasDuplicatedProvisionedDashboard bool
|
||||
var hasUserCreatedResource bool
|
||||
var uniqueNames = map[string]struct{}{}
|
||||
var deleteProvisionedDashboards []deleteProvisionedResource //nolint:prealloc
|
||||
|
||||
for _, r := range resources {
|
||||
// nolint:staticcheck
|
||||
if r.IsFolder || r.ManagedBy != utils.ManagerKindClassicFP {
|
||||
hasUserCreatedResource = true
|
||||
continue
|
||||
}
|
||||
|
||||
// Only delete dashboards if they are included in the provisioning configuration
|
||||
// for this instance.
|
||||
if !configDashboards[r.ManagerId] {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, exists := uniqueNames[r.ManagerId]; exists {
|
||||
hasDuplicatedProvisionedDashboard = true
|
||||
}
|
||||
|
||||
uniqueNames[r.ManagerId] = struct{}{}
|
||||
deleteProvisionedDashboards = append(deleteProvisionedDashboards, deleteProvisionedResource{
|
||||
Type: searchstore.TypeDashboard,
|
||||
UID: r.UID,
|
||||
})
|
||||
}
|
||||
|
||||
if len(provFolders) == 0 {
|
||||
// When there are no provisioned folders, there is nothing to do.
|
||||
return nil, nil
|
||||
} else if len(provFolders) == 1 {
|
||||
// If only one folder was found, keep it and delete the provisioned dashboards if
|
||||
// duplication was found.
|
||||
if hasDuplicatedProvisionedDashboard {
|
||||
return deleteProvisionedDashboards, nil
|
||||
}
|
||||
} else {
|
||||
// If multiple folders were found *and* a user-created resource exists in
|
||||
// one of them, bail, as we wouldn't be able to delete one of the duplicated folders.
|
||||
if hasUserCreatedResource {
|
||||
return nil, errors.New("multiple provisioning folders exist with at least one user-created resource")
|
||||
}
|
||||
|
||||
// Delete provisioned dashboards first, and then the folders.
|
||||
steps := deleteProvisionedDashboards
|
||||
for _, uid := range provFolders {
|
||||
steps = append(steps, deleteProvisionedResource{
|
||||
Type: searchstore.TypeFolder,
|
||||
UID: uid,
|
||||
})
|
||||
}
|
||||
|
||||
return steps, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
@@ -0,0 +1,279 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/services/dashboards"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore/searchstore"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Test_canBeAutomaticallyCleanedUp covers the eligibility rules for automated
// duplicate cleanup: it must be skipped when no dashboards are provisioned,
// when a folder title is missing, when dashboards span multiple folders, or
// when UI updates are allowed; an empty skip reason means cleanup may run.
func Test_canBeAutomaticallyCleanedUp(t *testing.T) {
	testCases := []struct {
		name    string
		configs []dashboards.ProvisioningConfig
		// expectedSkip is the expected skip reason; empty means eligible.
		expectedSkip string
	}{
		{
			name:         "no dashboards defined in the configuration",
			configs:      []dashboards.ProvisioningConfig{},
			expectedSkip: "no provisioned dashboards",
		},
		{
			name: "first defined dashboard has no folder defined",
			configs: []dashboards.ProvisioningConfig{
				{Name: "1", Folder: ""},
				{Folder: "f1"},
			},
			expectedSkip: "dashboard has no folder: 1",
		},
		{
			name: "one of the provisioned dashboards has no folder defined",
			configs: []dashboards.ProvisioningConfig{
				{Name: "1", Folder: "f1"},
				{Name: "2", Folder: "f1"},
				{Name: "3", Folder: ""},
				{Name: "4", Folder: "f1"},
			},
			expectedSkip: "dashboards provisioned across multiple folders",
		},
		{
			name: "one of the provisioned dashboards allows UI updates",
			configs: []dashboards.ProvisioningConfig{
				{Name: "1", Folder: "f1"},
				{Name: "2", Folder: "f1", AllowUIUpdates: true},
				{Name: "3", Folder: "f1"},
				{Name: "4", Folder: "f1"},
			},
			expectedSkip: "contains dashboards with allowUiUpdates",
		},
		{
			name: "one of the provisioned dashboards is in a different folder",
			configs: []dashboards.ProvisioningConfig{
				{Name: "1", Folder: "f1"},
				{Name: "2", Folder: "f1"},
				{Name: "3", Folder: "f1"},
				{Name: "4", Folder: "different"},
			},
			expectedSkip: "dashboards provisioned across multiple folders",
		},
		{
			name: "can be skipped when all conditions are met",
			configs: []dashboards.ProvisioningConfig{
				{Name: "1", Folder: "f1"},
				{Name: "2", Folder: "f1"},
				{Name: "3", Folder: "f1"},
				{Name: "4", Folder: "f1"},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.expectedSkip, canBeAutomaticallyCleanedUp(tc.configs))
		})
	}
}
|
||||
|
||||
// Test_cleanupSteps exercises the duplicate-cleanup planning: given the
// provisioned folders, the resources found in them, and the dashboard names
// known to the provisioning config, it checks which deletion steps are
// produced (and in which order), and when planning must fail with an error.
func Test_cleanupSteps(t *testing.T) {
	isDashboard, isFolder := false, true

	// fromUser builds a search result for a user-created (unmanaged) resource.
	fromUser := func(uid, name string, isFolder bool) dashboards.DashboardSearchProjection {
		return dashboards.DashboardSearchProjection{
			UID:       uid,
			ManagerId: name,
			IsFolder:  isFolder,
		}
	}

	// provisioned builds a search result managed by classic file provisioning.
	provisioned := func(uid, name string, isFolder bool) dashboards.DashboardSearchProjection {
		dashboard := fromUser(uid, name, isFolder)
		dashboard.ManagedBy = utils.ManagerKindClassicFP //nolint:staticcheck
		return dashboard
	}

	testCases := []struct {
		name                 string
		provisionedFolders   []string
		provisionedResources []dashboards.DashboardSearchProjection
		configDashboards     []string
		expectedSteps        []deleteProvisionedResource
		expectedErr          string
	}{
		{
			name:               "no provisioned folders, nothing to do",
			provisionedFolders: []string{},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
			},
		},
		{
			name:               "multiple folders, a user-created dashboard in one of them",
			provisionedFolders: []string{"folder1", "folder2"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				fromUser("d3", "User1", isDashboard),
				provisioned("d4", "Provisioned3", isDashboard),
			},
			expectedErr: "multiple provisioning folders exist with at least one user-created resource",
		},
		{
			name:               "multiple folders, a user-created folder in one of them",
			provisionedFolders: []string{"folder1", "folder2"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned3", isDashboard),
				fromUser("f1", "UserFolder1", isFolder),
			},
			expectedErr: "multiple provisioning folders exist with at least one user-created resource",
		},
		{
			name:               "single folder, some dashboards duplicated",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				// Provisioned1 is duplicated.
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned1", isDashboard),
				provisioned("d4", "Provisioned3", isDashboard),
			},
			expectedSteps: []deleteProvisionedResource{
				{Type: searchstore.TypeDashboard, UID: "d1"},
				{Type: searchstore.TypeDashboard, UID: "d2"},
				{Type: searchstore.TypeDashboard, UID: "d3"},
				{Type: searchstore.TypeDashboard, UID: "d4"},
			},
		},
		{
			name:               "single folder, duplicated dashboards, user-created dashboards are ignored",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				// Provisioned1 is duplicated.
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				fromUser("d3", "User1", isDashboard),
				provisioned("d4", "Provisioned3", isDashboard),
				provisioned("d5", "Provisioned1", isDashboard),
			},
			// User dashboard (d3) is not deleted.
			expectedSteps: []deleteProvisionedResource{
				{Type: searchstore.TypeDashboard, UID: "d1"},
				{Type: searchstore.TypeDashboard, UID: "d2"},
				{Type: searchstore.TypeDashboard, UID: "d4"},
				{Type: searchstore.TypeDashboard, UID: "d5"},
			},
		},
		{
			name:               "single folder, duplicated dashboards, user-created folders are ignored",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				// Provisioned1 is duplicated.
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned3", isDashboard),
				provisioned("d4", "Provisioned1", isDashboard),
				fromUser("f1", "UserFolder1", isFolder),
			},
			// User folder (f1) is not deleted.
			expectedSteps: []deleteProvisionedResource{
				{Type: searchstore.TypeDashboard, UID: "d1"},
				{Type: searchstore.TypeDashboard, UID: "d2"},
				{Type: searchstore.TypeDashboard, UID: "d3"},
				{Type: searchstore.TypeDashboard, UID: "d4"},
			},
		},
		{
			name:               "multiple folders, only provisioned dashboards",
			provisionedFolders: []string{"folder1", "folder2"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned3", isDashboard),
				provisioned("d4", "Provisioned4", isDashboard),
			},
			// Delete all dashboards, then all folders.
			expectedSteps: []deleteProvisionedResource{
				{Type: searchstore.TypeDashboard, UID: "d1"},
				{Type: searchstore.TypeDashboard, UID: "d2"},
				{Type: searchstore.TypeDashboard, UID: "d3"},
				{Type: searchstore.TypeDashboard, UID: "d4"},
				{Type: searchstore.TypeFolder, UID: "folder1"},
				{Type: searchstore.TypeFolder, UID: "folder2"},
			},
		},
		{
			name:               "single folder, only deletes dashboards defined in the config file",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned1", isDashboard),
				provisioned("d4", "Provisioned4", isDashboard),
				provisioned("d5", "Provisioned4", isDashboard),
			},
			// Delete duplicated dashboards, but keep Provisioned4, since it's not in the config file.
			expectedSteps: []deleteProvisionedResource{
				{Type: searchstore.TypeDashboard, UID: "d1"},
				{Type: searchstore.TypeDashboard, UID: "d2"},
				{Type: searchstore.TypeDashboard, UID: "d3"},
			},
		},
		{
			name:               "single folder, no duplicated dashboards",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned3", isDashboard),
				provisioned("d4", "Provisioned4", isDashboard),
			},
			expectedSteps: nil, // no duplicates, nothing to do
		},
		{
			name:               "single folder, no duplicated dashboards, multiple user-created resources",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				fromUser("f1", "UserFolder1", isFolder),
				provisioned("d3", "Provisioned3", isDashboard),
				fromUser("d4", "User1", isDashboard),
				provisioned("d5", "Provisioned4", isDashboard),
				fromUser("d6", "User2", isDashboard),
				fromUser("f2", "UserFolder2", isFolder),
			},
			expectedSteps: nil, // no duplicates in the provisioned set, nothing to do
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// cleanupSteps expects the configured dashboard names as a set.
			provisionedSet := make(map[string]bool)
			for _, name := range tc.configDashboards {
				provisionedSet[name] = true
			}

			steps, err := cleanupSteps(tc.provisionedFolders, tc.provisionedResources, provisionedSet)
			if tc.expectedErr == "" {
				require.NoError(t, err)
				require.Equal(t, tc.expectedSteps, steps)
			} else {
				require.Error(t, err)
				require.Equal(t, tc.expectedErr, err.Error())
			}
		})
	}
}
|
||||
@@ -202,6 +202,11 @@ func (s *Service) searchFoldersFromApiServer(ctx context.Context, query folder.S
|
||||
if query.Title != "" {
|
||||
// allow wildcard search
|
||||
request.Query = "*" + strings.ToLower(query.Title) + "*"
|
||||
// or perform exact match if requested
|
||||
if query.TitleExactMatch {
|
||||
request.Query = query.Title
|
||||
}
|
||||
|
||||
// if using query, you need to specify the fields you want
|
||||
request.Fields = dashboardsearch.IncludeFields
|
||||
}
|
||||
|
||||
@@ -224,12 +224,13 @@ type GetFoldersQuery struct {
|
||||
}
|
||||
|
||||
type SearchFoldersQuery struct {
|
||||
OrgID int64
|
||||
UIDs []string
|
||||
IDs []int64
|
||||
Title string
|
||||
Limit int64
|
||||
SignedInUser identity.Requester `json:"-"`
|
||||
OrgID int64
|
||||
UIDs []string
|
||||
IDs []int64
|
||||
Title string
|
||||
TitleExactMatch bool
|
||||
Limit int64
|
||||
SignedInUser identity.Requester `json:"-"`
|
||||
}
|
||||
|
||||
// GetParentsQuery captures the information required by the folder service to
|
||||
|
||||
@@ -412,11 +412,16 @@ func (srv RulerSrv) RoutePostNameRulesConfig(c *contextmodel.ReqContext, ruleGro
|
||||
deletePermanently = true
|
||||
}
|
||||
|
||||
namespace, err := srv.store.GetNamespaceByUID(c.Req.Context(), namespaceUID, c.GetOrgID(), c.SignedInUser)
|
||||
f, err := srv.store.GetNamespaceByUID(c.Req.Context(), namespaceUID, c.GetOrgID(), c.SignedInUser)
|
||||
if err != nil {
|
||||
return toNamespaceErrorResponse(err)
|
||||
}
|
||||
|
||||
namespace := ngmodels.NewNamespace(f)
|
||||
if err := namespace.ValidateForRuleStorage(); err != nil {
|
||||
return ErrResp(http.StatusBadRequest, fmt.Errorf("%w: %s", ngmodels.ErrAlertRuleFailedValidation, err), "")
|
||||
}
|
||||
|
||||
if err := srv.checkGroupLimits(ruleGroupConfig); err != nil {
|
||||
return ErrResp(http.StatusBadRequest, err, "")
|
||||
}
|
||||
@@ -841,10 +846,14 @@ func (srv RulerSrv) RouteUpdateNamespaceRules(c *contextmodel.ReqContext, body a
|
||||
return ErrResp(http.StatusBadRequest, errors.New("missing request body"), "")
|
||||
}
|
||||
|
||||
namespace, err := srv.store.GetNamespaceByUID(c.Req.Context(), namespaceUID, c.GetOrgID(), c.SignedInUser)
|
||||
f, err := srv.store.GetNamespaceByUID(c.Req.Context(), namespaceUID, c.GetOrgID(), c.SignedInUser)
|
||||
if err != nil {
|
||||
return toNamespaceErrorResponse(err)
|
||||
}
|
||||
namespace := ngmodels.NewNamespace(f)
|
||||
if err := namespace.ValidateForRuleStorage(); err != nil {
|
||||
return ErrResp(http.StatusBadRequest, fmt.Errorf("%w: %s", ngmodels.ErrAlertRuleFailedValidation, err), "")
|
||||
}
|
||||
|
||||
ruleGroups, _, err := srv.searchAuthorizedAlertRules(c.Req.Context(), authorizedRuleGroupQuery{
|
||||
User: c.SignedInUser,
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/grafana/grafana/pkg/apimachinery/utils"
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
ac "github.com/grafana/grafana/pkg/services/accesscontrol"
|
||||
"github.com/grafana/grafana/pkg/services/accesscontrol/acimpl"
|
||||
@@ -1288,4 +1289,64 @@ func TestRouteUpdateNamespaceRules(t *testing.T) {
|
||||
updatedRules := getRecordedUpdatedRules(ruleStore)
|
||||
require.Empty(t, updatedRules)
|
||||
})
|
||||
|
||||
t.Run("should reject update when folder is managed by ManagerKindRepo", func(t *testing.T) {
|
||||
ruleStore := fakes.NewRuleStore(t)
|
||||
provisioningStore := fakes.NewFakeProvisioningStore()
|
||||
|
||||
// Create a managed folder
|
||||
managedFolder := randFolder()
|
||||
managedFolder.ManagedBy = utils.ManagerKindRepo
|
||||
ruleStore.Folders[orgID] = append(ruleStore.Folders[orgID], managedFolder)
|
||||
|
||||
// Create some rules in the managed folder
|
||||
ruleGen := models.RuleGen.With(
|
||||
models.RuleGen.WithOrgID(orgID),
|
||||
models.RuleGen.WithNamespaceUID(managedFolder.UID),
|
||||
)
|
||||
rules := ruleGen.GenerateManyRef(2)
|
||||
ruleStore.PutRule(context.Background(), rules...)
|
||||
|
||||
permissions := createPermissionsForRules(rules, orgID)
|
||||
requestCtx := createRequestContextWithPerms(orgID, permissions, nil)
|
||||
|
||||
svc := createServiceWithProvenanceStore(ruleStore, provisioningStore)
|
||||
response := svc.RouteUpdateNamespaceRules(requestCtx, apimodels.UpdateNamespaceRulesRequest{
|
||||
IsPaused: util.Pointer(true),
|
||||
}, managedFolder.UID)
|
||||
|
||||
require.Equal(t, http.StatusBadRequest, response.Status())
|
||||
require.Contains(t, string(response.Body()), "cannot store rules in folder managed by Git Sync")
|
||||
|
||||
// Verify no rules were updated
|
||||
updatedRules := getRecordedUpdatedRules(ruleStore)
|
||||
require.Empty(t, updatedRules)
|
||||
})
|
||||
}
|
||||
|
||||
// TestRoutePostNameRulesConfig verifies that posting a rule group into a
// folder managed by ManagerKindRepo is rejected with HTTP 400 and the
// "managed by Git Sync" error message.
func TestRoutePostNameRulesConfig(t *testing.T) {
	t.Run("should reject creation when folder is managed by ManagerKindRepo", func(t *testing.T) {
		orgID := rand.Int63()
		ruleStore := fakes.NewRuleStore(t)

		// Create a managed folder
		managedFolder := randFolder()
		managedFolder.ManagedBy = utils.ManagerKindRepo
		ruleStore.Folders[orgID] = append(ruleStore.Folders[orgID], managedFolder)

		// Grant folder read access so the rejection comes from the
		// managed-folder validation rather than missing permissions.
		permissions := map[int64]map[string][]string{
			orgID: {
				dashboards.ScopeFoldersProvider.GetResourceScopeUID(managedFolder.UID): {dashboards.ActionFoldersRead},
			},
		}
		requestCtx := createRequestContextWithPerms(orgID, permissions, nil)

		svc := createService(ruleStore, nil)
		response := svc.RoutePostNameRulesConfig(requestCtx, apimodels.PostableRuleGroupConfig{
			Name: "test-group",
		}, managedFolder.UID)

		require.Equal(t, http.StatusBadRequest, response.Status())
		require.Contains(t, string(response.Body()), "cannot store rules in folder managed by Git Sync")
	})
}
|
||||
|
||||
@@ -296,7 +296,7 @@ func (srv PrometheusSrv) RouteGetRuleStatuses(c *contextmodel.ReqContext) respon
|
||||
allowedNamespaces := map[string]string{}
|
||||
for namespaceUID, folder := range namespaceMap {
|
||||
// only add namespaces that the user has access to rules in
|
||||
hasAccess, err := srv.authz.HasAccessInFolder(c.Req.Context(), c.SignedInUser, ngmodels.Namespace(*folder.ToFolderReference()))
|
||||
hasAccess, err := srv.authz.HasAccessInFolder(c.Req.Context(), c.SignedInUser, ngmodels.NewNamespace(folder))
|
||||
if err != nil {
|
||||
ruleResponse.Status = "error"
|
||||
ruleResponse.Error = fmt.Sprintf("failed to get namespaces visible to the user: %s", err.Error())
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user