Compare commits

..

5 Commits

Author SHA1 Message Date
Oscar Kilhed 4f55f483e6 Merge branch 'main' into oscark/fix-link-overflowing-on-small-viewport 2025-12-15 15:51:40 +01:00
oscarkilhed 130d33fdf1 Merge branch 'main' into oscark/fix-link-overflowing-on-small-viewport 2025-12-09 12:38:42 +01:00
oscarkilhed 7b4854f8af fix margin to rest of the dashboard 2025-12-09 12:38:31 +01:00
oscarkilhed f0064e03c9 reset eslint-supressions.json 2025-12-08 11:26:14 +01:00
oscarkilhed 80f8c4cc10 Stop text overflowing on small viewports 2025-12-08 11:21:47 +01:00
129 changed files with 629 additions and 4880 deletions
+4 -4
View File
@@ -208,7 +208,7 @@
/pkg/tests/apis/shorturl @grafana/sharing-squad
/pkg/tests/api/correlations/ @grafana/datapro
/pkg/tsdb/grafanads/ @grafana/grafana-backend-group
/pkg/tsdb/opentsdb/ @grafana/oss-big-tent
/pkg/tsdb/opentsdb/ @grafana/partner-datasources
/pkg/util/ @grafana/grafana-backend-group
/pkg/web/ @grafana/grafana-backend-group
@@ -260,7 +260,7 @@
/devenv/dev-dashboards/dashboards.go @grafana/dataviz-squad
/devenv/dev-dashboards/home.json @grafana/dataviz-squad
/devenv/dev-dashboards/datasource-elasticsearch/ @grafana/partner-datasources
/devenv/dev-dashboards/datasource-opentsdb/ @grafana/oss-big-tent
/devenv/dev-dashboards/datasource-opentsdb/ @grafana/partner-datasources
/devenv/dev-dashboards/datasource-influxdb/ @grafana/partner-datasources
/devenv/dev-dashboards/datasource-mssql/ @grafana/partner-datasources
/devenv/dev-dashboards/datasource-loki/ @grafana/plugins-platform-frontend
@@ -307,7 +307,7 @@
/devenv/docker/blocks/mysql_exporter/ @grafana/oss-big-tent
/devenv/docker/blocks/mysql_opendata/ @grafana/oss-big-tent
/devenv/docker/blocks/mysql_tests/ @grafana/oss-big-tent
/devenv/docker/blocks/opentsdb/ @grafana/oss-big-tent
/devenv/docker/blocks/opentsdb/ @grafana/partner-datasources
/devenv/docker/blocks/postgres/ @grafana/oss-big-tent
/devenv/docker/blocks/postgres_tests/ @grafana/oss-big-tent
/devenv/docker/blocks/prometheus/ @grafana/oss-big-tent
@@ -1101,7 +1101,7 @@ eslint-suppressions.json @grafanabot
/public/app/plugins/datasource/mixed/ @grafana/dashboards-squad
/public/app/plugins/datasource/mssql/ @grafana/partner-datasources
/public/app/plugins/datasource/mysql/ @grafana/oss-big-tent
/public/app/plugins/datasource/opentsdb/ @grafana/oss-big-tent
/public/app/plugins/datasource/opentsdb/ @grafana/partner-datasources
/public/app/plugins/datasource/grafana-postgresql-datasource/ @grafana/oss-big-tent
/public/app/plugins/datasource/prometheus/ @grafana/oss-big-tent
/public/app/plugins/datasource/cloud-monitoring/ @grafana/partner-datasources
+6 -7
View File
@@ -111,13 +111,12 @@ jobs:
ownerRepo: 'grafana/grafana-enterprise'
from: ${{ needs.setup.outputs.release_branch }}
to: ${{ needs.create_next_release_branch_enterprise.outputs.branch }}
# Removed this for now since it doesn't work
# post_changelog_on_forum:
# needs: setup
# uses: grafana/grafana/.github/workflows/community-release.yml@main
# with:
# version: ${{ needs.setup.outputs.version }}
# dry_run: ${{ needs.setup.outputs.dry_run == 'true' }}
post_changelog_on_forum:
needs: setup
uses: grafana/grafana/.github/workflows/community-release.yml@main
with:
version: ${{ needs.setup.outputs.version }}
dry_run: ${{ needs.setup.outputs.dry_run == 'true' }}
create_github_release:
# a github release requires a git tag
# The github-release action retrieves the changelog using the /repos/grafana/grafana/contents/CHANGELOG.md API
+1 -1
View File
@@ -149,7 +149,7 @@ require (
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/google/wire v0.7.0 // indirect
github.com/grafana/alerting v0.0.0-20251212143239-491433b332b7 // indirect
github.com/grafana/alerting v0.0.0-20251204145817-de8c2bbf9eba // indirect
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect
github.com/grafana/dataplane/sdata v0.0.9 // indirect
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect
+2 -2
View File
@@ -606,8 +606,8 @@ github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/grafana/alerting v0.0.0-20251212143239-491433b332b7 h1:ZzG/gCclEit9w0QUfQt9GURcOycAIGcsQAhY1u0AEX0=
github.com/grafana/alerting v0.0.0-20251212143239-491433b332b7/go.mod h1:l7v67cgP7x72ajB9UPZlumdrHqNztpKoqQ52cU8T3LU=
github.com/grafana/alerting v0.0.0-20251204145817-de8c2bbf9eba h1:psKWNETD5nGxmFAlqnWsXoRyUwSa2GHNEMSEDKGKfQ4=
github.com/grafana/alerting v0.0.0-20251204145817-de8c2bbf9eba/go.mod h1:l7v67cgP7x72ajB9UPZlumdrHqNztpKoqQ52cU8T3LU=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
+1 -1
View File
@@ -4,7 +4,7 @@ go 1.25.5
require (
github.com/go-kit/log v0.2.1
github.com/grafana/alerting v0.0.0-20251212143239-491433b332b7
github.com/grafana/alerting v0.0.0-20251204145817-de8c2bbf9eba
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4
github.com/grafana/grafana-app-sdk v0.48.5
github.com/grafana/grafana-app-sdk/logging v0.48.3
+4 -2
View File
@@ -216,10 +216,12 @@ github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grafana/grafana-app-sdk v0.48.5 h1:MS8l9fTZz+VbTfgApn09jw27GxhQ6fNOWGhC4ydvZmM=
github.com/grafana/grafana-app-sdk v0.48.5/go.mod h1:HJsMOSBmt/D/Ihs1SvagOwmXKi0coBMVHlfvdd+qe9Y=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/grafana/alerting v0.0.0-20251212143239-491433b332b7 h1:ZzG/gCclEit9w0QUfQt9GURcOycAIGcsQAhY1u0AEX0=
github.com/grafana/alerting v0.0.0-20251212143239-491433b332b7/go.mod h1:l7v67cgP7x72ajB9UPZlumdrHqNztpKoqQ52cU8T3LU=
github.com/grafana/alerting v0.0.0-20251204145817-de8c2bbf9eba h1:psKWNETD5nGxmFAlqnWsXoRyUwSa2GHNEMSEDKGKfQ4=
github.com/grafana/alerting v0.0.0-20251204145817-de8c2bbf9eba/go.mod h1:l7v67cgP7x72ajB9UPZlumdrHqNztpKoqQ52cU8T3LU=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4=
github.com/grafana/grafana-app-sdk v0.48.5 h1:MS8l9fTZz+VbTfgApn09jw27GxhQ6fNOWGhC4ydvZmM=
@@ -1,74 +0,0 @@
package v2alpha1
import (
"k8s.io/apimachinery/pkg/runtime"
)
// SetDefaults_Dashboard ensures all panel queries have unique refIds
// This is called by the Kubernetes defaulting mechanism when dashboards are returned
func SetDefaults_Dashboard(obj *Dashboard) {
EnsureUniqueRefIds(&obj.Spec)
}
// EnsureUniqueRefIds ensures all queries within each panel have unique refIds
// This matches the frontend behavior in PanelModel.ensureQueryIds()
func EnsureUniqueRefIds(spec *DashboardSpec) {
for _, element := range spec.Elements {
if element.PanelKind != nil {
ensureUniqueRefIdsForPanel(element.PanelKind)
}
}
}
func ensureUniqueRefIdsForPanel(panel *DashboardPanelKind) {
queries := panel.Spec.Data.Spec.Queries
if len(queries) == 0 {
return
}
// First pass: collect existing refIds
existingRefIds := make(map[string]bool)
for i := range queries {
if queries[i].Spec.RefId != "" {
existingRefIds[queries[i].Spec.RefId] = true
}
}
// Second pass: assign unique refIds to queries without one
for i := range queries {
if queries[i].Spec.RefId == "" {
queries[i].Spec.RefId = getNextRefId(existingRefIds)
existingRefIds[queries[i].Spec.RefId] = true
}
}
}
// getNextRefId generates the next available refId (A, B, C, ..., Z, AA, AB, etc.)
// This matches the frontend behavior in packages/grafana-data/src/query/refId.ts
func getNextRefId(existingRefIds map[string]bool) string {
for num := 0; ; num++ {
refId := getRefIdFromNumber(num)
if !existingRefIds[refId] {
return refId
}
}
}
// getRefIdFromNumber converts a number to a refId (0=A, 1=B, ..., 25=Z, 26=AA, 27=AB, etc.)
func getRefIdFromNumber(num int) string {
const letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
if num < len(letters) {
return string(letters[num])
}
return getRefIdFromNumber(num/len(letters)-1) + string(letters[num%len(letters)])
}
// RegisterCustomDefaults registers custom defaulting functions for Dashboard types.
// This should be called from RegisterDefaults in zz_generated.defaults.go
// However, since that file is auto-generated, we provide this as a separate registration
func RegisterCustomDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&Dashboard{}, func(obj interface{}) {
SetDefaults_Dashboard(obj.(*Dashboard))
})
return nil
}
@@ -99,9 +99,5 @@ func addKnownTypes(scheme *runtime.Scheme) error {
}
func addDefaultingFuncs(scheme *runtime.Scheme) error {
if err := RegisterDefaults(scheme); err != nil {
return err
}
// Register custom defaults to ensure unique refIds in panel queries
return RegisterCustomDefaults(scheme)
return RegisterDefaults(scheme)
}
@@ -1,74 +0,0 @@
package v2beta1
import (
"k8s.io/apimachinery/pkg/runtime"
)
// SetDefaults_Dashboard ensures all panel queries have unique refIds
// This is called by the Kubernetes defaulting mechanism when dashboards are returned
func SetDefaults_Dashboard(obj *Dashboard) {
EnsureUniqueRefIds(&obj.Spec)
}
// EnsureUniqueRefIds ensures all queries within each panel have unique refIds
// This matches the frontend behavior in PanelModel.ensureQueryIds()
func EnsureUniqueRefIds(spec *DashboardSpec) {
for _, element := range spec.Elements {
if element.PanelKind != nil {
ensureUniqueRefIdsForPanel(element.PanelKind)
}
}
}
func ensureUniqueRefIdsForPanel(panel *DashboardPanelKind) {
queries := panel.Spec.Data.Spec.Queries
if len(queries) == 0 {
return
}
// First pass: collect existing refIds
existingRefIds := make(map[string]bool)
for i := range queries {
if queries[i].Spec.RefId != "" {
existingRefIds[queries[i].Spec.RefId] = true
}
}
// Second pass: assign unique refIds to queries without one
for i := range queries {
if queries[i].Spec.RefId == "" {
queries[i].Spec.RefId = getNextRefId(existingRefIds)
existingRefIds[queries[i].Spec.RefId] = true
}
}
}
// getNextRefId generates the next available refId (A, B, C, ..., Z, AA, AB, etc.)
// This matches the frontend behavior in packages/grafana-data/src/query/refId.ts
func getNextRefId(existingRefIds map[string]bool) string {
for num := 0; ; num++ {
refId := getRefIdFromNumber(num)
if !existingRefIds[refId] {
return refId
}
}
}
// getRefIdFromNumber converts a number to a refId (0=A, 1=B, ..., 25=Z, 26=AA, 27=AB, etc.)
func getRefIdFromNumber(num int) string {
const letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
if num < len(letters) {
return string(letters[num])
}
return getRefIdFromNumber(num/len(letters)-1) + string(letters[num%len(letters)])
}
// RegisterCustomDefaults registers custom defaulting functions for Dashboard types.
// This should be called from RegisterDefaults in zz_generated.defaults.go
// However, since that file is auto-generated, we provide this as a separate registration
func RegisterCustomDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&Dashboard{}, func(obj interface{}) {
SetDefaults_Dashboard(obj.(*Dashboard))
})
return nil
}
@@ -1,219 +0,0 @@
package v2beta1
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestGetRefIdFromNumber(t *testing.T) {
testCases := []struct {
num int
expected string
}{
{0, "A"},
{1, "B"},
{25, "Z"},
{26, "AA"},
{27, "AB"},
{51, "AZ"},
{52, "BA"},
{701, "ZZ"},
{702, "AAA"},
}
for _, tc := range testCases {
t.Run(tc.expected, func(t *testing.T) {
result := getRefIdFromNumber(tc.num)
assert.Equal(t, tc.expected, result, "getRefIdFromNumber(%d) should return %s", tc.num, tc.expected)
})
}
}
func TestGetNextRefId(t *testing.T) {
testCases := []struct {
name string
existing map[string]bool
expected string
}{
{
name: "empty map returns A",
existing: map[string]bool{},
expected: "A",
},
{
name: "A exists returns B",
existing: map[string]bool{"A": true},
expected: "B",
},
{
name: "A and B exist returns C",
existing: map[string]bool{"A": true, "B": true},
expected: "C",
},
{
name: "gap in sequence returns first available",
existing: map[string]bool{"A": true, "C": true, "D": true},
expected: "B",
},
{
name: "A-Z exist returns AA",
existing: func() map[string]bool {
m := make(map[string]bool)
for i := 0; i < 26; i++ {
m[string(rune('A'+i))] = true
}
return m
}(),
expected: "AA",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
result := getNextRefId(tc.existing)
assert.Equal(t, tc.expected, result)
})
}
}
func TestEnsureUniqueRefIds(t *testing.T) {
t.Run("assigns unique refIds to queries without refIds", func(t *testing.T) {
spec := &DashboardSpec{
Elements: map[string]DashboardElement{
"panel-1": {
PanelKind: &DashboardPanelKind{
Kind: "Panel",
Spec: DashboardPanelSpec{
Data: DashboardQueryGroupKind{
Spec: DashboardQueryGroupSpec{
Queries: []DashboardPanelQueryKind{
{Spec: DashboardPanelQuerySpec{RefId: ""}},
{Spec: DashboardPanelQuerySpec{RefId: ""}},
{Spec: DashboardPanelQuerySpec{RefId: ""}},
},
},
},
},
},
},
},
}
EnsureUniqueRefIds(spec)
panel := spec.Elements["panel-1"].PanelKind
require.NotNil(t, panel)
require.Len(t, panel.Spec.Data.Spec.Queries, 3)
assert.Equal(t, "A", panel.Spec.Data.Spec.Queries[0].Spec.RefId)
assert.Equal(t, "B", panel.Spec.Data.Spec.Queries[1].Spec.RefId)
assert.Equal(t, "C", panel.Spec.Data.Spec.Queries[2].Spec.RefId)
})
t.Run("preserves existing refIds and fills gaps", func(t *testing.T) {
spec := &DashboardSpec{
Elements: map[string]DashboardElement{
"panel-1": {
PanelKind: &DashboardPanelKind{
Kind: "Panel",
Spec: DashboardPanelSpec{
Data: DashboardQueryGroupKind{
Spec: DashboardQueryGroupSpec{
Queries: []DashboardPanelQueryKind{
{Spec: DashboardPanelQuerySpec{RefId: "A"}},
{Spec: DashboardPanelQuerySpec{RefId: ""}},
{Spec: DashboardPanelQuerySpec{RefId: "D"}},
{Spec: DashboardPanelQuerySpec{RefId: ""}},
},
},
},
},
},
},
},
}
EnsureUniqueRefIds(spec)
panel := spec.Elements["panel-1"].PanelKind
require.NotNil(t, panel)
require.Len(t, panel.Spec.Data.Spec.Queries, 4)
assert.Equal(t, "A", panel.Spec.Data.Spec.Queries[0].Spec.RefId)
assert.Equal(t, "B", panel.Spec.Data.Spec.Queries[1].Spec.RefId)
assert.Equal(t, "D", panel.Spec.Data.Spec.Queries[2].Spec.RefId)
assert.Equal(t, "C", panel.Spec.Data.Spec.Queries[3].Spec.RefId)
})
t.Run("handles library panels (no modification)", func(t *testing.T) {
spec := &DashboardSpec{
Elements: map[string]DashboardElement{
"panel-1": {
LibraryPanelKind: &DashboardLibraryPanelKind{
Kind: "LibraryPanel",
Spec: DashboardLibraryPanelKindSpec{
LibraryPanel: DashboardLibraryPanelRef{
Uid: "lib-uid",
Name: "lib-name",
},
},
},
},
},
}
// Should not panic
EnsureUniqueRefIds(spec)
})
t.Run("handles multiple panels", func(t *testing.T) {
spec := &DashboardSpec{
Elements: map[string]DashboardElement{
"panel-1": {
PanelKind: &DashboardPanelKind{
Kind: "Panel",
Spec: DashboardPanelSpec{
Data: DashboardQueryGroupKind{
Spec: DashboardQueryGroupSpec{
Queries: []DashboardPanelQueryKind{
{Spec: DashboardPanelQuerySpec{RefId: ""}},
{Spec: DashboardPanelQuerySpec{RefId: ""}},
},
},
},
},
},
},
"panel-2": {
PanelKind: &DashboardPanelKind{
Kind: "Panel",
Spec: DashboardPanelSpec{
Data: DashboardQueryGroupKind{
Spec: DashboardQueryGroupSpec{
Queries: []DashboardPanelQueryKind{
{Spec: DashboardPanelQuerySpec{RefId: ""}},
{Spec: DashboardPanelQuerySpec{RefId: ""}},
},
},
},
},
},
},
},
}
EnsureUniqueRefIds(spec)
// Each panel should have unique refIds independently
panel1 := spec.Elements["panel-1"].PanelKind
panel2 := spec.Elements["panel-2"].PanelKind
require.NotNil(t, panel1)
require.NotNil(t, panel2)
assert.Equal(t, "A", panel1.Spec.Data.Spec.Queries[0].Spec.RefId)
assert.Equal(t, "B", panel1.Spec.Data.Spec.Queries[1].Spec.RefId)
assert.Equal(t, "A", panel2.Spec.Data.Spec.Queries[0].Spec.RefId)
assert.Equal(t, "B", panel2.Spec.Data.Spec.Queries[1].Spec.RefId)
})
}
@@ -99,9 +99,5 @@ func addKnownTypes(scheme *runtime.Scheme) error {
}
func addDefaultingFuncs(scheme *runtime.Scheme) error {
if err := RegisterDefaults(scheme); err != nil {
return err
}
// Register custom defaults to ensure unique refIds in panel queries
return RegisterCustomDefaults(scheme)
return RegisterDefaults(scheme)
}
@@ -530,7 +530,7 @@
"kind": "RowsLayoutRow",
"spec": {
"title": "",
"collapse": false,
"collapse": true,
"hideHeader": true,
"layout": {
"kind": "GridLayout",
@@ -546,7 +546,7 @@
"kind": "RowsLayoutRow",
"spec": {
"title": "",
"collapse": false,
"collapse": true,
"hideHeader": true,
"layout": {
"kind": "GridLayout",
@@ -548,7 +548,7 @@
"kind": "RowsLayoutRow",
"spec": {
"title": "",
"collapse": false,
"collapse": true,
"hideHeader": true,
"layout": {
"kind": "GridLayout",
@@ -574,7 +574,7 @@
"kind": "RowsLayoutRow",
"spec": {
"title": "",
"collapse": false,
"collapse": true,
"hideHeader": true,
"layout": {
"kind": "GridLayout",
@@ -1663,7 +1663,7 @@
"kind": "RowsLayoutRow",
"spec": {
"title": "",
"collapse": false,
"collapse": true,
"hideHeader": true,
"layout": {
"kind": "GridLayout",
@@ -1727,7 +1727,7 @@
"kind": "RowsLayoutRow",
"spec": {
"title": "",
"collapse": false,
"collapse": true,
"hideHeader": true,
"layout": {
"kind": "GridLayout",
@@ -328,7 +328,7 @@
"kind": "RowsLayoutRow",
"spec": {
"title": "",
"collapse": false,
"collapse": true,
"hideHeader": true,
"layout": {
"kind": "GridLayout",
@@ -335,7 +335,7 @@
"kind": "RowsLayoutRow",
"spec": {
"title": "",
"collapse": false,
"collapse": true,
"hideHeader": true,
"layout": {
"kind": "GridLayout",
@@ -501,9 +501,11 @@ func convertToRowsLayout(ctx context.Context, panels []interface{}, dsIndexProvi
if currentRow != nil {
// If currentRow is a hidden-header row (panels before first explicit row),
// it should not be collapsed because it will disappear and be visible only in edit mode
// set its collapse to match the first explicit row's collapsed value
// This matches frontend behavior: collapse: panel.collapsed
if currentRow.Spec.HideHeader != nil && *currentRow.Spec.HideHeader {
currentRow.Spec.Collapse = &[]bool{false}[0]
rowCollapsed := getBoolField(panelMap, "collapsed", false)
currentRow.Spec.Collapse = &rowCollapsed
}
// Flush current row to layout
rows = append(rows, *currentRow)
@@ -75,9 +75,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false,
"rounded": true,
"spotlight": false
"spotlight": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -154,9 +154,9 @@
"effects": {
"barGlow": false,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": false
"spotlight": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -233,9 +233,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": false
"spotlight": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -312,9 +312,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -391,9 +391,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -470,9 +470,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": false,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -549,9 +549,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": false,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -641,9 +641,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -720,9 +720,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -799,9 +799,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -878,9 +878,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -974,9 +974,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -1053,9 +1053,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -1132,9 +1132,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": true
},
"orientation": "auto",
"reduceOptions": {
@@ -1211,9 +1211,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -1290,9 +1290,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -1386,9 +1386,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": true
},
"orientation": "auto",
"reduceOptions": {
@@ -1469,9 +1469,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": true
},
"orientation": "auto",
"reduceOptions": {
@@ -1552,9 +1552,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": true
},
"orientation": "auto",
"reduceOptions": {
@@ -1643,9 +1643,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
@@ -1727,9 +1727,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
@@ -1825,9 +1825,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
@@ -1910,9 +1910,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
@@ -1994,9 +1994,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
@@ -2078,9 +2078,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
@@ -2172,9 +2172,7 @@
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -2240,9 +2238,7 @@
},
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
@@ -2279,4 +2275,4 @@
"title": "Panel tests - Gauge (new)",
"uid": "panel-tests-gauge-new",
"weekStart": ""
}
}
@@ -955,9 +955,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -1162,4 +1162,4 @@
"title": "Panel tests - Old gauge to new",
"uid": "panel-tests-old-gauge-to-new",
"weekStart": ""
}
}
+1 -1
View File
@@ -221,7 +221,7 @@ require (
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/googleapis/gax-go/v2 v2.15.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/grafana/alerting v0.0.0-20251212143239-491433b332b7 // indirect
github.com/grafana/alerting v0.0.0-20251204145817-de8c2bbf9eba // indirect
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 // indirect
github.com/grafana/dataplane/sdata v0.0.9 // indirect
+2 -2
View File
@@ -817,8 +817,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grafana/alerting v0.0.0-20251212143239-491433b332b7 h1:ZzG/gCclEit9w0QUfQt9GURcOycAIGcsQAhY1u0AEX0=
github.com/grafana/alerting v0.0.0-20251212143239-491433b332b7/go.mod h1:l7v67cgP7x72ajB9UPZlumdrHqNztpKoqQ52cU8T3LU=
github.com/grafana/alerting v0.0.0-20251204145817-de8c2bbf9eba h1:psKWNETD5nGxmFAlqnWsXoRyUwSa2GHNEMSEDKGKfQ4=
github.com/grafana/alerting v0.0.0-20251204145817-de8c2bbf9eba/go.mod h1:l7v67cgP7x72ajB9UPZlumdrHqNztpKoqQ52cU8T3LU=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
+1 -1
View File
@@ -74,7 +74,7 @@ require (
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grafana/alerting v0.0.0-20251212143239-491433b332b7 // indirect
github.com/grafana/alerting v0.0.0-20251204145817-de8c2bbf9eba // indirect
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 // indirect
github.com/grafana/dataplane/sdata v0.0.9 // indirect
+2 -2
View File
@@ -174,8 +174,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grafana/alerting v0.0.0-20251212143239-491433b332b7 h1:ZzG/gCclEit9w0QUfQt9GURcOycAIGcsQAhY1u0AEX0=
github.com/grafana/alerting v0.0.0-20251212143239-491433b332b7/go.mod h1:l7v67cgP7x72ajB9UPZlumdrHqNztpKoqQ52cU8T3LU=
github.com/grafana/alerting v0.0.0-20251204145817-de8c2bbf9eba h1:psKWNETD5nGxmFAlqnWsXoRyUwSa2GHNEMSEDKGKfQ4=
github.com/grafana/alerting v0.0.0-20251204145817-de8c2bbf9eba/go.mod h1:l7v67cgP7x72ajB9UPZlumdrHqNztpKoqQ52cU8T3LU=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
-4
View File
@@ -1327,10 +1327,6 @@ alertmanager_max_silences_count =
# Maximum silence size in bytes. Default: 0 (no limit).
alertmanager_max_silence_size_bytes =
# Maximum size of the expanded template output in bytes. Default: 10485760 (0 - no limit).
# The result of template expansion will be truncated to the limit.
alertmanager_max_template_output_bytes =
# Redis server address or addresses. It can be a single Redis address if using Redis standalone,
# or a list of comma-separated addresses if using Redis Cluster/Sentinel.
ha_redis_address =
@@ -21,28 +21,11 @@ weight: 120
# Install a plugin
{{< admonition type="note" >}}
Installing plugins from the Grafana website into a Grafana Cloud instance will be removed in February 2026.
If you're a Grafana Cloud user, follow [Install a plugin through the Grafana UI](#install-a-plugin-through-the-grafana-uiinstall-a-plugin-through-the-grafana-ui) instead.
{{< /admonition >}}
## Install a plugin through the Grafana UI
The most common way to install a plugin is through the Grafana UI.
1. In Grafana, click **Administration > Plugins and data > Plugins** in the side navigation menu to view all plugins.
1. Browse and find a plugin.
1. Click the plugin's logo.
1. Click **Install**.
You can use use the following alternative methods to install a plugin depending on your environment or setup.
Besides the UI, you can use alternative methods to install a plugin depending on your environment or set-up.
## Install a plugin using Grafana CLI
The Grafana CLI allows you to install, upgrade, and manage your Grafana plugins using a command line tool. For more information about Grafana CLI plugin commands, refer to [Plugin commands](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/administration/cli/#plugins-commands).
The Grafana CLI allows you to install, upgrade, and manage your Grafana plugins using a command line tool. For more information about Grafana CLI plugin commands, refer to [Plugin commands](/docs/grafana/<GRAFANA_VERSION>/cli/#plugins-commands).
## Install a plugin from a ZIP file
@@ -44,7 +44,7 @@ refs:
destination: /docs/grafana-cloud/alerting-and-irm/oncall/user-and-team-management/#available-grafana-oncall-rbac-roles--granted-actions
---
# Grafana RBAC role definitions
# RBAC role definitions
{{< admonition type="note" >}}
Available in [Grafana Enterprise](/docs/grafana/<GRAFANA_VERSION>/introduction/grafana-enterprise/) and [Grafana Cloud](/docs/grafana-cloud).
@@ -59,7 +59,7 @@ The following tables list permissions associated with basic and fixed roles. Thi
| Grafana Admin | `basic_grafana_admin` |
| `fixed:authentication.config:writer`<br>`fixed:general.auth.config:writer`<br>`fixed:ldap:writer`<br>`fixed:licensing:writer`<br>`fixed:migrationassistant:migrator`<br>`fixed:org.users:writer`<br>`fixed:organization:maintainer`<br>`fixed:plugins:maintainer`<br>`fixed:provisioning:writer`<br>`fixed:roles:writer`<br>`fixed:settings:reader`<br>`fixed:settings:writer`<br>`fixed:stats:reader`<br>`fixed:support.bundles:writer`<br>`fixed:usagestats:reader`<br>`fixed:users:writer` | Default [Grafana server administrator](/docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/#grafana-server-administrators) assignments. |
| Admin | `basic_admin` | All roles assigned to Editor and `fixed:reports:writer` <br>`fixed:datasources:writer`<br>`fixed:organization:writer`<br>`fixed:datasources.permissions:writer`<br>`fixed:teams:writer`<br>`fixed:dashboards:writer`<br>`fixed:dashboards.permissions:writer`<br>`fixed:dashboards.public:writer`<br>`fixed:folders:writer`<br>`fixed:folders.permissions:writer`<br>`fixed:alerting:writer`<br>`fixed:alerting.provisioning.secrets:reader`<br>`fixed:alerting.provisioning:writer`<br>`fixed:datasources.caching:writer`<br>`fixed:plugins:writer`<br>`fixed:library.panels:writer` | Default [Grafana organization administrator](ref:rbac-basic-roles) assignments. |
| Editor | `basic_editor` | All roles assigned to Viewer and `fixed:datasources:explorer` <br>`fixed:dashboards:creator`<br>`fixed:folders:creator`<br>`fixed:annotations:writer`<br>`fixed:alerting:writer`<br>`fixed:library.panels:creator`<br>`fixed:library.panels:general.writer`<br>`fixed:alerting.provisioning.provenance:writer` | Default [Editor](ref:rbac-basic-roles) assignments. |
| Editor | `basic_editor` | All roles assigned to Viewer and `fixed:datasources:explorer` <br>`fixed:dashboards:creator`<br>`fixed:folders:creator`<br>`fixed:annotations:writer`<br>`fixed:alerting:writer`<br>`fixed:library.panels:creator`<br>`fixed:library.panels:general.writer`<br>`fixed:alerting.provisioning.status:writer` | Default [Editor](ref:rbac-basic-roles) assignments. |
| Viewer | `basic_viewer` | `fixed:datasources.id:reader`<br>`fixed:organization:reader`<br>`fixed:annotations:reader`<br>`fixed:annotations.dashboard:writer`<br>`fixed:alerting:reader`<br>`fixed:plugins.app:reader`<br>`fixed:dashboards.insights:reader`<br>`fixed:datasources.insights:reader`<br>`fixed:library.panels:general.reader`<br>`fixed:folders.general:reader`<br>`fixed:datasources.builtin:reader` | Default [Viewer](ref:rbac-basic-roles) assignments. |
| No Basic Role | n/a | | Default [No Basic Role](ref:rbac-basic-roles) |
@@ -74,86 +74,86 @@ These UUIDs won't be available if your instance was created before Grafana v10.2
To learn how to use the roles API to determine the role UUIDs, refer to [Manage RBAC roles](ref:rbac-manage-rbac-roles).
{{< /admonition >}}
| Fixed role | UUID | Permissions | Description |
| ----------------------------------------------- | ----------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `fixed:alerting:reader` | `fixed_O2oP1_uBFozI2i93klAkcvEWR30` | All permissions from `fixed:alerting.rules:reader` <br>`fixed:alerting.instances:reader`<br>`fixed:alerting.notifications:reader` | Read-only permissions for all Grafana, Mimir, Loki and Alertmanager alert rules\*, alerts, contact points, and notification policies.[\*](#alerting-roles) |
| `fixed:alerting:writer` | `fixed_-PAZgSJsDlRD8NUg-PFSeH_BkJY` | All permissions from `fixed:alerting.rules:writer` <br>`fixed:alerting.instances:writer`<br>`fixed:alerting.notifications:writer` | Create, update, and delete Grafana, Mimir, Loki and Alertmanager alert rules\*, silences, contact points, templates, mute timings, and notification policies.[\*](#alerting-roles) |
| `fixed:alerting.instances:reader` | `fixed_ut5fVS-Ulh_ejFoskFhJT_rYg0Y` | `alert.instances:read` for organization scope <br> `alert.instances.external:read` for scope `datasources:*` | Read all alerts and silences in the organization produced by Grafana Alerts and Mimir and Loki alerts and silences.[\*](#alerting-roles) |
| `fixed:alerting.instances:writer` | `fixed_pKOBJE346uyqMLdgWbk1NsQfEl0` | All permissions from `fixed:alerting.instances:reader` and<br> `alert.instances:create`<br>`alert.instances:write` for organization scope <br> `alert.instances.external:write` for scope `datasources:*` | Create, update and expire all silences in the organization produced by Grafana, Mimir, and Loki.[\*](#alerting-roles) |
| `fixed:alerting.notifications:reader` | `fixed_hmBn0lX5h1RZXB9Vaot420EEdA0` | `alert.notifications:read` for organization scope<br>`alert.notifications.external:read` for scope `datasources:*` | Read all Grafana and Alertmanager contact points, templates, and notification policies.[\*](#alerting-roles) |
| `fixed:alerting.notifications:writer` | `fixed_XplK6HPNxf9AP5IGTdB5Iun4tJc` | All permissions from `fixed:alerting.notifications:reader` and<br>`alert.notifications:write`for organization scope<br>`alert.notifications.external:read` for scope `datasources:*` | Create, update, and delete contact points, templates, mute timings and notification policies for Grafana and external Alertmanager.[\*](#alerting-roles) |
| `fixed:alerting.provisioning:writer` | `fixed_y7pFjdEkxpx5ETdcxPvp0AgRuUo` | `alert.provisioning:read` and `alert.provisioning:write` | Create, update and delete Grafana alert rules, notification policies, contact points, templates, etc via provisioning API. [\*](#alerting-roles) |
| `fixed:alerting.provisioning.secrets:reader`    | `fixed_9fmzXXZZG-Od0Amy2ofEG8Uk--c` | `alert.provisioning:read` and `alert.provisioning.secrets:read`                                                                                                                                                                                                                | Read-only permissions for the Provisioning API that also allow exporting resources with decrypted secrets. [\*](#alerting-roles)                                                                                                                                                          |
| `fixed:alerting.provisioning.provenance:writer` | `fixed_eAxlzfkTuobvKEgXHveFMBZrOj8` | `alert.provisioning.provenance:write` | Set provenance status to alert rules, notification policies, contact points, etc. Should be used together with regular writer roles. [\*](#alerting-roles) |
| `fixed:alerting.rules:reader` | `fixed_fRGKL_vAqUsmUWq5EYKnOha9DcA` | `alert.rule:read`, `alert.silences:read` for scope `folders:*` <br> `alert.rules.external:read` for scope `datasources:*` <br> `alert.notifications.time-intervals:read` <br> `alert.notifications.receivers:list` | Read all\* Grafana, Mimir, and Loki alert rules.[\*](#alerting-roles) and read rule-specific silences |
| `fixed:alerting.rules:writer` | `fixed_YJJGwAalUwDZPrXSyFH8GfYBXAc` | All permissions from `fixed:alerting.rules:reader` and <br> `alert.rule:create` <br> `alert.rule:write` <br> `alert.rule:delete` <br> `alert.silences:create` <br> `alert.silences:write` for scope `folders:*` <br> `alert.rules.external:write` for scope `datasources:*` | Create, update, and delete all\* Grafana, Mimir, and Loki alert rules.[\*](#alerting-roles) and manage rule-specific silences |
| `fixed:annotations:reader` | `fixed_hpZnoizrfAJsrceNcNQqWYV-xNU` | `annotations:read` for scopes `annotations:type:*` | Read all annotations and annotation tags. |
| `fixed:annotations:writer` | `fixed_ZVW-Aa9Tzle6J4s2aUFcq1StKWE` | All permissions from `fixed:annotations:reader` <br>`annotations:write` <br>`annotations.create`<br> `annotations:delete` for scope `annotations:type:*` | Read, create, update and delete all annotations and annotation tags. |
| `fixed:annotations.dashboard:writer` | `fixed_8A775xenXeKaJk4Cr7bchP9yXOA` | `annotations:write` <br>`annotations.create`<br> `annotations:delete` for scope `annotations:type:dashboard` | Create, update and delete dashboard annotations and annotation tags. |
| `fixed:authentication.config:writer` | `fixed_0rYhZ2Qnzs8AdB1nX7gexk3fHDw` | `settings:read` for scope `settings:auth.saml:*` <br> `settings:write` for scope `settings:auth.saml:*` | Read and update authentication and SAML settings. |
| `fixed:general.auth.config:writer` | `fixed_QFxIT_FGtBqbIVJIwx1bLgI5z6c` | `settings:read` for scope `settings:auth:oauth_allow_insecure_email_lookup` <br> `settings:write` for scope `settings:auth:oauth_allow_insecure_email_lookup` | Read and update the Grafana instance's general authentication configuration settings. |
| `fixed:dashboards:creator` | `fixed_ZorKUcEPCM01A1fPakEzGBUyU64` | `dashboards:create`<br>`folders:read` | Create dashboards. |
| `fixed:dashboards:reader` | `fixed_Sgr67JTOhjQGFlzYRahOe45TdWM` | `dashboards:read` | Read all dashboards. |
| `fixed:dashboards:writer` | `fixed_OK2YOQGIoI1G031hVzJB6rAJQAs` | All permissions from `fixed:dashboards:reader` and <br>`dashboards:write`<br>`dashboards:delete`<br>`dashboards:create`<br>`dashboards.permissions:read`<br>`dashboards.permissions:write` | Read, create, update, and delete all dashboards. |
| `fixed:dashboards.insights:reader` | `fixed_JlBJ2_gizP8zhgaeGE2rjyZe2Rs` | `dashboards.insights:read` | Read dashboard insights data and see presence indicators. |
| `fixed:dashboards.permissions:reader` | `fixed_f17oxuXW_58LL8mYJsm4T_mCeIw` | `dashboards.permissions:read` | Read all dashboard permissions. |
| `fixed:dashboards.permissions:writer` | `fixed_CcznxhWX_Yqn8uWMXMQ-b5iFW9k` | All permissions from `fixed:dashboards.permissions:reader` and <br>`dashboards.permissions:write` | Read and update all dashboard permissions. |
| `fixed:dashboards.public:writer` | `fixed_f_GHHRBciaqESXfGz2oCcooqHxs` | `dashboards.public:write` | Create, update, delete or pause a shared dashboard. |
| `fixed:datasources:creator` | `fixed_XX8jHREgUt-wo1A-rPXIiFlX6Zw` | `datasources:create` | Create data sources. |
| `fixed:datasources:explorer` | `fixed_qDzW9mzx9yM91T5Bi8dHUM2muTw` | `datasources:explore` | Enable the Explore feature. Data source permissions still apply, you can only query data sources for which you have query permissions. |
| `fixed:datasources:reader` | `fixed_C2x8IxkiBc1KZVjyYH775T9jNMQ` | `datasources:read`<br>`datasources:query` | Read and query data sources. |
| `fixed:datasources:writer` | `fixed_q8HXq8kjjA5IlHHgBJlKlUyaNik` | All permissions from `fixed:datasources:reader` and <br>`datasources:create`<br>`datasources:write`<br>`datasources:delete` | Read, query, create, delete, or update a data source. |
| `fixed:datasources.builtin:reader` | `fixed_q8HXq8kjjA5IlHHgBJlKlUyaNik` | `datasources:read` and `datasources:query` scoped to `datasources:uid:grafana` | An internal role used to grant Viewers access to the builtin example data source in Grafana. |
| `fixed:datasources.caching:reader` | `fixed_D2ddpGxJYlw0mbsTS1ek9fj0kj4` | `datasources.caching:read` | Read data source query caching settings. |
| `fixed:datasources.caching:writer` | `fixed_JtFjHr7jd7hSqUYcktKvRvIOGRE` | `datasources.caching:read`<br>`datasources.caching:write` | Enable, disable, or update query caching settings. |
| `fixed:datasources.id:reader` | `fixed_entg--fHmDqWY2-69N0ocawK0Os` | `datasources.id:read` | Read the ID of a data source based on its name. |
| `fixed:datasources.insights:reader` | `fixed_EBZ3NwlfecNPp2p0XcZRC1nfEYk` | `datasources.insights:read` | Read data source insights data. |
| `fixed:datasources.permissions:reader` | `fixed_ErYA-cTN3yn4h4GxaVPcawRhiOY` | `datasources.permissions:read` | Read data source permissions. |
| `fixed:datasources.permissions:writer` | `fixed_aiQh9YDfLOKjQhYasF9_SFUjQiw` | All permissions from `fixed:datasources.permissions:reader` and <br>`datasources.permissions:write` | Create, read, or delete permissions of a data source. |
| `fixed:folders:creator` | `fixed_gGLRbZGAGB6n9uECqSh_W382RlQ` | `folders:create` | Create folders in the root level. |
| `fixed:folders:reader` | `fixed_yeW-5QPeo-i5PZUIUXMlAA97GnQ` | `folders:read`<br>`dashboards:read` | Read all folders and dashboards. |
| `fixed:folders:writer` | `fixed_wJXLoTzgE7jVuz90dryYoiogL0o` | All permissions from `fixed:dashboards:writer` and <br>`folders:read`<br>`folders:write`<br>`folders:create`<br>`folders:delete`<br>`folders.permissions:read`<br>`folders.permissions:write` | Read, update, and delete all folders and dashboards. Create folders and subfolders. |
| `fixed:folders.general:reader` | `fixed_rSASbkg8DvpG_gTX5s41d7uxRvI` | `folders:read` scoped to `folders:uid:general` | An internal role used to correctly display access to the folder tree for Viewer role. |
| `fixed:folders.permissions:reader` | `fixed_E06l4cx0JFm47EeLBE4nmv3pnSo` | `folders.permissions:read` | Read all folder permissions. |
| `fixed:folders.permissions:writer` | `fixed_3GAgpQ_hWG8o7-lwNb86_VB37eI` | All permissions from `fixed:folders.permissions:reader` and <br>`folders.permissions:write` | Read and update all folder permissions. |
| `fixed:ldap:reader` | `fixed_lMcOPwSkxKY-qCK8NMJc5k6izLE` | `ldap.user:read`<br>`ldap.status:read` | Read the LDAP configuration and LDAP status information. |
| `fixed:ldap:writer` | `fixed_p6AvnU4GCQyIh7-hbwI-bk3GYnU` | All permissions from `fixed:ldap:reader` and <br>`ldap.user:sync`<br>`ldap.config:reload` | Read and update the LDAP configuration, and read LDAP status information. |
| `fixed:library.panels:creator` | `fixed_6eX6ItfegCIY5zLmPqTDW8ZV7KY` | `library.panels:create`<br>`folders:read` | Create library panel at the root level. |
| `fixed:library.panels:general.reader` | `fixed_ct0DghiBWR_2BiQm3EvNPDVmpio` | `library.panels:read` | Read all library panels at the root level. |
| `fixed:library.panels:general.writer` | `fixed_DgprkmqfN_1EhZ2v1_d1fYG8LzI` | All permissions from `fixed:library.panels:general.reader` plus<br>`library.panels:create`<br>`library.panels:delete`<br>`library.panels:write` | Create, read, write or delete all library panels and their permissions at the root level. |
| `fixed:library.panels:reader` | `fixed_tvTr9CnZ6La5vvUO_U_X1LPnhUs` | `library.panels:read` | Read all library panels. |
| `fixed:library.panels:writer` | `fixed_JTljAr21LWLTXCkgfBC4H0lhBC8` | All permissions from `fixed:library.panels:reader` plus<br>`library.panels:create`<br>`library.panels:delete`<br>`library.panels:write` | Create, read, write or delete all library panels and their permissions. |
| `fixed:licensing:reader` | `fixed_OADpuXvNEylO2Kelu3GIuBXEAYE` | `licensing:read`<br>`licensing.reports:read` | Read licensing information and licensing reports. |
| `fixed:licensing:writer` | `fixed_gzbz3rJpQMdaKHt-E4q0PVaKMoE` | All permissions from `fixed:licensing:reader` and <br>`licensing:write`<br>`licensing:delete` | Read licensing information and licensing reports, update and delete the license token. |
| `fixed:migrationassistant:migrator` | `fixed_LLk2p7TRuBztOAksTQb1Klc8YTk` | `migrationassistant:migrate` | Execute on-prem to cloud migrations through the Migration Assistant. |
| `fixed:org.users:reader` | `fixed_oCqNwlVHLOpw7-jAlwp4HzYqwGY` | `org.users:read` | Read users within a single organization. |
| `fixed:org.users:writer` | `fixed_VERj5nayasjgf_Yh0sWqqCkxWlw` | All permissions from `fixed:org.users:reader` and <br>`org.users:add`<br>`org.users:remove`<br>`org.users:write` | Within a single organization, add a user, invite a new user, read information about a user and their role, remove a user from that organization, or change the role of a user. |
| `fixed:organization:maintainer` | `fixed_CMm-uuBaPUBf4r8XG3jIvxo55bg` | All permissions from `fixed:organization:reader` and <br> `orgs:write`<br>`orgs:create`<br>`orgs:delete`<br>`orgs.quotas:write` | Create, read, write, or delete an organization. Read or write its quotas. This role needs to be assigned globally. |
| `fixed:organization:reader` | `fixed_0SZPJlTHdNEe8zO91zv7Zwiwa2w` | `orgs:read`<br>`orgs.quotas:read` | Read an organization and its quotas. |
| `fixed:organization:writer` | `fixed_Y4jGqDd8w1yCrPwlik8z5Iu8-3M` | All permissions from `fixed:organization:reader` and <br> `orgs:write`<br>`orgs.preferences:read`<br>`orgs.preferences:write` | Read an organization, its quotas, or its preferences. Update organization properties, or its preferences. |
| `fixed:plugins:maintainer` | `fixed_yEOKidBcWgbm74x-nTa3lW5lOyY` | `plugins:install` | Install and uninstall plugins. Needs to be assigned globally. |
| `fixed:plugins:writer` | `fixed_MRYpGk7kpNNwt2VoVOXFiPnQziE` | `plugins:write` | Enable and disable plugins and edit plugins' settings. |
| `fixed:plugins.app:reader` | `fixed_AcZRiNYx7NueYkUqzw1o2OGGUAA` | `plugins.app:access` | Access application plugins (still enforcing the organization role). |
| `fixed:provisioning:writer` | `fixed_bgk1FCyR6OEDwhgirZlQgu5LlCA` | `provisioning:reload` | Reload provisioning. |
| `fixed:reports:reader` | `fixed_72_8LU_0ukfm6BdblOw8Z9q-GQ8` | `reports:read`<br>`reports:send`<br>`reports.settings:read` | Read all reports and shared report settings. |
| `fixed:reports:writer` | `fixed_jBW3_7g1EWOjGVBYeVRwtFxhUNw` | All permissions from `fixed:reports:reader` and <br>`reports:create`<br>`reports:write`<br>`reports:delete`<br>`reports.settings:write` | Create, read, update, or delete all reports and shared report settings. |
| `fixed:roles:reader` | `fixed_GkfG-1NSwEGb4hpK3-E3qHyNltc` | `roles:read`<br>`teams.roles:read`<br>`users.roles:read`<br>`users.permissions:read` | Read all access control roles, roles and permissions assigned to users, teams. |
| `fixed:roles:resetter` | `fixed_WgPpC3qJRmVpVTJavFNwfS5RuzQ` | `roles:write` with scope `permissions:type:escalate` | Reset basic roles to their default. |
| `fixed:roles:writer` | `fixed_W5aFaw8isAM27x_eWfElBhZ0iOc` | All permissions from `fixed:roles:reader` and <br>`roles:write`<br>`roles:delete`<br>`teams.roles:add`<br>`teams.roles:remove`<br>`users.roles:add`<br>`users.roles:remove` | Create, read, update, or delete all roles, assign or unassign roles to users, teams. |
| `fixed:serviceaccounts:creator` | `fixed_Ikw60fckA0MyiiZ73BawSfOULy4` | `serviceaccounts:create` | Create Grafana service accounts. |
| `fixed:serviceaccounts:reader` | `fixed_QFjJAZ88iawMLInYOxPA1DB1w6I` | `serviceaccounts:read` | Read Grafana service accounts. |
| `fixed:serviceaccounts:writer` | `fixed_iBvUNUEZBZ7PUW0vdkN5iojc2sk` | `serviceaccounts:read`<br>`serviceaccounts:create`<br>`serviceaccounts:write`<br>`serviceaccounts:delete`<br>`serviceaccounts.permissions:read`<br>`serviceaccounts.permissions:write` | Create, update, read and delete all Grafana service accounts and manage service account permissions. |
| `fixed:settings:reader` | `fixed_0LaUt1x6PP8hsZzEBhqPQZFUd8Q` | `settings:read` | Read Grafana instance settings. |
| `fixed:settings:writer` | `fixed_joIHDgMrGg790hMhUufVzcU4j44` | All permissions from `fixed:settings:reader` and<br>`settings:write` | Read and update Grafana instance settings. |
| `fixed:stats:reader` | `fixed_OnRCXxZVINWpcKvTF5A1gecJ7pA` | `server.stats:read` | Read Grafana instance statistics. |
| `fixed:support.bundles:reader` | `fixed_gcPjI3PTUJwRx-GJZwDhNa7zbos` | `support.bundles:read` | List and download support bundles. |
| `fixed:support.bundles:writer` | `fixed_dTgCv9Wxrp_WHAhwHYIgeboxKpE` | `support.bundles:read`<br>`support.bundles:create`<br>`support.bundles:delete` | Create, delete, list and download support bundles. |
| `fixed:teams:creator` | `fixed_nzVQoNSDSn0fg1MDgO6XnZX2RZI` | `teams:create`<br>`org.users:read` | Create a team and list organization users (required to manage the created team). |
| `fixed:teams:read` | `fixed_Z8pB0GQlrqRt8IZBCJQxPWvJPgQ` | `teams:read` | List all teams. |
| `fixed:teams:writer` | `fixed_xw1T0579h620MOYi4L96GUs7fZY` | `teams:create`<br>`teams:delete`<br>`teams:read`<br>`teams:write`<br>`teams.permissions:read`<br>`teams.permissions:write` | Create, read, update and delete teams and manage team memberships. |
| `fixed:usagestats:reader` | `fixed_eAM0azEvnWFCJAjNkUKnGL_1-bU` | `server.usagestats.report:read` | View usage statistics report. |
| `fixed:users:reader` | `fixed_buZastUG3reWyQpPemcWjGqPAd0` | `users:read`<br>`users.quotas:read`<br>`users.authtoken:read` | Read all users and their information, such as team memberships, authentication tokens, and quotas. |
| `fixed:users:writer` | `fixed_wjzgHHo_Ux25DJuELn_oiAdB_yM` | All permissions from `fixed:users:reader` and <br>`users:write`<br>`users:create`<br>`users:delete`<br>`users:enable`<br>`users:disable`<br>`users.password:write`<br>`users.permissions:write`<br>`users:logout`<br>`users.authtoken:write`<br>`users.quotas:write` | Read and update all attributes and settings for all users in Grafana: update user information, read user information, create or enable or disable a user, make a user a Grafana administrator, sign out a user, update a users authentication token, or update quotas for all users. |
| Fixed role | UUID | Permissions | Description |
| -------------------------------------------- | ----------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `fixed:alerting:reader` | `fixed_O2oP1_uBFozI2i93klAkcvEWR30` | All permissions from `fixed:alerting.rules:reader` <br>`fixed:alerting.instances:reader`<br>`fixed:alerting.notifications:reader` | Read-only permissions for all Grafana, Mimir, Loki and Alertmanager alert rules\*, alerts, contact points, and notification policies.[\*](#alerting-roles) |
| `fixed:alerting:writer` | `fixed_-PAZgSJsDlRD8NUg-PFSeH_BkJY` | All permissions from `fixed:alerting.rules:writer` <br>`fixed:alerting.instances:writer`<br>`fixed:alerting.notifications:writer` | Create, update, and delete Grafana, Mimir, Loki and Alertmanager alert rules\*, silences, contact points, templates, mute timings, and notification policies.[\*](#alerting-roles) |
| `fixed:alerting.instances:reader` | `fixed_ut5fVS-Ulh_ejFoskFhJT_rYg0Y` | `alert.instances:read` for organization scope <br> `alert.instances.external:read` for scope `datasources:*` | Read all alerts and silences in the organization produced by Grafana Alerts and Mimir and Loki alerts and silences.[\*](#alerting-roles) |
| `fixed:alerting.instances:writer` | `fixed_pKOBJE346uyqMLdgWbk1NsQfEl0` | All permissions from `fixed:alerting.instances:reader` and<br> `alert.instances:create`<br>`alert.instances:write` for organization scope <br> `alert.instances.external:write` for scope `datasources:*` | Create, update and expire all silences in the organization produced by Grafana, Mimir, and Loki.[\*](#alerting-roles) |
| `fixed:alerting.notifications:reader` | `fixed_hmBn0lX5h1RZXB9Vaot420EEdA0` | `alert.notifications:read` for organization scope<br>`alert.notifications.external:read` for scope `datasources:*` | Read all Grafana and Alertmanager contact points, templates, and notification policies.[\*](#alerting-roles) |
| `fixed:alerting.notifications:writer` | `fixed_XplK6HPNxf9AP5IGTdB5Iun4tJc` | All permissions from `fixed:alerting.notifications:reader` and<br>`alert.notifications:write`for organization scope<br>`alert.notifications.external:read` for scope `datasources:*` | Create, update, and delete contact points, templates, mute timings and notification policies for Grafana and external Alertmanager.[\*](#alerting-roles) |
| `fixed:alerting.provisioning:writer` | `fixed_y7pFjdEkxpx5ETdcxPvp0AgRuUo` | `alert.provisioning:read` and `alert.provisioning:write` | Create, update and delete Grafana alert rules, notification policies, contact points, templates, etc via provisioning API. [\*](#alerting-roles) |
| `fixed:alerting.provisioning.secrets:reader` | `fixed_9fmzXXZZG-Od0Amy2ofEG8Uk--c` | `alert.provisioning:read` and `alert.provisioning.secrets:read`                                                                                                                                                                                                                | Read-only permissions for the Provisioning API that also allow exporting resources with decrypted secrets. [\*](#alerting-roles)                                                                                                                                                          |
| `fixed:alerting.provisioning.status:writer` | `fixed_eAxlzfkTuobvKEgXHveFMBZrOj8` | `alert.provisioning.provenance:write` | Set provenance status to alert rules, notification policies, contact points, etc. Should be used together with regular writer roles. [\*](#alerting-roles) |
| `fixed:alerting.rules:reader` | `fixed_fRGKL_vAqUsmUWq5EYKnOha9DcA` | `alert.rule:read`, `alert.silences:read` for scope `folders:*` <br> `alert.rules.external:read` for scope `datasources:*` <br> `alert.notifications.time-intervals:read` <br> `alert.notifications.receivers:list` | Read all\* Grafana, Mimir, and Loki alert rules.[\*](#alerting-roles) and read rule-specific silences |
| `fixed:alerting.rules:writer` | `fixed_YJJGwAalUwDZPrXSyFH8GfYBXAc` | All permissions from `fixed:alerting.rules:reader` and <br> `alert.rule:create` <br> `alert.rule:write` <br> `alert.rule:delete` <br> `alert.silences:create` <br> `alert.silences:write` for scope `folders:*` <br> `alert.rules.external:write` for scope `datasources:*` | Create, update, and delete all\* Grafana, Mimir, and Loki alert rules.[\*](#alerting-roles) and manage rule-specific silences |
| `fixed:annotations:reader` | `fixed_hpZnoizrfAJsrceNcNQqWYV-xNU` | `annotations:read` for scopes `annotations:type:*` | Read all annotations and annotation tags. |
| `fixed:annotations:writer` | `fixed_ZVW-Aa9Tzle6J4s2aUFcq1StKWE` | All permissions from `fixed:annotations:reader` <br>`annotations:write` <br>`annotations.create`<br> `annotations:delete` for scope `annotations:type:*` | Read, create, update and delete all annotations and annotation tags. |
| `fixed:annotations.dashboard:writer` | `fixed_8A775xenXeKaJk4Cr7bchP9yXOA` | `annotations:write` <br>`annotations.create`<br> `annotations:delete` for scope `annotations:type:dashboard` | Create, update and delete dashboard annotations and annotation tags. |
| `fixed:authentication.config:writer` | `fixed_0rYhZ2Qnzs8AdB1nX7gexk3fHDw` | `settings:read` for scope `settings:auth.saml:*` <br> `settings:write` for scope `settings:auth.saml:*` | Read and update authentication and SAML settings. |
| `fixed:general.auth.config:writer` | `fixed_QFxIT_FGtBqbIVJIwx1bLgI5z6c` | `settings:read` for scope `settings:auth:oauth_allow_insecure_email_lookup` <br> `settings:write` for scope `settings:auth:oauth_allow_insecure_email_lookup` | Read and update the Grafana instance's general authentication configuration settings. |
| `fixed:dashboards:creator` | `fixed_ZorKUcEPCM01A1fPakEzGBUyU64` | `dashboards:create`<br>`folders:read` | Create dashboards. |
| `fixed:dashboards:reader` | `fixed_Sgr67JTOhjQGFlzYRahOe45TdWM` | `dashboards:read` | Read all dashboards. |
| `fixed:dashboards:writer` | `fixed_OK2YOQGIoI1G031hVzJB6rAJQAs` | All permissions from `fixed:dashboards:reader` and <br>`dashboards:write`<br>`dashboards:delete`<br>`dashboards:create`<br>`dashboards.permissions:read`<br>`dashboards.permissions:write` | Read, create, update, and delete all dashboards. |
| `fixed:dashboards.insights:reader` | `fixed_JlBJ2_gizP8zhgaeGE2rjyZe2Rs` | `dashboards.insights:read` | Read dashboard insights data and see presence indicators. |
| `fixed:dashboards.permissions:reader` | `fixed_f17oxuXW_58LL8mYJsm4T_mCeIw` | `dashboards.permissions:read` | Read all dashboard permissions. |
| `fixed:dashboards.permissions:writer` | `fixed_CcznxhWX_Yqn8uWMXMQ-b5iFW9k` | All permissions from `fixed:dashboards.permissions:reader` and <br>`dashboards.permissions:write` | Read and update all dashboard permissions. |
| `fixed:dashboards.public:writer` | `fixed_f_GHHRBciaqESXfGz2oCcooqHxs` | `dashboards.public:write` | Create, update, delete or pause a shared dashboard. |
| `fixed:datasources:creator` | `fixed_XX8jHREgUt-wo1A-rPXIiFlX6Zw` | `datasources:create` | Create data sources. |
| `fixed:datasources:explorer` | `fixed_qDzW9mzx9yM91T5Bi8dHUM2muTw` | `datasources:explore` | Enable the Explore feature. Data source permissions still apply, you can only query data sources for which you have query permissions. |
| `fixed:datasources:reader` | `fixed_C2x8IxkiBc1KZVjyYH775T9jNMQ` | `datasources:read`<br>`datasources:query` | Read and query data sources. |
| `fixed:datasources:writer` | `fixed_q8HXq8kjjA5IlHHgBJlKlUyaNik` | All permissions from `fixed:datasources:reader` and <br>`datasources:create`<br>`datasources:write`<br>`datasources:delete` | Read, query, create, delete, or update a data source. |
| `fixed:datasources.builtin:reader` | `fixed_q8HXq8kjjA5IlHHgBJlKlUyaNik` | `datasources:read` and `datasources:query` scoped to `datasources:uid:grafana` | An internal role used to grant Viewers access to the builtin example data source in Grafana. |
| `fixed:datasources.caching:reader` | `fixed_D2ddpGxJYlw0mbsTS1ek9fj0kj4` | `datasources.caching:read` | Read data source query caching settings. |
| `fixed:datasources.caching:writer` | `fixed_JtFjHr7jd7hSqUYcktKvRvIOGRE` | `datasources.caching:read`<br>`datasources.caching:write` | Enable, disable, or update query caching settings. |
| `fixed:datasources.id:reader` | `fixed_entg--fHmDqWY2-69N0ocawK0Os` | `datasources.id:read` | Read the ID of a data source based on its name. |
| `fixed:datasources.insights:reader` | `fixed_EBZ3NwlfecNPp2p0XcZRC1nfEYk` | `datasources.insights:read` | Read data source insights data. |
| `fixed:datasources.permissions:reader` | `fixed_ErYA-cTN3yn4h4GxaVPcawRhiOY` | `datasources.permissions:read` | Read data source permissions. |
| `fixed:datasources.permissions:writer` | `fixed_aiQh9YDfLOKjQhYasF9_SFUjQiw` | All permissions from `fixed:datasources.permissions:reader` and <br>`datasources.permissions:write` | Create, read, or delete permissions of a data source. |
| `fixed:folders:creator` | `fixed_gGLRbZGAGB6n9uECqSh_W382RlQ` | `folders:create` | Create folders in the root level. |
| `fixed:folders:reader` | `fixed_yeW-5QPeo-i5PZUIUXMlAA97GnQ` | `folders:read`<br>`dashboards:read` | Read all folders and dashboards. |
| `fixed:folders:writer` | `fixed_wJXLoTzgE7jVuz90dryYoiogL0o` | All permissions from `fixed:dashboards:writer` and <br>`folders:read`<br>`folders:write`<br>`folders:create`<br>`folders:delete`<br>`folders.permissions:read`<br>`folders.permissions:write` | Read, update, and delete all folders and dashboards. Create folders and subfolders. |
| `fixed:folders.general:reader` | `fixed_rSASbkg8DvpG_gTX5s41d7uxRvI` | `folders:read` scoped to `folders:uid:general` | An internal role used to correctly display access to the folder tree for Viewer role. |
| `fixed:folders.permissions:reader` | `fixed_E06l4cx0JFm47EeLBE4nmv3pnSo` | `folders.permissions:read` | Read all folder permissions. |
| `fixed:folders.permissions:writer` | `fixed_3GAgpQ_hWG8o7-lwNb86_VB37eI` | All permissions from `fixed:folders.permissions:reader` and <br>`folders.permissions:write` | Read and update all folder permissions. |
| `fixed:ldap:reader` | `fixed_lMcOPwSkxKY-qCK8NMJc5k6izLE` | `ldap.user:read`<br>`ldap.status:read` | Read the LDAP configuration and LDAP status information. |
| `fixed:ldap:writer` | `fixed_p6AvnU4GCQyIh7-hbwI-bk3GYnU` | All permissions from `fixed:ldap:reader` and <br>`ldap.user:sync`<br>`ldap.config:reload` | Read and update the LDAP configuration, and read LDAP status information. |
| `fixed:library.panels:creator` | `fixed_6eX6ItfegCIY5zLmPqTDW8ZV7KY` | `library.panels:create`<br>`folders:read` | Create library panel at the root level. |
| `fixed:library.panels:general.reader` | `fixed_ct0DghiBWR_2BiQm3EvNPDVmpio` | `library.panels:read` | Read all library panels at the root level. |
| `fixed:library.panels:general.writer` | `fixed_DgprkmqfN_1EhZ2v1_d1fYG8LzI` | All permissions from `fixed:library.panels:general.reader` plus<br>`library.panels:create`<br>`library.panels:delete`<br>`library.panels:write` | Create, read, write or delete all library panels and their permissions at the root level. |
| `fixed:library.panels:reader` | `fixed_tvTr9CnZ6La5vvUO_U_X1LPnhUs` | `library.panels:read` | Read all library panels. |
| `fixed:library.panels:writer` | `fixed_JTljAr21LWLTXCkgfBC4H0lhBC8` | All permissions from `fixed:library.panels:reader` plus<br>`library.panels:create`<br>`library.panels:delete`<br>`library.panels:write` | Create, read, write or delete all library panels and their permissions. |
| `fixed:licensing:reader` | `fixed_OADpuXvNEylO2Kelu3GIuBXEAYE` | `licensing:read`<br>`licensing.reports:read` | Read licensing information and licensing reports. |
| `fixed:licensing:writer` | `fixed_gzbz3rJpQMdaKHt-E4q0PVaKMoE` | All permissions from `fixed:licensing:reader` and <br>`licensing:write`<br>`licensing:delete` | Read licensing information and licensing reports, update and delete the license token. |
| `fixed:migrationassistant:migrator` | `fixed_LLk2p7TRuBztOAksTQb1Klc8YTk` | `migrationassistant:migrate` | Execute on-prem to cloud migrations through the Migration Assistant. |
| `fixed:org.users:reader` | `fixed_oCqNwlVHLOpw7-jAlwp4HzYqwGY` | `org.users:read` | Read users within a single organization. |
| `fixed:org.users:writer` | `fixed_VERj5nayasjgf_Yh0sWqqCkxWlw` | All permissions from `fixed:org.users:reader` and <br>`org.users:add`<br>`org.users:remove`<br>`org.users:write` | Within a single organization, add a user, invite a new user, read information about a user and their role, remove a user from that organization, or change the role of a user. |
| `fixed:organization:maintainer` | `fixed_CMm-uuBaPUBf4r8XG3jIvxo55bg` | All permissions from `fixed:organization:reader` and <br> `orgs:write`<br>`orgs:create`<br>`orgs:delete`<br>`orgs.quotas:write` | Create, read, write, or delete an organization. Read or write its quotas. This role needs to be assigned globally. |
| `fixed:organization:reader` | `fixed_0SZPJlTHdNEe8zO91zv7Zwiwa2w` | `orgs:read`<br>`orgs.quotas:read` | Read an organization and its quotas. |
| `fixed:organization:writer` | `fixed_Y4jGqDd8w1yCrPwlik8z5Iu8-3M` | All permissions from `fixed:organization:reader` and <br> `orgs:write`<br>`orgs.preferences:read`<br>`orgs.preferences:write` | Read an organization, its quotas, or its preferences. Update organization properties, or its preferences. |
| `fixed:plugins:maintainer` | `fixed_yEOKidBcWgbm74x-nTa3lW5lOyY` | `plugins:install` | Install and uninstall plugins. Needs to be assigned globally. |
| `fixed:plugins:writer` | `fixed_MRYpGk7kpNNwt2VoVOXFiPnQziE` | `plugins:write` | Enable and disable plugins and edit plugins' settings. |
| `fixed:plugins.app:reader` | `fixed_AcZRiNYx7NueYkUqzw1o2OGGUAA` | `plugins.app:access` | Access application plugins (still enforcing the organization role). |
| `fixed:provisioning:writer` | `fixed_bgk1FCyR6OEDwhgirZlQgu5LlCA` | `provisioning:reload` | Reload provisioning. |
| `fixed:reports:reader` | `fixed_72_8LU_0ukfm6BdblOw8Z9q-GQ8` | `reports:read`<br>`reports:send`<br>`reports.settings:read` | Read all reports and shared report settings. |
| `fixed:reports:writer` | `fixed_jBW3_7g1EWOjGVBYeVRwtFxhUNw` | All permissions from `fixed:reports:reader` and <br>`reports:create`<br>`reports:write`<br>`reports:delete`<br>`reports.settings:write` | Create, read, update, or delete all reports and shared report settings. |
| `fixed:roles:reader` | `fixed_GkfG-1NSwEGb4hpK3-E3qHyNltc` | `roles:read`<br>`teams.roles:read`<br>`users.roles:read`<br>`users.permissions:read` | Read all access control roles, roles and permissions assigned to users, teams. |
| `fixed:roles:resetter` | `fixed_WgPpC3qJRmVpVTJavFNwfS5RuzQ` | `roles:write` with scope `permissions:type:escalate` | Reset basic roles to their default. |
| `fixed:roles:writer` | `fixed_W5aFaw8isAM27x_eWfElBhZ0iOc` | All permissions from `fixed:roles:reader` and <br>`roles:write`<br>`roles:delete`<br>`teams.roles:add`<br>`teams.roles:remove`<br>`users.roles:add`<br>`users.roles:remove` | Create, read, update, or delete all roles, assign or unassign roles to users, teams. |
| `fixed:serviceaccounts:creator` | `fixed_Ikw60fckA0MyiiZ73BawSfOULy4` | `serviceaccounts:create` | Create Grafana service accounts. |
| `fixed:serviceaccounts:reader` | `fixed_QFjJAZ88iawMLInYOxPA1DB1w6I` | `serviceaccounts:read` | Read Grafana service accounts. |
| `fixed:serviceaccounts:writer` | `fixed_iBvUNUEZBZ7PUW0vdkN5iojc2sk` | `serviceaccounts:read`<br>`serviceaccounts:create`<br>`serviceaccounts:write`<br>`serviceaccounts:delete`<br>`serviceaccounts.permissions:read`<br>`serviceaccounts.permissions:write` | Create, update, read and delete all Grafana service accounts and manage service account permissions. |
| `fixed:settings:reader` | `fixed_0LaUt1x6PP8hsZzEBhqPQZFUd8Q` | `settings:read` | Read Grafana instance settings. |
| `fixed:settings:writer` | `fixed_joIHDgMrGg790hMhUufVzcU4j44` | All permissions from `fixed:settings:reader` and<br>`settings:write` | Read and update Grafana instance settings. |
| `fixed:stats:reader` | `fixed_OnRCXxZVINWpcKvTF5A1gecJ7pA` | `server.stats:read` | Read Grafana instance statistics. |
| `fixed:support.bundles:reader` | `fixed_gcPjI3PTUJwRx-GJZwDhNa7zbos` | `support.bundles:read` | List and download support bundles. |
| `fixed:support.bundles:writer` | `fixed_dTgCv9Wxrp_WHAhwHYIgeboxKpE` | `support.bundles:read`<br>`support.bundles:create`<br>`support.bundles:delete` | Create, delete, list and download support bundles. |
| `fixed:teams:creator` | `fixed_nzVQoNSDSn0fg1MDgO6XnZX2RZI` | `teams:create`<br>`org.users:read` | Create a team and list organization users (required to manage the created team). |
| `fixed:teams:read` | `fixed_Z8pB0GQlrqRt8IZBCJQxPWvJPgQ` | `teams:read` | List all teams. |
| `fixed:teams:writer` | `fixed_xw1T0579h620MOYi4L96GUs7fZY` | `teams:create`<br>`teams:delete`<br>`teams:read`<br>`teams:write`<br>`teams.permissions:read`<br>`teams.permissions:write` | Create, read, update and delete teams and manage team memberships. |
| `fixed:usagestats:reader` | `fixed_eAM0azEvnWFCJAjNkUKnGL_1-bU` | `server.usagestats.report:read` | View usage statistics report. |
| `fixed:users:reader` | `fixed_buZastUG3reWyQpPemcWjGqPAd0` | `users:read`<br>`users.quotas:read`<br>`users.authtoken:read` | Read all users and their information, such as team memberships, authentication tokens, and quotas. |
| `fixed:users:writer` | `fixed_wjzgHHo_Ux25DJuELn_oiAdB_yM` | All permissions from `fixed:users:reader` and <br>`users:write`<br>`users:create`<br>`users:delete`<br>`users:enable`<br>`users:disable`<br>`users.password:write`<br>`users.permissions:write`<br>`users:logout`<br>`users.authtoken:write`<br>`users.quotas:write` | Read and update all attributes and settings for all users in Grafana: update user information, read user information, create or enable or disable a user, make a user a Grafana administrator, sign out a user, update a users authentication token, or update quotas for all users. |
### Alerting roles
@@ -164,20 +164,10 @@ Access to Grafana alert rules is an intersection of many permissions:
- Permission to read a folder. For example, the fixed role `fixed:folders:reader` includes the action `folders:read` and a folder scope `folders:id:`.
- Permission to query **all** data sources that a given alert rule uses. If a user cannot query a given data source, they cannot see any alert rules that query that data source.
There is only one exclusion. Role `fixed:alerting.provisioning:writer` does not require user to have any additional permissions and provides access to all aspects of the alerting configuration via special provisioning API.
There is only one exclusion at this moment. Role `fixed:alerting.provisioning:writer` does not require user to have any additional permissions and provides access to all aspects of the alerting configuration via special provisioning API.
For more information about the permissions required to access alert rules, refer to [Create a custom role to access alerts in a folder](ref:plan-rbac-rollout-strategy-create-a-custom-role-to-access-alerts-in-a-folder).
#### Alerting basic roles
The following table lists the default RBAC alerting role assignments to the basic roles:
| Basic role | Associated fixed roles | Description |
| ---------- | --------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- |
| Admin | `fixed:alerting:writer`<br>`fixed:alerting.provisioning.secrets:reader`<br>`fixed:alerting.provisioning:writer` | Default [Grafana organization administrator](ref:rbac-basic-roles) assignments. |
| Editor | `fixed:alerting:writer`<br>`fixed:alerting.provisioning.provenance:writer` | Default [Editor](ref:rbac-basic-roles) assignments. |
| Viewer | `fixed:alerting:reader` | Default [Viewer](ref:rbac-basic-roles) assignments. |
### Grafana OnCall roles
If you are using [Grafana OnCall](ref:oncall), you can try out the integration between Grafana OnCall and RBAC.
@@ -62,9 +62,6 @@ The following steps describe a basic configuration:
# The URL of the Loki server
loki_remote_url = http://localhost:3100
[feature_toggles]
enable = alertingCentralAlertHistory
```
1. **Configure the Loki data source in Grafana**
@@ -17,166 +17,55 @@ weight: 155
# Configure RBAC
[Role-based access control (RBAC)](/docs/grafana/latest/administration/roles-and-permissions/access-control/plan-rbac-rollout-strategy/) for Grafana Enterprise and Grafana Cloud provides a standardized way of granting, changing, and revoking access, so that users can view and modify Grafana resources.
Role-based access control (RBAC) for Grafana Enterprise and Grafana Cloud provides a standardized way of granting, changing, and revoking access, so that users can view and modify Grafana resources.
A user is any individual who can log in to Grafana. Each user has a role that includes permissions. Permissions determine the tasks a user can perform in the system.
A user is any individual who can log in to Grafana. Each user is associated with a role that includes permissions. Permissions determine the tasks a user can perform in the system.
Each permission contains one or more actions and a scope.
## Role types
Grafana has three types of roles for managing access:
- **Basic roles**: Admin, Editor, Viewer, and No basic role. These are assigned to users and provide default access levels.
- **Fixed roles**: Predefined groups of permissions for specific use cases. Basic roles automatically include certain fixed roles.
- **Custom roles**: User-defined roles that combine specific permissions for granular access control.
## Basic role permissions
The following table summarizes the default alerting permissions for each basic role.
| Capability | Admin | Editor | Viewer |
| ----------------------------------------- | :---: | :----: | :----: |
| View alert rules | ✓ | ✓ | ✓ |
| Create, edit, and delete alert rules | ✓ | ✓ | |
| View silences | ✓ | ✓ | ✓ |
| Create, edit, and expire silences | ✓ | ✓ | |
| View contact points and templates | ✓ | ✓ | ✓ |
| Create, edit, and delete contact points | ✓ | ✓ | |
| View notification policies | ✓ | ✓ | ✓ |
| Create, edit, and delete policies | ✓ | ✓ | |
| View mute timings | ✓ | ✓ | ✓ |
| Create, edit, and delete timing intervals | ✓ | ✓ | |
| Access provisioning API | ✓ | ✓ | |
| Export with decrypted secrets | ✓ | | |
{{< admonition type="note" >}}
Access to alert rules also requires permission to read the folder containing the rules and permission to query the data sources used in the rules.
{{< /admonition >}}
## Permissions
Grafana Alerting has the following permissions organized by resource type.
Grafana Alerting has the following permissions.
### Alert rules
Permissions for managing Grafana-managed alert rules.
| Action | Applicable scope | Description |
| -------------------- | ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `alert.rules:create` | `folders:*`<br>`folders:uid:*` | Create Grafana alert rules in a folder and its subfolders. Combine this permission with `folders:read` in a scope that includes the folder and `datasources:query` in the scope of data sources the user can query. |
| `alert.rules:read` | `folders:*`<br>`folders:uid:*` | Read Grafana alert rules in a folder and its subfolders. Combine this permission with `folders:read` in a scope that includes the folder. |
| `alert.rules:write` | `folders:*`<br>`folders:uid:*` | Update Grafana alert rules in a folder and its subfolders. Combine this permission with `folders:read` in a scope that includes the folder. To allow query modifications add `datasources:query` in the scope of data sources the user can query. |
| `alert.rules:delete` | `folders:*`<br>`folders:uid:*` | Delete Grafana alert rules in a folder and its subfolders. Combine this permission with `folders:read` in a scope that includes the folder. |
### External alert rules
Permissions for managing alert rules in external data sources that support alerting.
| Action | Applicable scope | Description |
| ---------------------------- | -------------------------------------- | ---------------------------------------------------------------------------------------------- |
| `alert.rules.external:read` | `datasources:*`<br>`datasources:uid:*` | Read alert rules in data sources that support alerting (Prometheus, Mimir, and Loki). |
| `alert.rules.external:write` | `datasources:*`<br>`datasources:uid:*` | Create, update, and delete alert rules in data sources that support alerting (Mimir and Loki). |
### Alert instances and silences
Permissions for managing alert instances and silences in Grafana.
| Action | Applicable scope | Description |
| ------------------------ | ------------------------------ | ------------------------------------------------------------------------------------ |
| `alert.instances:read` | n/a | Read alerts and silences in the current organization. |
| `alert.instances:create` | n/a | Create silences in the current organization. |
| `alert.instances:write` | n/a | Update and expire silences in the current organization. |
| `alert.silences:read` | `folders:*`<br>`folders:uid:*` | Read all general silences and rule-specific silences in a folder and its subfolders. |
| `alert.silences:create` | `folders:*`<br>`folders:uid:*` | Create rule-specific silences in a folder and its subfolders. |
| `alert.silences:write` | `folders:*`<br>`folders:uid:*` | Update and expire rule-specific silences in a folder and its subfolders. |
### External alert instances
Permissions for managing alert instances in external data sources.
| Action | Applicable scope | Description |
| -------------------------------- | -------------------------------------- | ----------------------------------------------------------------- |
| `alert.instances.external:read` | `datasources:*`<br>`datasources:uid:*` | Read alerts and silences in data sources that support alerting. |
| `alert.instances.external:write` | `datasources:*`<br>`datasources:uid:*` | Manage alerts and silences in data sources that support alerting. |
### Contact points
Permissions for managing contact points (notification receivers).
| Action | Applicable scope | Description |
| -------------------------------------------- | ---------------------------------- | ----------------------------------------------------------------------------------------------------------- |
| `alert.notifications.receivers:list` | n/a | List contact points in the current organization. |
| `alert.notifications.receivers:read` | `receivers:*`<br>`receivers:uid:*` | Read contact points. |
| `alert.notifications.receivers.secrets:read` | `receivers:*`<br>`receivers:uid:*` | Export contact points with decrypted secrets. |
| `alert.notifications.receivers:create`       | n/a                                | Create a new contact point. The creator is automatically granted full access to the created contact point.  |
| `alert.notifications.receivers:write` | `receivers:*`<br>`receivers:uid:*` | Update existing contact points. |
| `alert.notifications.receivers:delete` | `receivers:*`<br>`receivers:uid:*` | Update and delete existing contact points. |
| `alert.notifications.receivers:test` | `receivers:*`<br>`receivers:uid:*` | Test contact points to verify their configuration. |
| `receivers.permissions:read` | `receivers:*`<br>`receivers:uid:*` | Read permissions for contact points. |
| `receivers.permissions:write` | `receivers:*`<br>`receivers:uid:*` | Manage permissions for contact points. |
### Notification policies
Permissions for managing notification policies (routing rules).
| Action | Applicable scope | Description |
| ---------------------------------- | ---------------- | ----------------------------------------------------- |
| `alert.notifications.routes:read` | n/a | Read notification policies. |
| `alert.notifications.routes:write` | n/a | Create new, update, and delete notification policies. |
### Time intervals
Permissions for managing mute time intervals.
| Action | Applicable scope | Description |
| ------------------------------------------- | ---------------- | -------------------------------------------------- |
| `alert.notifications.time-intervals:read` | n/a | Read mute time intervals. |
| `alert.notifications.time-intervals:write` | n/a | Create new or update existing mute time intervals. |
| `alert.notifications.time-intervals:delete` | n/a | Delete existing time intervals. |
### Templates
Permissions for managing notification templates.
| Action | Applicable scope | Description |
| ------------------------------------------ | ---------------- | ------------------------------------------------------------------------------- |
| `alert.notifications.templates:read` | n/a | Read templates. |
| `alert.notifications.templates:write` | n/a | Create new or update existing templates. |
| `alert.notifications.templates:delete` | n/a | Delete existing templates. |
| `alert.notifications.templates.test:write` | n/a | Test templates with custom payloads (preview and payload editor functionality). |
### General notifications
Legacy permissions for managing all notification resources.
| Action | Applicable scope | Description |
| --------------------------- | ---------------- | -------------------------------------------------------------------------------------------------------- |
| `alert.notifications:read` | n/a | Read all templates, contact points, notification policies, and mute timings in the current organization. |
| `alert.notifications:write` | n/a | Manage templates, contact points, notification policies, and mute timings in the current organization. |
### External notifications
Permissions for managing notification resources in external data sources.
| Action | Applicable scope | Description |
| ------------------------------------ | -------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
| `alert.notifications.external:read` | `datasources:*`<br>`datasources:uid:*` | Read templates, contact points, notification policies, and mute timings in data sources that support alerting. |
| `alert.notifications.external:write` | `datasources:*`<br>`datasources:uid:*` | Manage templates, contact points, notification policies, and mute timings in data sources that support alerting. |
### Provisioning
Permissions for managing alerting resources via the provisioning API.
| Action | Applicable scope | Description |
| ---------------------------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `alert.provisioning:read` | n/a | Read all Grafana alert rules, notification policies, etc via provisioning API. Permissions to folders and data source are not required. |
| `alert.provisioning.secrets:read` | n/a | Same as `alert.provisioning:read` plus ability to export resources with decrypted secrets. |
| `alert.provisioning:write` | n/a | Update all Grafana alert rules, notification policies, etc via provisioning API. Permissions to folders and data source are not required. |
| `alert.rules.provisioning:read` | n/a | Read Grafana alert rules via provisioning API. More specific than `alert.provisioning:read`. |
| `alert.rules.provisioning:write` | n/a | Create, update, and delete Grafana alert rules via provisioning API. More specific than `alert.provisioning:write`. |
| `alert.notifications.provisioning:read` | n/a | Read notification resources (contact points, notification policies, templates, time intervals) via provisioning API. More specific than `alert.provisioning:read`. |
| `alert.notifications.provisioning:write` | n/a | Create, update, and delete notification resources via provisioning API. More specific than `alert.provisioning:write`. |
| `alert.provisioning.provenance:write` | n/a | Set provisioning status for alerting resources. Cannot be used alone. Requires user to have permissions to access resources. |
| Action | Applicable scope | Description |
| -------------------------------------------- | -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `alert.instances.external:read` | `datasources:*`<br>`datasources:uid:*` | Read alerts and silences in data sources that support alerting. |
| `alert.instances.external:write` | `datasources:*`<br>`datasources:uid:*` | Manage alerts and silences in data sources that support alerting. |
| `alert.instances:create` | n/a | Create silences in the current organization. |
| `alert.instances:read` | n/a | Read alerts and silences in the current organization. |
| `alert.instances:write` | n/a | Update and expire silences in the current organization. |
| `alert.notifications.external:read` | `datasources:*`<br>`datasources:uid:*` | Read templates, contact points, notification policies, and mute timings in data sources that support alerting. |
| `alert.notifications.external:write` | `datasources:*`<br>`datasources:uid:*` | Manage templates, contact points, notification policies, and mute timings in data sources that support alerting. |
| `alert.notifications:write` | n/a | Manage templates, contact points, notification policies, and mute timings in the current organization. |
| `alert.notifications:read` | n/a | Read all templates, contact points, notification policies, and mute timings in the current organization. |
| `alert.rules.external:read`                  | `datasources:*`<br>`datasources:uid:*` | Read alert rules in data sources that support alerting (Prometheus, Mimir, and Loki).                                                                                                                                                              |
| `alert.rules.external:write` | `datasources:*`<br>`datasources:uid:*` | Create, update, and delete alert rules in data sources that support alerting (Mimir and Loki). |
| `alert.rules:create` | `folders:*`<br>`folders:uid:*` | Create Grafana alert rules in a folder and its subfolders. Combine this permission with `folders:read` in a scope that includes the folder and `datasources:query` in the scope of data sources the user can query. |
| `alert.rules:delete` | `folders:*`<br>`folders:uid:*` | Delete Grafana alert rules in a folder and its subfolders. Combine this permission with `folders:read` in a scope that includes the folder. |
| `alert.rules:read` | `folders:*`<br>`folders:uid:*` | Read Grafana alert rules in a folder and its subfolders. Combine this permission with `folders:read` in a scope that includes the folder. |
| `alert.rules:write` | `folders:*`<br>`folders:uid:*` | Update Grafana alert rules in a folder and its subfolders. Combine this permission with `folders:read` in a scope that includes the folder. To allow query modifications add `datasources:query` in the scope of data sources the user can query. |
| `alert.silences:create` | `folders:*`<br>`folders:uid:*` | Create rule-specific silences in a folder and its subfolders. |
| `alert.silences:read` | `folders:*`<br>`folders:uid:*` | Read all general silences and rule-specific silences in a folder and its subfolders. |
| `alert.silences:write` | `folders:*`<br>`folders:uid:*` | Update and expire rule-specific silences in a folder and its subfolders. |
| `alert.provisioning:read` | n/a | Read all Grafana alert rules, notification policies, etc via provisioning API. Permissions to folders and data source are not required. |
| `alert.provisioning.secrets:read` | n/a | Same as `alert.provisioning:read` plus ability to export resources with decrypted secrets. |
| `alert.provisioning:write` | n/a | Update all Grafana alert rules, notification policies, etc via provisioning API. Permissions to folders and data source are not required. |
| `alert.provisioning.provenance:write`        | n/a                                    | Set provisioning status for alerting resources. Cannot be used alone. Requires user to have permissions to access resources.                                                                                                                       |
| `alert.notifications.receivers:read` | `receivers:*`<br>`receivers:uid:*` | Read contact points. |
| `alert.notifications.receivers.secrets:read` | `receivers:*`<br>`receivers:uid:*` | Export contact points with decrypted secrets. |
| `alert.notifications.receivers:create`       | n/a                                    | Create a new contact point. The creator is automatically granted full access to the created contact point.                                                                                                                                         |
| `alert.notifications.receivers:write` | `receivers:*`<br>`receivers:uid:*` | Update existing contact points. |
| `alert.notifications.receivers:delete` | `receivers:*`<br>`receivers:uid:*` | Update and delete existing contact points. |
| `receivers.permissions:read` | `receivers:*`<br>`receivers:uid:*` | Read permissions for contact points. |
| `receivers.permissions:write` | `receivers:*`<br>`receivers:uid:*` | Manage permissions for contact points. |
| `alert.notifications.time-intervals:read` | n/a | Read mute time intervals. |
| `alert.notifications.time-intervals:write` | n/a | Create new or update existing mute time intervals. |
| `alert.notifications.time-intervals:delete` | n/a | Delete existing time intervals. |
| `alert.notifications.templates:read` | n/a | Read templates. |
| `alert.notifications.templates:write` | n/a | Create new or update existing templates. |
| `alert.notifications.templates:delete` | n/a | Delete existing templates. |
| `alert.notifications.templates.test:write` | n/a | Test templates with custom payloads (preview and payload editor functionality). |
| `alert.notifications.routes:read` | n/a | Read notification policies. |
| `alert.notifications.routes:write`           | n/a                                    | Create new, update, and delete notification policies.                                                                                                                                                                                              |
To help plan your RBAC rollout strategy, refer to [Plan your RBAC rollout strategy](https://grafana.com/docs/grafana/next/administration/roles-and-permissions/access-control/plan-rbac-rollout-strategy/).
@@ -16,7 +16,7 @@ title: Manage access using folders or data sources
weight: 200
---
# Manage access using folders or data sources
## Manage access using folders or data sources
You can extend the access provided by a role to alert rules and rule-specific silences by assigning permissions to individual folders or data sources.
@@ -55,7 +55,7 @@ Details of the fixed roles and the access they provide for Grafana Alerting are
| Full read-only access: `fixed:alerting:reader` | All permissions from `fixed:alerting.rules:reader` <br>`fixed:alerting.instances:reader`<br>`fixed:alerting.notifications:reader` | Read alert rules, alert instances, silences, contact points, and notification policies in Grafana and external providers. |
| Read via Provisioning API + Export Secrets: `fixed:alerting.provisioning.secrets:reader` | `alert.provisioning:read` and `alert.provisioning.secrets:read` | Read alert rules, alert instances, silences, contact points, and notification policies using the provisioning API and use export with decrypted secrets. |
| Access to alert rules provisioning API: `fixed:alerting.provisioning:writer` | `alert.provisioning:read` and `alert.provisioning:write` | Manage all alert rules, notification policies, contact points, templates, in the organization using the provisioning API. |
| Set provisioning status: `fixed:alerting.provisioning.provenance:writer` | `alert.provisioning.provenance:write` | Set provisioning rules for Alerting resources. Should be used together with other regular roles (Notifications Writer and/or Rules Writer.) |
| Set provisioning status: `fixed:alerting.provisioning.status:writer` | `alert.provisioning.provenance:write` | Set provisioning rules for Alerting resources. Should be used together with other regular roles (Notifications Writer and/or Rules Writer.) |
| Contact Point Reader: `fixed:alerting.receivers:reader` | `alert.notifications.receivers:read` for scope `receivers:*` | Read all contact points. |
| Contact Point Creator: `fixed:alerting.receivers:creator` | `alert.notifications.receivers:create` | Create a new contact point. The user is automatically granted full access to the created contact point. |
| Contact Point Writer: `fixed:alerting.receivers:writer` | `alert.notifications.receivers:read`, `alert.notifications.receivers:write`, `alert.notifications.receivers:delete` for scope `receivers:*` and <br> `alert.notifications.receivers:create` | Create a new contact point and manage all existing contact points. |
@@ -63,8 +63,8 @@ Details of the fixed roles and the access they provide for Grafana Alerting are
| Templates Writer: `fixed:alerting.templates:writer` | `alert.notifications.templates:read`, `alert.notifications.templates:write`, `alert.notifications.templates:delete`, `alert.notifications.templates.test:write` | Create new and manage existing notification templates. Test templates with custom payloads. |
| Time Intervals Reader: `fixed:alerting.time-intervals:reader` | `alert.notifications.time-intervals:read` | Read all time intervals. |
| Time Intervals Writer: `fixed:alerting.time-intervals:writer` | `alert.notifications.time-intervals:read`, `alert.notifications.time-intervals:write`, `alert.notifications.time-intervals:delete` | Create new and manage existing time intervals. |
| Notification Policies Reader: `fixed:alerting.routes:reader` | `alert.notifications.routes:read` | Read all notification policies. |
| Notification Policies Writer: `fixed:alerting.routes:writer` | `alert.notifications.routes:read`<br>`alert.notifications.routes:write` | Create new and manage existing notification policies. |
| Notification Policies Reader: `fixed:alerting.routes:reader` | `alert.notifications.routes:read` | Read all notification policies. |
| Notification Policies Writer: `fixed:alerting.routes:writer` | `alert.notifications.routes:read`<br>`alert.notifications.routes:write` | Create new and manage existing notification policies. |
## Create custom roles
@@ -16,27 +16,25 @@ weight: 150
# Configure roles and permissions
This guide explains how to configure roles and permissions for Grafana Alerting for Grafana OSS users. You'll learn how to manage access using roles, folder permissions, and contact point permissions.
A user is any individual who can log in to Grafana. Each user is associated with a role that includes permissions. Permissions determine the tasks a user can perform in the system. For example, the Admin role includes permissions for an administrator to create and delete users.
For more information, refer to [Organization roles](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/#organization-roles).
## Manage access using roles
Grafana OSS has three roles: Admin, Editor, and Viewer.
For Grafana OSS, there are three roles: Admin, Editor, and Viewer.
The following table describes the access each role provides for Grafana Alerting.
Details of the roles and the access they provide for Grafana Alerting are below.
| Role | Access |
| ------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Viewer | Read access to alert rules, notification resources (notification API, contact points, templates, time intervals, notification policies, and silences). |
| Editor | Write access to alert rules, notification resources (notification API, contact points, templates, time intervals, notification policies, and silences), and provisioning. |
| Admin | Write access to alert rules, notification resources (notification API, contact points, templates, time intervals, notification policies, and silences), and provisioning, as well as assign roles. |
| Role | Access |
| ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Admin | Write access to alert rules, notification resources (notification API, contact points, templates, time intervals, notification policies, and silences), and provisioning. |
| Editor | Write access to alert rules, notification resources (notification API, contact points, templates, time intervals, notification policies, and silences), and provisioning. |
| Viewer | Read access to alert rules, notification resources (notification API, contact points, templates, time intervals, notification policies, and silences). |
## Assign roles
To assign roles, an admin needs to complete the following steps.
To assign roles, admins need to complete the following steps.
1. Navigate to **Administration** > **Users and access** > **Users, Teams, or Service Accounts**.
1. Search for the user, team or service account you want to add a role for.
@@ -60,30 +58,32 @@ Refer to the following table for details on the additional access provided by fo
You can't use folders to customize access to notification resources.
{{< /admonition >}}
To manage folder permissions, complete the following steps:
To manage folder permissions, complete the following steps.
1. In the left-side menu, click **Dashboards**.
1. Hover your mouse cursor over a folder and click **Go to folder**.
1. Click **Manage permissions** from the Folder actions menu.
1. Update or add permissions as required.
## Manage access to contact points
## Manage access using contact point permissions
Extend or limit the access provided by a role to contact points by assigning permissions to individual contact points.
### Before you begin
Extend or limit the access provided by a role to contact points by assigning permissions to individual contact points.
This allows different users, teams, or service accounts to have customized access to read or modify specific contact points.
Refer to the following table for details on the additional access provided by contact point permissions.
| Contact point permission | Additional Access |
| ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------- |
| View | View and export contact point as well as select it on the Alert rule edit page |
| Edit | Update or delete the contact point |
| Admin | Same additional access as Edit and manage permissions for the contact point. User should have additional permissions to read users and teams. |
| Contact point permission | Additional Access |
| ----------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
| View | View and export contact point as well as select it on the Alert rule edit page |
| Edit | Update or delete the contact point |
| Admin | Same additional access as Edit and manage permissions for the contact point. User should have additional permissions to read users and teams. |
### Assign contact point permissions
### Steps
To manage contact point permissions, complete the following steps:
To manage contact point permissions, complete the following steps.
1. In the left-side menu, click **Contact points**.
1. Hover your mouse cursor over a contact point and click **More**.
@@ -24,7 +24,7 @@ Before you begin, you should have the following available:
- Administrator permissions in your Grafana instance; for more information on assigning Grafana RBAC roles, refer to [Assign RBAC roles](/docs/grafana-cloud/security-and-account-management/authentication-and-permissions/access-control/assign-rbac-roles/).
{{< admonition type="note" >}}
Save all of the following Terraform configuration files in the same directory.
All of the following Terraform configuration files should be saved in the same directory.
{{< /admonition >}}
## Configure the Grafana provider
@@ -1776,13 +1776,6 @@ Specify the frequency of polling for Alertmanager configuration changes. The def
The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), for example, 30s or 1m.
#### `alertmanager_max_template_output_bytes`
Maximum size in bytes that the expanded result of any single template expression (e.g. {{ .CommonAnnotations.description }}, {{ .ExternalURL }}, etc.) may reach during notification rendering.
The limit is checked after template execution for each templated field, but before the value is inserted into the final notification payload sent to the receiver.
If exceeded, the notification will contain output truncated up to the limit and a warning will be logged.
The default value is 10,485,760 bytes (10 MiB).
#### `ha_redis_address`
Redis server address or addresses. It can be a single Redis address if using Redis standalone,
@@ -43,36 +43,24 @@ If the data source doesn't support loading the full range logs volume, the logs
The following sections provide detailed explanations on how to visualize and interact with individual logs in Explore.
### Infinite scroll
### Logs navigation
<!-- vale Grafana.GoogleWill = NO -->
Logs navigation, located at the right side of the log lines, can be used to easily request additional logs by clicking **Older logs** at the bottom of the navigation. This is especially useful when you reach the line limit and you want to see more logs. Each request run from the navigation displays in the navigation as separate page. Every page shows `from` and `to` timestamps of the incoming log lines. You can see previous results by clicking on each page. Explore caches the last five requests run from the logs navigation so you're not re-running the same queries when clicking on the pages, saving time and resources.
When you reach the bottom of the list of logs, you will see the message `Scroll to load more`. If you continue scrolling and the displayed logs are within the selected time interval, Grafana will load more logs. When the sort order is "newest first" you receive older logs, and when the sort order is "oldest first" you get newer logs.
<!-- vale Grafana.GoogleWill = YES -->
![Navigate logs in Explore](/static/img/docs/explore/navigate-logs-8-0.png)
### Visualization options
You have the option to customize the display of logs and choose which columns to show. Following is a list of available options.
<!-- vale Grafana.Spelling = NO -->
| Option | Description |
| ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Expand / Collapse | Expand or collapse the controls toolbar. |
| Scroll to bottom | Jump to the bottom of the logs table. |
| Oldest Logs First / Newest logs first | Sort direction (ascending or descending). |
| Search logs / Close search | Click to open/close the client side string search of the displayed logs result. |
| Deduplication | **None** does not perform any deduplication, **Exact** matches are done on the whole line except for date fields. **Numbers** matches are done on the line after stripping out numbers such as durations, IP addresses, and so on. **Signature** is the most aggressive deduplication as it strips all letters and numbers and matches on the remaining whitespace and punctuation. |
| Filter levels | Filter logs in display by log level: All levels, Info, Debug, Warning, Error. |
| Set Timestamp format | Hide timestamps (disabled), Show milliseconds timestamps, Show nanoseconds timestamps. |
| Set line wrap | Disable line wrapping, Enable line wrapping, Enable line wrapping and prettify JSON. |
| Enable highlighting | Plain text, Highlight text. |
| Font size | Small font (default), Large font. |
| Unescaped newlines | Only displayed if the logs contain unescaped new lines. Click to unescape and display as new lines. |
| Download logs | Plain text (txt), JavaScript Object Notation (JSON), Comma-separated values (CSV) |
<!-- vale Grafana.Spelling = YES -->
| Option | Description |
| ------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **Time** | Shows or hides the time column. This is the timestamp associated with the log line as reported from the data source. |
| **Unique labels** | Shows or hides the unique labels column that includes only non-common labels. All common labels are displayed above. |
| **Wrap lines** | Set this to `true` if you want the display to use line wrapping. If set to `false`, it will result in horizontal scrolling. |
| **Prettify JSON** | Set this to `true` to pretty print all JSON logs. This setting does not affect logs in any format other than JSON. |
| **Deduplication** | Log data can be very repetitive. Explore hides duplicate log lines using a few different deduplication algorithms. **Exact** matches are done on the whole line except for date fields. **Numbers** matches are done on the line after stripping out numbers such as durations, IP addresses, and so on. **Signature** is the most aggressive deduplication as it strips all letters and numbers and matches on the remaining whitespace and punctuation. |
| **Display results order** | You can change the order of received logs from the default descending order (newest first) to ascending order (oldest first). |
### Download log lines
@@ -155,31 +143,16 @@ Click the **eye icon** to select a subset of fields to visualize in the logs lis
Each field has a **stats icon**, which displays ad-hoc statistics in relation to all displayed logs.
For data sources that support log types, such as Loki, instead of a single view containing all fields, fields will be displayed grouped by their type: Indexed Labels, Parsed fields, and Structured Metadata.
#### Links
Grafana provides data links or correlations, allowing you to convert any part of a log message into an internal or external link. These links enable you to navigate to related data or external resources, offering a seamless and convenient way to explore additional information.
{{< figure src="/static/img/docs/explore/data-link-9-4.png" max-width="800px" caption="Data link in Explore" >}}
#### Log details modes
There are two modes available to view log details:
- **Inline** The default, displays log details below the log line.
- **Sidebar** Displays log details in a sidebar view.
No matter which display mode you are currently viewing, you can change it by clicking the mode control icon.
### Log context
Log context is a feature that displays additional lines of context surrounding a log entry that matches a specific search query. This helps in understanding the context of the log entry and is similar to the `-C` parameter in the `grep` command.
If you're using Loki for your logs, to modify your log context queries, you can use the Loki log context query editor at the top of the table. You can activate this editor by clicking the menu for the log line, and selecting **Show context**. Within the **Log Context** view, you have the option to modify your search by removing one or more label filters from the log stream. If your original query used a parser, you can refine your search by leveraging extracted label filters.
Change the **Context time window** option to look for logs within a specific time interval around your log line.
Toggle **Wrap lines** if you encounter long lines of text that make it difficult to read and analyze the context around log entries. By enabling this toggle, Grafana automatically wraps long lines of text to fit within the visible width of the viewer, making the log entries easier to read and understand.
Click **Open in split view** to execute the context query for a log entry in a split screen in the Explore view. Clicking this button opens a new Explore pane with the context query displayed alongside the log entry, making it easier to analyze and understand the surrounding context.
@@ -31,7 +31,7 @@ refs:
_Logs_ are structured records of events or messages generated by a system or application&mdash;that is, a series of text records with status updates from your system or app. They generally include timestamps, messages, and context information like the severity of the logged event.
The logs visualization displays these records from data sources that support logs, such as Elastic, Influx, and Loki. The logs visualization shows, by default, the timestamp, a colored string representing the log status, the log line body, as well as collapsible log events that help you analyze the information generated.
The logs visualization displays these records from data sources that support logs, such as Elastic, Influx, and Loki. The logs visualization has colored indicators of log status, as well as collapsible log events that help you analyze the information generated.
{{< figure src="/media/docs/grafana/panels-visualizations/screenshot-logs-v12.3.png" max-width="750px" alt="Logs visualization" >}}
@@ -100,16 +100,16 @@ Use these settings to refine your visualization:
| Option | Description |
| --------------- | --------------- |
| Show timestamps | Show or hide the time column. This is the timestamp associated with the log line as reported from the data source. |
| Time | Show or hide the time column. This is the timestamp associated with the log line as reported from the data source. |
| Unique labels | Show or hide the unique labels column, which shows only non-common labels. |
| Common labels | Show or hide the common labels. |
| Wrap lines | Turn line wrapping on or off. |
| Prettify JSON | Toggle the switch on to pretty print all JSON logs. This setting does not affect logs in any format other than JSON. |
| Enable highlighting | Use a predefined syntax coloring grammar to highlight relevant parts of the log lines |
| Enable logs highlighting | Experimental. Use a predefined coloring scheme to highlight relevant parts of the log lines. Subtle colors are added to the log lines to improve readability and help with identifying important information faster. |
| Enable log details | Toggle the switch on to see an extendable area with log details including labels and detected fields. Each field or label has a stats icon to display ad-hoc statistics in relation to all displayed logs. The default setting is on. |
| Log Details panel mode | Choose to display the log details in a sidebar panel or inline, below the log line. |
| Enable infinite scrolling | Request more results by scrolling to the bottom of the logs list. |
| Show controls | Display controls to jump to the last or first log line, and filters by log level |
| Font size | Select between the default font size and small font size. |
| Log details panel mode | Choose to display the log details in a sidebar panel or inline, below the log line. The default mode depends on viewport size: the default mode for smaller viewports is inline, while for larger ones, it's sidebar. You can also change mode dynamically in the panel by clicking the mode control. |
| Enable infinite scrolling | Request more results by scrolling to the bottom of the logs list. When you reach the bottom of the list of logs, if you continue scrolling and the displayed logs are within the selected time interval, you can request to load more logs. When the sort order is **Newest first**, you receive older logs, and when the sort order is **Oldest first** you get newer logs. |
| Show controls | Display controls to jump to the last or first log line, and filter by log level. |
| Font size | Select between the **Default** and **Small** font sizes. |
| Deduplication | Hide log messages that are duplicates of others shown, according to your selected criteria. Choose from: <ul><li>**Exact** - Ignoring ISO datetimes.</li><li>**Numerical** - Ignoring only those that differ by numbers such as IPs or latencies.</li><li>**Signatures** - Removing successive lines with identical punctuation and white space.</li></ul> |
| Order | Set whether to show results **Newest first** or **Oldest first**. |
+1 -1
View File
@@ -87,7 +87,7 @@ require (
github.com/googleapis/gax-go/v2 v2.15.0 // @grafana/grafana-backend-group
github.com/gorilla/mux v1.8.1 // @grafana/grafana-backend-group
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // @grafana/grafana-app-platform-squad
github.com/grafana/alerting v0.0.0-20251212143239-491433b332b7 // @grafana/alerting-backend
github.com/grafana/alerting v0.0.0-20251204145817-de8c2bbf9eba // @grafana/alerting-backend
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // @grafana/identity-access-team
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 // @grafana/identity-access-team
github.com/grafana/dataplane/examples v0.0.1 // @grafana/observability-metrics
+2 -2
View File
@@ -1613,8 +1613,8 @@ github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7Fsg
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grafana/alerting v0.0.0-20251212143239-491433b332b7 h1:ZzG/gCclEit9w0QUfQt9GURcOycAIGcsQAhY1u0AEX0=
github.com/grafana/alerting v0.0.0-20251212143239-491433b332b7/go.mod h1:l7v67cgP7x72ajB9UPZlumdrHqNztpKoqQ52cU8T3LU=
github.com/grafana/alerting v0.0.0-20251204145817-de8c2bbf9eba h1:psKWNETD5nGxmFAlqnWsXoRyUwSa2GHNEMSEDKGKfQ4=
github.com/grafana/alerting v0.0.0-20251204145817-de8c2bbf9eba/go.mod h1:l7v67cgP7x72ajB9UPZlumdrHqNztpKoqQ52cU8T3LU=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
-5
View File
@@ -1189,11 +1189,6 @@ export interface FeatureToggles {
*/
panelTimeSettings?: boolean;
/**
* Enables the raw DSL query editor in the Elasticsearch data source
* @default false
*/
elasticsearchRawDSLQuery?: boolean;
/**
* Enables app platform API for annotations
* @default false
*/
+3 -4
View File
@@ -273,7 +273,7 @@ export interface DataSourceWithSupplementaryQueriesSupport<TQuery extends DataQu
/**
* Returns supplementary query types that data source supports.
*/
getSupportedSupplementaryQueryTypes(dsRequest?: DataQueryRequest<DataQuery>): SupplementaryQueryType[];
getSupportedSupplementaryQueryTypes(): SupplementaryQueryType[];
/**
* Returns a supplementary query to be used to fetch supplementary data based on the provided type and original query.
* If the provided query is not suitable for the provided supplementary query type, undefined should be returned.
@@ -283,8 +283,7 @@ export interface DataSourceWithSupplementaryQueriesSupport<TQuery extends DataQu
export const hasSupplementaryQuerySupport = <TQuery extends DataQuery>(
datasource: DataSourceApi | (DataSourceApi & DataSourceWithSupplementaryQueriesSupport<TQuery>),
type: SupplementaryQueryType,
dsRequest?: DataQueryRequest<DataQuery>
type: SupplementaryQueryType
): datasource is DataSourceApi & DataSourceWithSupplementaryQueriesSupport<TQuery> => {
if (!datasource) {
return false;
@@ -294,7 +293,7 @@ export const hasSupplementaryQuerySupport = <TQuery extends DataQuery>(
('getDataProvider' in datasource || 'getSupplementaryRequest' in datasource) &&
'getSupplementaryQuery' in datasource &&
'getSupportedSupplementaryQueryTypes' in datasource &&
datasource.getSupportedSupplementaryQueryTypes(dsRequest).includes(type)
datasource.getSupportedSupplementaryQueryTypes().includes(type)
);
};
@@ -35,10 +35,6 @@ export interface TraceToMetricsData extends DataSourceJsonData {
interface Props extends DataSourcePluginOptionsEditorProps<TraceToMetricsData> {}
export function TraceToMetricsSettings({ options, onOptionsChange }: Props) {
const supportedDataSourceTypes = [
'prometheus',
'victoriametrics-metrics-datasource', // external
];
const styles = useStyles2(getStyles);
return (
@@ -51,10 +47,10 @@ export function TraceToMetricsSettings({ options, onOptionsChange }: Props) {
>
<DataSourcePicker
inputId="trace-to-metrics-data-source-picker"
pluginId="prometheus"
current={options.jsonData.tracesToMetrics?.datasourceUid}
noDefault={true}
width={40}
filter={(ds) => supportedDataSourceTypes.includes(ds.type)}
onChange={(ds: DataSourceInstanceSettings) =>
updateDatasourcePluginJsonDataOption({ onOptionsChange, options }, 'tracesToMetrics', {
...options.jsonData.tracesToMetrics,
@@ -387,10 +387,6 @@ export interface ElasticsearchDataQuery extends common.DataQuery {
* List of bucket aggregations
*/
bucketAggs?: Array<BucketAggregation>;
/**
* Editor type
*/
editorType?: string;
/**
* List of metric aggregations
*/
@@ -399,10 +395,6 @@ export interface ElasticsearchDataQuery extends common.DataQuery {
* Lucene query
*/
query?: string;
/**
* Raw DSL query
*/
rawDSLQuery?: string;
/**
* Name of time field
*/
@@ -1,6 +1,6 @@
import { Chance } from 'chance';
import { DashboardsTreeItem, DashboardViewItem, ManagerKind, UIDashboardViewItem } from '../types/browse-dashboards';
import { DashboardsTreeItem, DashboardViewItem, UIDashboardViewItem } from '../types/browse-dashboards';
function wellFormedEmptyFolder(
seed = 1,
@@ -64,14 +64,13 @@ function wellFormedFolder(
}
export function treeViewersCanEdit() {
const [, { folderA, folderC, folderD }] = wellFormedTree();
const [, { folderA, folderC }] = wellFormedTree();
return [
[folderA, folderC, folderD],
[folderA, folderC],
{
folderA,
folderC,
folderD,
},
] as const;
}
@@ -91,8 +90,6 @@ export function wellFormedTree() {
const folderB = wellFormedFolder(seed++);
const folderB_empty = wellFormedEmptyFolder(seed++);
const folderC = wellFormedFolder(seed++);
// folderD is marked as managed by repo (git-synced) for testing disabled folder behavior
const folderD = wellFormedFolder(seed++, {}, { managedBy: ManagerKind.Repo });
const dashbdD = wellFormedDashboard(seed++);
const dashbdE = wellFormedDashboard(seed++);
@@ -110,7 +107,6 @@ export function wellFormedTree() {
folderB,
folderB_empty,
folderC,
folderD,
dashbdD,
dashbdE,
],
@@ -127,7 +123,6 @@ export function wellFormedTree() {
folderB,
folderB_empty,
folderC,
folderD,
dashbdD,
dashbdE,
},
@@ -4,7 +4,6 @@ import { HttpResponse, http } from 'msw';
import { treeViewersCanEdit, wellFormedTree } from '../../../fixtures/folders';
const [mockTree, { folderB }] = wellFormedTree();
// folderD is included in mockTree and will be returned by the handlers with managedBy: 'repo'
const [mockTreeThatViewersCanEdit] = treeViewersCanEdit();
const collator = new Intl.Collator();
@@ -49,7 +48,6 @@ const listFoldersHandler = () =>
id: random.integer({ min: 1, max: 1000 }),
uid: folder.item.uid,
title: folder.item.kind === 'folder' ? folder.item.title : "invalid - this shouldn't happen",
...('managedBy' in folder.item && folder.item.managedBy ? { managedBy: folder.item.managedBy } : {}),
};
})
.sort((a, b) => collator.compare(a.title, b.title)) // API always sorts by title
@@ -78,7 +76,6 @@ const getFolderHandler = () =>
uid: folder?.item.uid,
...additionalProperties,
...(accessControlQueryParam ? { accessControl: mockAccessControl } : {}),
...('managedBy' in folder.item && folder.item.managedBy ? { managedBy: folder.item.managedBy } : {}),
});
});
@@ -5,7 +5,6 @@ import { wellFormedTree } from '../../../../fixtures/folders';
import { getErrorResponse } from '../../../helpers';
const [mockTree, { folderB }] = wellFormedTree();
// folderD is included in mockTree and will be returned by the handlers with managedBy: 'repo'
const baseResponse = {
kind: 'Folder',
@@ -25,7 +24,7 @@ const folderToAppPlatform = (folder: (typeof mockTree)[number]['item'], id?: num
// TODO: Generalise annotations in fixture data
'grafana.app/createdBy': 'user:1',
'grafana.app/updatedBy': 'user:2',
'grafana.app/managedBy': 'managedBy' in folder ? folder.managedBy : 'user',
'grafana.app/managedBy': 'user',
'grafana.app/updatedTimestamp': '2024-01-01T00:00:00Z',
'grafana.app/folder': folder.kind === 'folder' ? folder.parentUID : undefined,
},
@@ -3,7 +3,7 @@
// @grafana/schema?
// New package @grafana/core? @grafana/types?
export enum ManagerKind {
enum ManagerKind {
Repo = 'repo',
Terraform = 'terraform',
Kubectl = 'kubectl',
+6 -4
View File
@@ -112,15 +112,17 @@ func TestGetHomeDashboard(t *testing.T) {
}
func newTestLive(t *testing.T) *live.GrafanaLive {
features := featuremgmt.WithFeatures()
cfg := setting.NewCfg()
cfg.AppURL = "http://localhost:3000/"
gLive, err := live.ProvideService(cfg,
gLive, err := live.ProvideService(nil, cfg,
routing.NewRouteRegister(),
nil, nil, nil, nil,
nil,
&usagestats.UsageStatsMock{T: t},
featuremgmt.WithFeatures(),
&dashboards.FakeDashboardService{}, nil)
features, acimpl.ProvideAccessControl(features),
&dashboards.FakeDashboardService{},
nil, nil)
require.NoError(t, err)
return gLive
}
+3 -1
View File
@@ -294,7 +294,6 @@ func (hs *HTTPServer) SearchOrgUsersWithPaging(c *contextmodel.ReqContext) respo
}
func (hs *HTTPServer) searchOrgUsersHelper(c *contextmodel.ReqContext, query *org.SearchOrgUsersQuery) (*org.SearchOrgUsersQueryResult, error) {
query.ExcludeHiddenUsers = true
result, err := hs.orgService.SearchOrgUsers(c.Req.Context(), query)
if err != nil {
return nil, err
@@ -304,6 +303,9 @@ func (hs *HTTPServer) searchOrgUsersHelper(c *contextmodel.ReqContext, query *or
userIDs := map[string]bool{}
authLabelsUserIDs := make([]int64, 0, len(result.OrgUsers))
for _, user := range result.OrgUsers {
if dtos.IsHiddenUser(user.Login, c.SignedInUser, hs.Cfg) {
continue
}
user.AvatarURL = dtos.GetGravatarUrl(hs.Cfg, user.Email)
userIDs[fmt.Sprint(user.UserID)] = true
+1 -18
View File
@@ -171,16 +171,11 @@ func TestIntegrationOrgUsersAPIEndpoint_userLoggedIn(t *testing.T) {
orgService.ExpectedSearchOrgUsersResult = &org.SearchOrgUsersQueryResult{
OrgUsers: []*org.OrgUserDTO{
{Login: testUserLogin, Email: "testUser@grafana.com"},
{Login: "user1", Email: "user1@grafana.com"},
{Login: "user2", Email: "user2@grafana.com"},
},
}
orgService.SearchOrgUsersFn = func(ctx context.Context, query *org.SearchOrgUsersQuery) (*org.SearchOrgUsersQueryResult, error) {
require.True(t, query.ExcludeHiddenUsers)
return orgService.ExpectedSearchOrgUsersResult, nil
}
defer func() { orgService.SearchOrgUsersFn = nil }()
sc.handlerFunc = hs.GetOrgUsersForCurrentOrg
sc.fakeReqWithParams("GET", sc.url, map[string]string{}).exec()
@@ -196,18 +191,6 @@ func TestIntegrationOrgUsersAPIEndpoint_userLoggedIn(t *testing.T) {
loggedInUserScenarioWithRole(t, "When calling GET as an admin on", "GET", "api/org/users/lookup",
"api/org/users/lookup", org.RoleAdmin, func(sc *scenarioContext) {
orgService.ExpectedSearchOrgUsersResult = &org.SearchOrgUsersQueryResult{
OrgUsers: []*org.OrgUserDTO{
{Login: testUserLogin, Email: "testUser@grafana.com"},
{Login: "user2", Email: "user2@grafana.com"},
},
}
orgService.SearchOrgUsersFn = func(ctx context.Context, query *org.SearchOrgUsersQuery) (*org.SearchOrgUsersQueryResult, error) {
require.True(t, query.ExcludeHiddenUsers)
return orgService.ExpectedSearchOrgUsersResult, nil
}
defer func() { orgService.SearchOrgUsersFn = nil }()
sc.handlerFunc = hs.GetOrgUsersForCurrentOrgLookup
sc.fakeReqWithParams("GET", sc.url, map[string]string{}).exec()
+2 -9
View File
@@ -19,18 +19,11 @@ func (NoopBackend) Shutdown() {}
func (NoopBackend) String() string { return "" }
// NoopPolicyRuleProvider is a no-op implementation of PolicyRuleProvider
type NoopPolicyRuleProvider struct{}
func ProvideNoopPolicyRuleProvider() PolicyRuleProvider { return &NoopPolicyRuleProvider{} }
func (NoopPolicyRuleProvider) PolicyRuleProvider(PolicyRuleEvaluators) audit.PolicyRuleEvaluator {
return NoopPolicyRuleEvaluator{}
}
// NoopPolicyRuleEvaluator is a no-op implementation of audit.PolicyRuleEvaluator
type NoopPolicyRuleEvaluator struct{}
func ProvideNoopPolicyRuleEvaluator() audit.PolicyRuleEvaluator { return &NoopPolicyRuleEvaluator{} }
func (NoopPolicyRuleEvaluator) EvaluatePolicyRule(authorizer.Attributes) audit.RequestAuditConfig {
return audit.RequestAuditConfig{Level: auditinternal.LevelNone}
}
-59
View File
@@ -1,59 +0,0 @@
package auditing
import (
"slices"
"github.com/grafana/grafana/pkg/apimachinery/utils"
"k8s.io/apimachinery/pkg/runtime/schema"
auditinternal "k8s.io/apiserver/pkg/apis/audit"
"k8s.io/apiserver/pkg/audit"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
)
// PolicyRuleEvaluators is a map of API group+version to audit.PolicyRuleEvaluator
type PolicyRuleEvaluators = map[schema.GroupVersion]audit.PolicyRuleEvaluator
type PolicyRuleProvider interface {
PolicyRuleProvider(evaluators PolicyRuleEvaluators) audit.PolicyRuleEvaluator
}
// PolicyRuleEvaluator alias for easier imports.
type PolicyRuleEvaluator = audit.PolicyRuleEvaluator
// DefaultGrafanaPolicyRuleEvaluator provides a sane default configuration for audit logging for API group+versions.
type defaultGrafanaPolicyRuleEvaluator struct{}
var _ PolicyRuleEvaluator = &defaultGrafanaPolicyRuleEvaluator{}
func NewDefaultGrafanaPolicyRuleEvaluator() audit.PolicyRuleEvaluator {
return defaultGrafanaPolicyRuleEvaluator{}
}
// EvaluatePolicyRule decides how a single request should be audited:
// noisy traffic (non-resource requests, watches) and privileged loopback
// traffic are skipped; everything else is audited at metadata level once
// the response has completed.
func (defaultGrafanaPolicyRuleEvaluator) EvaluatePolicyRule(attrs authorizer.Attributes) audit.RequestAuditConfig {
	skip := audit.RequestAuditConfig{Level: auditinternal.LevelNone}

	// Non-resource and watch requests are too noisy to audit.
	if !attrs.IsResourceRequest() || attrs.GetVerb() == utils.VerbWatch {
		return skip
	}

	// The loopback client runs as part of the privileged group, so auditing
	// those requests would duplicate entries for requests initiated in `/api/`.
	if u := attrs.GetUser(); u != nil && slices.Contains(u.GetGroups(), user.SystemPrivilegedGroup) {
		return skip
	}

	return audit.RequestAuditConfig{
		Level: auditinternal.LevelMetadata,
		// Only log on StageResponseComplete.
		OmitStages: []auditinternal.Stage{
			auditinternal.StageRequestReceived,
			auditinternal.StageResponseStarted,
			auditinternal.StagePanic,
		},
		OmitManagedFields: false, // setting it to true causes extra copying/unmarshalling
	}
}
-73
View File
@@ -1,73 +0,0 @@
package auditing_test
import (
"testing"
"github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/grafana/grafana/pkg/apiserver/auditing"
"github.com/stretchr/testify/require"
auditinternal "k8s.io/apiserver/pkg/apis/audit"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
)
// TestDefaultGrafanaPolicyRuleEvaluator verifies the audit level decided by
// the default evaluator for the four request categories it distinguishes.
func TestDefaultGrafanaPolicyRuleEvaluator(t *testing.T) {
	t.Parallel()

	evaluator := auditing.NewDefaultGrafanaPolicyRuleEvaluator()
	require.NotNil(t, evaluator)

	testCases := []struct {
		name  string
		attrs authorizer.AttributesRecord
		want  auditinternal.Level
	}{
		{
			name:  "returns audit level none for non-resource requests",
			attrs: authorizer.AttributesRecord{ResourceRequest: false},
			want:  auditinternal.LevelNone,
		},
		{
			name: "returns audit level none for watch requests",
			attrs: authorizer.AttributesRecord{
				ResourceRequest: true,
				Verb:            utils.VerbWatch,
			},
			want: auditinternal.LevelNone,
		},
		{
			name: "returns audit level none for requests from privileged group",
			attrs: authorizer.AttributesRecord{
				ResourceRequest: true,
				Verb:            utils.VerbCreate,
				User: &user.DefaultInfo{
					Groups: []string{"test-group", user.SystemPrivilegedGroup},
				},
			},
			want: auditinternal.LevelNone,
		},
		{
			name: "return audit level metadata for other resource requests",
			attrs: authorizer.AttributesRecord{
				ResourceRequest: true,
				Verb:            utils.VerbCreate,
				User: &user.DefaultInfo{
					Name:   "test-user",
					Groups: []string{"test-group"},
				},
			},
			want: auditinternal.LevelMetadata,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			config := evaluator.EvaluatePolicyRule(tc.attrs)
			require.Equal(t, tc.want, config.Level)
		})
	}
}
+1 -2
View File
@@ -222,7 +222,7 @@ func RegisterAPIService(
return builder
}
func NewAPIService(ac authlib.AccessClient, features featuremgmt.FeatureToggles, folderClientProvider client.K8sHandlerProvider, datasourceProvider schemaversion.DataSourceIndexProvider, libraryElementProvider schemaversion.LibraryElementIndexProvider, resourcePermissionsSvc *dynamic.NamespaceableResourceInterface, search *SearchHandler) *DashboardsAPIBuilder {
func NewAPIService(ac authlib.AccessClient, features featuremgmt.FeatureToggles, folderClientProvider client.K8sHandlerProvider, datasourceProvider schemaversion.DataSourceIndexProvider, libraryElementProvider schemaversion.LibraryElementIndexProvider, resourcePermissionsSvc *dynamic.NamespaceableResourceInterface) *DashboardsAPIBuilder {
migration.Initialize(datasourceProvider, libraryElementProvider, migration.DefaultCacheTTL)
return &DashboardsAPIBuilder{
minRefreshInterval: "10s",
@@ -231,7 +231,6 @@ func NewAPIService(ac authlib.AccessClient, features featuremgmt.FeatureToggles,
dashboardService: &dashsvc.DashboardServiceImpl{}, // for validation helpers only
folderClientProvider: folderClientProvider,
resourcePermissionsSvc: resourcePermissionsSvc,
search: search,
isStandalone: true,
}
}
+82 -115
View File
@@ -328,124 +328,91 @@ func (b *APIBuilder) GetAuthorizer() authorizer.Authorizer {
return authorizer.DecisionDeny, "failed to find requester", err
}
return b.authorizeResource(ctx, a, id)
// Different routes may need different permissions.
// * Reading and modifying a repository's configuration requires administrator privileges.
// * Reading a repository's limited configuration (/stats & /settings) requires viewer privileges.
// * Reading a repository's files requires viewer privileges.
// * Reading a repository's refs requires viewer privileges.
// * Editing a repository's files requires editor privileges.
// * Syncing a repository requires editor privileges.
// * Exporting a repository requires administrator privileges.
// * Migrating a repository requires administrator privileges.
// * Testing a repository configuration requires administrator privileges.
// * Viewing a repository's history requires editor privileges.
switch a.GetResource() {
case provisioning.RepositoryResourceInfo.GetName():
// TODO: Support more fine-grained permissions than the basic roles. Especially on Enterprise.
switch a.GetSubresource() {
case "", "test", "jobs":
// Doing something with the repository itself.
if id.GetOrgRole().Includes(identity.RoleAdmin) {
return authorizer.DecisionAllow, "", nil
}
return authorizer.DecisionDeny, "admin role is required", nil
case "refs":
// This is strictly a read operation. It is handy on the frontend for viewers.
if id.GetOrgRole().Includes(identity.RoleViewer) {
return authorizer.DecisionAllow, "", nil
}
return authorizer.DecisionDeny, "viewer role is required", nil
case "files":
// Access to files is controlled by the AccessClient
return authorizer.DecisionAllow, "", nil
case "resources", "sync", "history":
// These are strictly read operations.
// Sync can also be somewhat destructive, but it's expected to be fine to import changes.
if id.GetOrgRole().Includes(identity.RoleEditor) {
return authorizer.DecisionAllow, "", nil
} else {
return authorizer.DecisionDeny, "editor role is required", nil
}
case "status":
if id.GetOrgRole().Includes(identity.RoleViewer) && a.GetVerb() == apiutils.VerbGet {
return authorizer.DecisionAllow, "", nil
}
return authorizer.DecisionDeny, "users cannot update the status of a repository", nil
default:
if id.GetIsGrafanaAdmin() {
return authorizer.DecisionAllow, "", nil
}
return authorizer.DecisionDeny, "unmapped subresource defaults to no access", nil
}
case "stats":
// This can leak information one shouldn't necessarily have access to.
if id.GetOrgRole().Includes(identity.RoleAdmin) {
return authorizer.DecisionAllow, "", nil
}
return authorizer.DecisionDeny, "admin role is required", nil
case "settings":
// This is strictly a read operation. It is handy on the frontend for viewers.
if id.GetOrgRole().Includes(identity.RoleViewer) {
return authorizer.DecisionAllow, "", nil
}
return authorizer.DecisionDeny, "viewer role is required", nil
case provisioning.JobResourceInfo.GetName(),
provisioning.HistoricJobResourceInfo.GetName():
// Jobs are shown on the configuration page.
if id.GetOrgRole().Includes(identity.RoleAdmin) {
return authorizer.DecisionAllow, "", nil
}
return authorizer.DecisionDeny, "admin role is required", nil
default:
// We haven't bothered with this kind yet.
if id.GetIsGrafanaAdmin() {
return authorizer.DecisionAllow, "", nil
}
return authorizer.DecisionDeny, "unmapped kind defaults to no access", nil
}
})
}
// authorizeResource handles authorization for different resources.
// Different routes may need different permissions.
// * Reading and modifying a repository's configuration requires administrator privileges.
// * Reading a repository's limited configuration (/stats & /settings) requires viewer privileges.
// * Reading a repository's files requires viewer privileges.
// * Reading a repository's refs requires viewer privileges.
// * Editing a repository's files requires editor privileges.
// * Syncing a repository requires editor privileges.
// * Exporting a repository requires administrator privileges.
// * Migrating a repository requires administrator privileges.
// * Testing a repository configuration requires administrator privileges.
// * Viewing a repository's history requires editor privileges.
func (b *APIBuilder) authorizeResource(ctx context.Context, a authorizer.Attributes, id identity.Requester) (authorizer.Decision, string, error) {
	// Dispatch on the resource kind; each helper encodes the policy above.
	switch a.GetResource() {
	case provisioning.RepositoryResourceInfo.GetName():
		return b.authorizeRepositorySubresource(a, id)
	case "stats":
		return b.authorizeStats(id)
	case "settings":
		return b.authorizeSettings(id)
	case provisioning.JobResourceInfo.GetName(), provisioning.HistoricJobResourceInfo.GetName():
		return b.authorizeJobs(id)
	default:
		// Unmapped kinds fall through to the Grafana-admin-only default.
		return b.authorizeDefault(id)
	}
}
// authorizeRepositorySubresource handles authorization for repository subresources,
// mapping each subresource to the minimum basic role it requires.
func (b *APIBuilder) authorizeRepositorySubresource(a authorizer.Attributes, id identity.Requester) (authorizer.Decision, string, error) {
	// TODO: Support more fine-grained permissions than the basic roles. Especially on Enterprise.
	switch a.GetSubresource() {
	case "", "test":
		// Doing something with the repository itself.
		if id.GetOrgRole().Includes(identity.RoleAdmin) {
			return authorizer.DecisionAllow, "", nil
		}
		return authorizer.DecisionDeny, "admin role is required", nil
	case "jobs":
		// Posting jobs requires editor privileges (for syncing).
		if id.GetOrgRole().Includes(identity.RoleAdmin) || id.GetOrgRole().Includes(identity.RoleEditor) {
			return authorizer.DecisionAllow, "", nil
		}
		return authorizer.DecisionDeny, "editor role is required", nil
	case "refs":
		// This is strictly a read operation. It is handy on the frontend for viewers.
		if id.GetOrgRole().Includes(identity.RoleViewer) {
			return authorizer.DecisionAllow, "", nil
		}
		return authorizer.DecisionDeny, "viewer role is required", nil
	case "files":
		// Access to files is controlled by the AccessClient.
		return authorizer.DecisionAllow, "", nil
	case "resources", "sync", "history":
		// These are strictly read operations.
		// Sync can also be somewhat destructive, but it's expected to be fine to import changes.
		if id.GetOrgRole().Includes(identity.RoleEditor) {
			return authorizer.DecisionAllow, "", nil
		}
		return authorizer.DecisionDeny, "editor role is required", nil
	case "status":
		// Viewers may read the status; nobody may write it through this API.
		if id.GetOrgRole().Includes(identity.RoleViewer) && a.GetVerb() == apiutils.VerbGet {
			return authorizer.DecisionAllow, "", nil
		}
		return authorizer.DecisionDeny, "users cannot update the status of a repository", nil
	default:
		// Unknown subresources are restricted to Grafana server admins.
		if id.GetIsGrafanaAdmin() {
			return authorizer.DecisionAllow, "", nil
		}
		return authorizer.DecisionDeny, "unmapped subresource defaults to no access", nil
	}
}
// authorizeStats handles authorization for the stats resource.
// Stats can leak information one shouldn't necessarily have access to,
// so they are restricted to org admins.
func (b *APIBuilder) authorizeStats(id identity.Requester) (authorizer.Decision, string, error) {
	if !id.GetOrgRole().Includes(identity.RoleAdmin) {
		return authorizer.DecisionDeny, "admin role is required", nil
	}
	return authorizer.DecisionAllow, "", nil
}
// authorizeSettings handles authorization for the settings resource.
// This is strictly a read operation; it is handy on the frontend for viewers.
func (b *APIBuilder) authorizeSettings(id identity.Requester) (authorizer.Decision, string, error) {
	if !id.GetOrgRole().Includes(identity.RoleViewer) {
		return authorizer.DecisionDeny, "viewer role is required", nil
	}
	return authorizer.DecisionAllow, "", nil
}
// authorizeJobs handles authorization for job resources.
// Jobs are shown on the configuration page, so org admin is required.
func (b *APIBuilder) authorizeJobs(id identity.Requester) (authorizer.Decision, string, error) {
	if !id.GetOrgRole().Includes(identity.RoleAdmin) {
		return authorizer.DecisionDeny, "admin role is required", nil
	}
	return authorizer.DecisionAllow, "", nil
}
// authorizeDefault handles authorization for unmapped resources.
// Kinds we haven't bothered with yet are restricted to Grafana server admins.
func (b *APIBuilder) authorizeDefault(id identity.Requester) (authorizer.Decision, string, error) {
	if !id.GetIsGrafanaAdmin() {
		return authorizer.DecisionDeny, "unmapped kind defaults to no access", nil
	}
	return authorizer.DecisionAllow, "", nil
}
// GetGroupVersion returns the provisioning API group+version served by this builder.
func (b *APIBuilder) GetGroupVersion() schema.GroupVersion {
	return provisioning.SchemeGroupVersion
}
+1 -1
View File
@@ -37,7 +37,7 @@ var WireSetExts = wire.NewSet(
// Auditing Options
auditing.ProvideNoopBackend,
auditing.ProvideNoopPolicyRuleProvider,
auditing.ProvideNoopPolicyRuleEvaluator,
)
var provisioningExtras = wire.NewSet(
-1
View File
@@ -349,7 +349,6 @@ var wireBasicSet = wire.NewSet(
dashboardservice.ProvideDashboardService,
dashboardservice.ProvideDashboardProvisioningService,
dashboardservice.ProvideDashboardPluginService,
dashboardservice.ProvideDashboardAccessService,
dashboardstore.ProvideDashboardStore,
folderimpl.ProvideService,
wire.Bind(new(folder.Service), new(*folderimpl.Service)),
+7 -9
View File
File diff suppressed because one or more lines are too long
-8
View File
@@ -9,7 +9,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/audit"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/registry/generic"
genericapiserver "k8s.io/apiserver/pkg/server"
@@ -60,13 +59,6 @@ type APIGroupAuthorizer interface {
GetAuthorizer() authorizer.Authorizer
}
// APIGroupAuditor allows different API groups to opt-in and provide their own auditing policy evaluator function.
// Auditing is only enabled if this is implemented. If no customization is needed, you can use the default evaluator,
// `pkg/apiserver/auditing.NewDefaultGrafanaPolicyRuleEvaluator()`.
type APIGroupAuditor interface {
GetPolicyRuleEvaluator() audit.PolicyRuleEvaluator
}
type APIGroupMutation interface {
// Mutate allows the builder to make changes to the object before it is persisted.
// Context is used only for timeout/deadline/cancellation and tracing information.
-27
View File
@@ -29,7 +29,6 @@ import (
"k8s.io/klog/v2"
"k8s.io/kube-openapi/pkg/common"
"github.com/grafana/grafana/pkg/apiserver/auditing"
"github.com/grafana/grafana/pkg/apiserver/endpoints/filters"
grafanarest "github.com/grafana/grafana/pkg/apiserver/rest"
"github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
@@ -498,32 +497,6 @@ func AddPostStartHooks(
return nil
}
// EvaluatorPolicyRuleFromBuilders collects the audit policy rule evaluators
// exposed by builders implementing APIGroupAuditor, keyed by each of the
// builder's non-empty group+versions. Builders that do not implement the
// interface, or that return a nil evaluator, are skipped, so the result may
// be empty.
func EvaluatorPolicyRuleFromBuilders(builders []APIGroupBuilder) auditing.PolicyRuleEvaluators {
	// Idiomatic map construction: a zero size hint (make(m, 0)) adds nothing.
	policyRuleEvaluators := make(auditing.PolicyRuleEvaluators)
	for _, b := range builders {
		auditor, ok := b.(APIGroupAuditor)
		if !ok {
			continue
		}
		policyRuleEvaluator := auditor.GetPolicyRuleEvaluator()
		if policyRuleEvaluator == nil {
			continue
		}
		// Register the same evaluator for every group+version the builder serves.
		for _, gv := range GetGroupVersions(b) {
			if gv.Empty() {
				continue
			}
			policyRuleEvaluators[gv] = policyRuleEvaluator
		}
	}
	return policyRuleEvaluators
}
func allowRegisteringResourceByInfo(allowedResources []string, name string) bool {
// trim any subresources from the name
name = strings.Split(name, "/")[0]
+5 -6
View File
@@ -28,7 +28,6 @@ import (
dataplaneaggregator "github.com/grafana/grafana/pkg/aggregator/apiserver"
"github.com/grafana/grafana/pkg/api/routing"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/apiserver/auditing"
grafanaresponsewriter "github.com/grafana/grafana/pkg/apiserver/endpoints/responsewriter"
grafanarest "github.com/grafana/grafana/pkg/apiserver/rest"
"github.com/grafana/grafana/pkg/infra/db"
@@ -116,8 +115,8 @@ type service struct {
builderMetrics *builder.BuilderMetrics
dualWriterMetrics *grafanarest.DualWriterMetrics
auditBackend audit.Backend
auditPolicyRuleProvider auditing.PolicyRuleProvider
auditBackend audit.Backend
auditPolicyRuleEvaluator audit.PolicyRuleEvaluator
}
func ProvideService(
@@ -143,7 +142,7 @@ func ProvideService(
appInstallers []appsdkapiserver.AppInstaller,
builderMetrics *builder.BuilderMetrics,
auditBackend audit.Backend,
auditPolicyRuleProvider auditing.PolicyRuleProvider,
auditPolicyRuleEvaluator audit.PolicyRuleEvaluator,
) (*service, error) {
scheme := builder.ProvideScheme()
codecs := builder.ProvideCodecFactory(scheme)
@@ -175,7 +174,7 @@ func ProvideService(
builderMetrics: builderMetrics,
dualWriterMetrics: grafanarest.NewDualWriterMetrics(reg),
auditBackend: auditBackend,
auditPolicyRuleProvider: auditPolicyRuleProvider,
auditPolicyRuleEvaluator: auditPolicyRuleEvaluator,
}
// This will be used when running as a dskit service
s.NamedService = services.NewBasicService(s.start, s.running, nil).WithName(modules.GrafanaAPIServer)
@@ -366,7 +365,7 @@ func (s *service) start(ctx context.Context) error {
// Auditing Options
serverConfig.AuditBackend = s.auditBackend
serverConfig.AuditPolicyRuleEvaluator = s.auditPolicyRuleProvider.PolicyRuleProvider(builder.EvaluatorPolicyRuleFromBuilders(s.builders))
serverConfig.AuditPolicyRuleEvaluator = s.auditPolicyRuleEvaluator
// Add OpenAPI specs for each group+version (existing builders)
err = builder.SetupConfig(
@@ -58,13 +58,6 @@ const (
RelationGetPermissions string = "get_permissions"
RelationSetPermissions string = "set_permissions"
RelationCanGet string = "can_get"
RelationCanCreate string = "can_create"
RelationCanUpdate string = "can_update"
RelationCanDelete string = "can_delete"
RelationCanGetPermissions string = "can_get_permissions"
RelationCanSetPermissions string = "can_set_permissions"
RelationSubresourceSetView string = "resource_" + RelationSetView
RelationSubresourceSetEdit string = "resource_" + RelationSetEdit
RelationSubresourceSetAdmin string = "resource_" + RelationSetAdmin
@@ -141,26 +134,6 @@ var RelationToVerbMapping = map[string]string{
RelationSetPermissions: utils.VerbSetPermissions,
}
// FolderPermissionRelation returns the optimized folder relation for permission management.
// It maps a plain action relation (get, create, ...) to its computed "can_"
// counterpart; unknown relations are returned unchanged.
func FolderPermissionRelation(relation string) string {
	switch relation {
	case RelationGet:
		return RelationCanGet
	case RelationCreate:
		return RelationCanCreate
	case RelationUpdate:
		return RelationCanUpdate
	case RelationDelete:
		return RelationCanDelete
	case RelationGetPermissions:
		return RelationCanGetPermissions
	case RelationSetPermissions:
		return RelationCanSetPermissions
	default:
		// Not an action relation with a "can_" form; pass it through.
		return relation
	}
}
// IsGroupResourceRelation reports whether relation is a member of the
// RelationsGroupResource set (i.e. valid on a group resource).
func IsGroupResourceRelation(relation string) bool {
	return isValidRelation(relation, RelationsGroupResource)
}
@@ -4,21 +4,15 @@ type folder
relations
define parent: [folder]
# Permission levels
# Action sets
define view: [user, service-account, team#member, role#assignee] or edit or view from parent
define edit: [user, service-account, team#member, role#assignee] or admin or edit from parent
define admin: [user, service-account, team#member, role#assignee] or admin from parent
define edit: [user, service-account, team#member, role#assignee] or edit from parent
define view: [user, service-account, team#member, role#assignee] or view from parent
define get: [user, service-account, team#member, role#assignee] or get from parent
define create: [user, service-account, team#member, role#assignee] or create from parent
define update: [user, service-account, team#member, role#assignee] or update from parent
define delete: [user, service-account, team#member, role#assignee] or delete from parent
define get_permissions: [user, service-account, team#member, role#assignee] or get_permissions from parent
define set_permissions: [user, service-account, team#member, role#assignee] or set_permissions from parent
# Computed actions
define can_get: admin or edit or view or get
define can_create: admin or edit or create
define can_update: admin or edit or update
define can_delete: admin or edit or delete
define can_get_permissions: admin or get_permissions
define can_set_permissions: admin or set_permissions
define get: [user, service-account, team#member, role#assignee] or view or get from parent
define create: [user, service-account, team#member, role#assignee] or edit or create from parent
define update: [user, service-account, team#member, role#assignee] or edit or update from parent
define delete: [user, service-account, team#member, role#assignee] or edit or delete from parent
define get_permissions: [user, service-account, team#member, role#assignee] or admin or get_permissions from parent
define set_permissions: [user, service-account, team#member, role#assignee] or admin or set_permissions from parent
@@ -1,947 +0,0 @@
package server
import (
"context"
"fmt"
"math/rand"
"testing"
"time"
authzv1 "github.com/grafana/authlib/authz/proto/v1"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
"github.com/grafana/grafana/pkg/services/authz/zanzana/store"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/setting"
)
const (
	benchNamespace = "default"

	// Folder tree parameters.
	foldersPerLevel = 3
	folderDepth     = 7

	// Other data generation parameters.
	numResources = 50000
	numUsers     = 1000
	numTeams     = 100

	// Timeout for List operations.
	listTimeout = 30 * time.Second

	// Resource type constants for benchmarks.
	benchDashboardGroup    = "dashboard.grafana.app"
	benchDashboardResource = "dashboards"
	benchFolderGroup       = "folder.grafana.app"
	benchFolderResource    = "folders"

	// Number of items per BatchCheck request used by BenchmarkBatchCheck.
	batchCheckSize = 50
)
// benchmarkData holds all the generated test data for benchmarks.
type benchmarkData struct {
	folders           []string          // folder UIDs
	folderDepths      map[string]int    // folder UID -> depth level
	folderParents     map[string]string // folder UID -> parent UID
	folderDescendants map[string]int    // folder UID -> number of descendants (including self)
	foldersByDepth    [][]string        // folders grouped by depth level
	resources         []string          // resource names
	resourceFolders   map[string]string // resource name -> folder UID
	users             []string          // user identifiers (e.g., "user:1")
	teams             []string          // team identifiers (e.g., "team:1")

	// Pre-computed test scenarios.
	deepestFolder        string // folder at max depth for worst-case tests
	midDepthFolder       string // folder at depth/2
	shallowFolder        string // folder at depth 1
	rootFolder           string // root level folder (depth 0)
	largestRootFolder    string // root folder with most descendants
	largestRootDescCount int    // number of descendants in largestRootFolder
	maxDepth             int    // maximum depth in the tree
}
// generateFolderHierarchy creates a balanced tree of folders.
// Each folder has `childrenPerFolder` children, up to `depth` levels deep.
// It returns the folder-parent relationship tuples together with a
// benchmarkData whose folder metadata (depths, parents, descendant counts,
// reference folders per depth) is fully populated.
//
// NOTE(review): indexes levelFolders[1] unconditionally below, so this
// assumes depth >= 1 (true for the folderDepth constant used here) — confirm
// before reusing with depth 0.
func generateFolderHierarchy(childrenPerFolder, depth int) ([]*openfgav1.TupleKey, *benchmarkData) {
	// Calculate total folders: childrenPerFolder + childrenPerFolder^2 + ... + childrenPerFolder^(depth+1)
	totalFolders := 0
	levelSize := childrenPerFolder
	for d := 0; d <= depth; d++ {
		totalFolders += levelSize
		levelSize *= childrenPerFolder
	}

	data := &benchmarkData{
		folders:           make([]string, 0, totalFolders),
		folderDepths:      make(map[string]int),
		folderParents:     make(map[string]string),
		folderDescendants: make(map[string]int),
	}
	tuples := make([]*openfgav1.TupleKey, 0, totalFolders)
	folderIdx := 0 // monotonically increasing suffix for folder UIDs

	// Track folders at each level for parent assignment.
	levelFolders := make([][]string, depth+1)
	for i := range levelFolders {
		levelFolders[i] = make([]string, 0)
	}

	// Create root level folders (depth 0). Roots have no parent tuple.
	for i := 0; i < childrenPerFolder; i++ {
		folderUID := fmt.Sprintf("folder-%d", folderIdx)
		data.folders = append(data.folders, folderUID)
		data.folderDepths[folderUID] = 0
		levelFolders[0] = append(levelFolders[0], folderUID)
		folderIdx++
	}

	// Create folders at each subsequent depth level.
	for d := 1; d <= depth; d++ {
		parentFolders := levelFolders[d-1]
		// Each parent gets exactly childrenPerFolder children.
		for _, parentUID := range parentFolders {
			for j := 0; j < childrenPerFolder; j++ {
				folderUID := fmt.Sprintf("folder-%d", folderIdx)
				data.folders = append(data.folders, folderUID)
				data.folderDepths[folderUID] = d
				data.folderParents[folderUID] = parentUID
				levelFolders[d] = append(levelFolders[d], folderUID)
				// Create parent relationship tuple.
				tuples = append(tuples, common.NewFolderParentTuple(folderUID, parentUID))
				folderIdx++
			}
		}
	}

	// Set reference folders for different depth scenarios.
	data.rootFolder = levelFolders[0][0]
	data.shallowFolder = levelFolders[0][0]
	if len(levelFolders[1]) > 0 {
		data.shallowFolder = levelFolders[1][0]
	}
	midDepth := depth / 2
	if len(levelFolders[midDepth]) > 0 {
		data.midDepthFolder = levelFolders[midDepth][0]
	}
	// Deepest folder.
	if len(levelFolders[depth]) > 0 {
		data.deepestFolder = levelFolders[depth][0]
	}

	// Calculate descendant counts for each folder (bottom-up).
	// Initialize all folders with count of 1 (self).
	for _, folder := range data.folders {
		data.folderDescendants[folder] = 1
	}
	// Process folders from deepest to shallowest, accumulating descendant counts.
	for d := depth; d >= 0; d-- {
		for _, folder := range levelFolders[d] {
			if parent, hasParent := data.folderParents[folder]; hasParent {
				data.folderDescendants[parent] += data.folderDescendants[folder]
			}
		}
	}

	// Find the root folder with the most descendants.
	for _, rootFolder := range levelFolders[0] {
		count := data.folderDescendants[rootFolder]
		if count > data.largestRootDescCount {
			data.largestRootDescCount = count
			data.largestRootFolder = rootFolder
		}
	}

	// Store folders by depth for depth-based testing.
	data.foldersByDepth = levelFolders
	data.maxDepth = depth

	return tuples, data
}
// generateResources creates numResources resource names and assigns each one
// round-robin to a folder, filling data.resources and data.resourceFolders.
// It always returns nil: resources themselves have no tuples — permissions
// are assigned to users/teams on folders or directly on resources.
func generateResources(data *benchmarkData, numResources int) []*openfgav1.TupleKey {
	data.resources = make([]string, numResources)
	data.resourceFolders = make(map[string]string, numResources)

	for i := range data.resources {
		name := fmt.Sprintf("resource-%d", i)
		folder := data.folders[i%len(data.folders)]
		data.resources[i] = name
		data.resourceFolders[name] = folder
	}
	return nil
}
// generateUsers fills data.users with numUsers identifiers of the form "user:N".
func generateUsers(data *benchmarkData, numUsers int) {
	data.users = make([]string, numUsers)
	for i := range data.users {
		data.users[i] = fmt.Sprintf("user:%d", i)
	}
}
// generateTeams fills data.teams with numTeams identifiers of the form "team:N".
func generateTeams(data *benchmarkData, numTeams int) {
	data.teams = make([]string, numTeams)
	for i := range data.teams {
		data.teams[i] = fmt.Sprintf("team:%d", i)
	}
}
// generatePermissionTuples creates various permission assignments for benchmarking.
// Users are distributed across 7 patterns: global, root folder, mid-depth folder,
// folder-scoped resource, direct resource, team-based, and no permissions.
const numPermissionPatterns = 7

// generatePermissionTuples partitions data.users into numPermissionPatterns
// contiguous index ranges and emits the permission tuples for each pattern.
// The index arithmetic below depends on the patterns staying in this order.
func generatePermissionTuples(data *benchmarkData) []*openfgav1.TupleKey {
	tuples := make([]*openfgav1.TupleKey, 0)

	// Distribute users across different permission patterns.
	usersPerPattern := len(data.users) / numPermissionPatterns

	// Pattern 1: Users with GroupResource permission (all access).
	// Users 0 to usersPerPattern-1.
	for i := 0; i < usersPerPattern; i++ {
		tuples = append(tuples, common.NewGroupResourceTuple(
			data.users[i],
			common.RelationGet,
			benchDashboardGroup,
			benchDashboardResource,
			"",
		))
	}

	// Pattern 2: Users with folder-level permission on root folders.
	// Users usersPerPattern to 2*usersPerPattern-1.
	for i := usersPerPattern; i < 2*usersPerPattern; i++ {
		folderIdx := (i - usersPerPattern) % len(data.folders)
		// Only assign to root-level folders for this pattern.
		// NOTE(review): scans forward from folderIdx for the next depth-0 folder
		// and silently assigns nothing if none exists past that index — confirm
		// that is intended.
		for j := folderIdx; j < len(data.folders); j++ {
			if data.folderDepths[data.folders[j]] == 0 {
				tuples = append(tuples, common.NewFolderTuple(
					data.users[i],
					common.RelationSetView,
					data.folders[j],
				))
				break
			}
		}
	}

	// Pattern 3: Users with folder-level permission on mid-depth folders.
	// Use relative depth range: 1/3 to 2/3 of max depth.
	// Use "view" relation which grants get through the optimized schema.
	minMidDepth := data.maxDepth / 3
	maxMidDepth := 2 * data.maxDepth / 3
	if maxMidDepth < minMidDepth {
		maxMidDepth = minMidDepth
	}
	// Collect folders in the mid-depth range.
	var midDepthFolders []string
	for d := minMidDepth; d <= maxMidDepth; d++ {
		if d < len(data.foldersByDepth) {
			midDepthFolders = append(midDepthFolders, data.foldersByDepth[d]...)
		}
	}
	// Fall back to root folders if no mid-depth folders exist.
	if len(midDepthFolders) == 0 {
		midDepthFolders = data.foldersByDepth[0]
	}
	for i := 2 * usersPerPattern; i < 3*usersPerPattern; i++ {
		folderIdx := (i - 2*usersPerPattern) % len(midDepthFolders)
		tuples = append(tuples, common.NewFolderTuple(
			data.users[i],
			common.RelationSetView,
			midDepthFolders[folderIdx],
		))
	}

	// Pattern 4: Users with folder-scoped resource permission.
	for i := 3 * usersPerPattern; i < 4*usersPerPattern; i++ {
		folderIdx := (i - 3*usersPerPattern) % len(data.folders)
		tuples = append(tuples, common.NewFolderResourceTuple(
			data.users[i],
			common.RelationGet,
			benchDashboardGroup,
			benchDashboardResource,
			"",
			data.folders[folderIdx],
		))
	}

	// Pattern 5: Users with direct resource permission.
	for i := 4 * usersPerPattern; i < 5*usersPerPattern; i++ {
		resourceIdx := (i - 4*usersPerPattern) % len(data.resources)
		tuples = append(tuples, common.NewResourceTuple(
			data.users[i],
			common.RelationGet,
			benchDashboardGroup,
			benchDashboardResource,
			"",
			data.resources[resourceIdx],
		))
	}

	// Pattern 6: Team memberships and team permissions.
	// First, add users to teams.
	for i := 5 * usersPerPattern; i < 6*usersPerPattern && i < len(data.users); i++ {
		teamIdx := (i - 5*usersPerPattern) % len(data.teams)
		tuples = append(tuples, common.NewTypedTuple(
			common.TypeTeam,
			data.users[i],
			common.RelationTeamMember,
			fmt.Sprintf("%d", teamIdx),
		))
	}
	// Then, give teams folder permissions.
	// Use "view" relation which grants get through the optimized schema.
	for i := 0; i < len(data.teams); i++ {
		folderIdx := i % len(data.folders)
		teamMember := fmt.Sprintf("team:%d#member", i)
		tuples = append(tuples, common.NewFolderTuple(
			teamMember,
			common.RelationSetView,
			data.folders[folderIdx],
		))
	}

	// Pattern 7: Users with no permissions (remaining users).
	// These users don't get any tuples - they're for testing denial cases.

	return tuples
}
// setupBenchmarkServer creates a server with the benchmark data loaded:
// it spins up an embedded OpenFGA-backed server, generates the folder tree,
// resources, users, teams and permission tuples, and writes all tuples to the
// store in batches. Skipped under -short.
func setupBenchmarkServer(b *testing.B) (*Server, *benchmarkData) {
	b.Helper()
	if testing.Short() {
		b.Skip("skipping benchmark in short mode")
	}

	cfg := setting.NewCfg()
	testStore := sqlstore.NewTestStore(b, sqlstore.WithCfg(cfg))
	openFGAStore, err := store.NewEmbeddedStore(cfg, testStore, log.NewNopLogger())
	require.NoError(b, err)
	openfga, err := NewOpenFGAServer(cfg.ZanzanaServer, openFGAStore)
	require.NoError(b, err)
	srv, err := NewServer(cfg.ZanzanaServer, openfga, log.NewNopLogger(), tracing.NewNoopTracerService(), prometheus.NewRegistry())
	require.NoError(b, err)

	// Generate test data.
	b.Log("Generating folder hierarchy...")
	folderTuples, data := generateFolderHierarchy(foldersPerLevel, folderDepth)
	b.Log("Generating resources...")
	generateResources(data, numResources)
	b.Log("Generating users...")
	generateUsers(data, numUsers)
	b.Log("Generating teams...")
	generateTeams(data, numTeams)
	b.Log("Generating permission tuples...")
	permTuples := generatePermissionTuples(data)

	// Add special user with permission on largest root folder (for >1000 folder test).
	// Use "view" relation which grants get through the optimized schema.
	largeRootUserTuple := common.NewFolderTuple(
		"user:large-root-access",
		common.RelationSetView,
		data.largestRootFolder,
	)
	permTuples = append(permTuples, largeRootUserTuple)

	// Add users with permissions at each depth level for depth-based testing.
	// Use "view" relation which grants get through the optimized schema.
	for depth := 0; depth <= data.maxDepth; depth++ {
		if len(data.foldersByDepth[depth]) == 0 {
			continue
		}
		folder := data.foldersByDepth[depth][0]
		user := fmt.Sprintf("user:depth-%d-access", depth)
		permTuples = append(permTuples, common.NewFolderTuple(user, common.RelationSetView, folder))
	}

	// Combine all tuples. (append may reuse folderTuples' backing array,
	// which is fine since folderTuples is not used afterwards.)
	allTuples := append(folderTuples, permTuples...)
	b.Logf("Total tuples to write: %d", len(allTuples))

	// Get store info.
	ctx := newContextWithNamespace()
	storeInf, err := srv.getStoreInfo(ctx, benchNamespace)
	require.NoError(b, err)

	// Write tuples in batches (OpenFGA limits to 100 per write).
	batchSize := 100
	for i := 0; i < len(allTuples); i += batchSize {
		end := i + batchSize
		if end > len(allTuples) {
			end = len(allTuples)
		}
		batch := allTuples[i:end]
		_, err = srv.openfga.Write(ctx, &openfgav1.WriteRequest{
			StoreId:              storeInf.ID,
			AuthorizationModelId: storeInf.ModelID,
			Writes: &openfgav1.WriteRequestWrites{
				TupleKeys:   batch,
				OnDuplicate: "ignore",
			},
		})
		require.NoError(b, err)
		// Progress log every 100 batches.
		if (i/batchSize)%100 == 0 {
			b.Logf("Written %d/%d tuples", end, len(allTuples))
		}
	}

	b.Logf("Benchmark data setup complete: %d folders, %d resources, %d users, %d teams",
		len(data.folders), len(data.resources), len(data.users), len(data.teams))
	b.Logf("Largest root folder: %s with %d descendants", data.largestRootFolder, data.largestRootDescCount)

	return srv, data
}
// BenchmarkCheck measures the performance of single Check requests across the
// permission-assignment patterns generated by setupBenchmarkServer: direct
// group_resource grants, folder inheritance at every depth, folder-scoped and
// direct resource grants, team membership, and the denial path for users with
// no permissions.
func BenchmarkCheck(b *testing.B) {
	srv, data := setupBenchmarkServer(b)
	ctx := newContextWithNamespace()
	// newCheckReq builds a CheckRequest in the benchmark namespace.
	newCheckReq := func(subject, verb, group, resource, folder, name string) *authzv1.CheckRequest {
		return &authzv1.CheckRequest{
			Namespace: benchNamespace,
			Subject:   subject,
			Verb:      verb,
			Group:     group,
			Resource:  resource,
			Folder:    folder,
			Name:      name,
		}
	}
	// Users are assigned to permission patterns in contiguous chunks. Use the
	// shared constant rather than a magic 7 so this stays in sync with the
	// tuple generator (BenchmarkBatchCheck already uses the constant).
	usersPerPattern := len(data.users) / numPermissionPatterns
	b.Run("GroupResourceDirect", func(b *testing.B) {
		// User with group_resource permission - should have access to everything
		user := data.users[0] // First user has GroupResource permission
		resource := data.resources[rand.Intn(len(data.resources))]
		folder := data.resourceFolders[resource]
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := srv.Check(ctx, newCheckReq(user, utils.VerbGet, benchDashboardGroup, benchDashboardResource, folder, resource))
			if err != nil {
				b.Fatal(err)
			}
			if !res.GetAllowed() {
				b.Fatal("expected access to be allowed")
			}
		}
	})
	// Test folder inheritance at each depth level (0 to maxDepth).
	// The user has permission on the ROOT folder (depth 0); we check access
	// resolved through inheritance at each deeper level.
	rootUser := "user:depth-0-access" // has view permission on root folder
	for depth := 0; depth <= data.maxDepth; depth++ {
		depth := depth // capture for closure (pre-Go 1.22 loop semantics)
		if len(data.foldersByDepth[depth]) == 0 {
			continue
		}
		b.Run(fmt.Sprintf("FolderInheritance/Depth%d", depth), func(b *testing.B) {
			resource := data.resources[0]
			folder := data.foldersByDepth[depth][0]
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				res, err := srv.Check(ctx, newCheckReq(rootUser, utils.VerbGet, benchDashboardGroup, benchDashboardResource, folder, resource))
				if err != nil {
					b.Fatal(err)
				}
				_ = res.GetAllowed()
			}
		})
	}
	b.Run("FolderResourceScoped", func(b *testing.B) {
		// User with folder-scoped resource permission
		user := data.users[3*usersPerPattern]
		folderIdx := 0
		folder := data.folders[folderIdx]
		resource := data.resources[folderIdx]
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := srv.Check(ctx, newCheckReq(user, utils.VerbGet, benchDashboardGroup, benchDashboardResource, folder, resource))
			if err != nil {
				b.Fatal(err)
			}
			_ = res.GetAllowed()
		}
	})
	b.Run("DirectResource", func(b *testing.B) {
		// User with direct resource permission
		user := data.users[4*usersPerPattern]
		resourceIdx := 0
		resource := data.resources[resourceIdx]
		folder := data.resourceFolders[resource]
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := srv.Check(ctx, newCheckReq(user, utils.VerbGet, benchDashboardGroup, benchDashboardResource, folder, resource))
			if err != nil {
				b.Fatal(err)
			}
			_ = res.GetAllowed()
		}
	})
	b.Run("TeamMembership", func(b *testing.B) {
		// User who is a team member, team has folder permission
		user := data.users[5*usersPerPattern]
		teamIdx := 0
		folderIdx := teamIdx % len(data.folders)
		folder := data.folders[folderIdx]
		resource := data.resources[folderIdx%len(data.resources)]
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := srv.Check(ctx, newCheckReq(user, utils.VerbGet, benchDashboardGroup, benchDashboardResource, folder, resource))
			if err != nil {
				b.Fatal(err)
			}
			_ = res.GetAllowed()
		}
	})
	b.Run("NoAccess", func(b *testing.B) {
		// User with no permissions - tests denial path
		user := data.users[len(data.users)-1] // Last user has no permissions
		resource := data.resources[0]
		folder := data.resourceFolders[resource]
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := srv.Check(ctx, newCheckReq(user, utils.VerbGet, benchDashboardGroup, benchDashboardResource, folder, resource))
			if err != nil {
				b.Fatal(err)
			}
			if res.GetAllowed() {
				b.Fatal("expected access to be denied")
			}
		}
	})
	b.Run("FolderCheck", func(b *testing.B) {
		// Direct folder access check (folder is the resource itself, so the
		// Folder field of the request is empty).
		user := data.users[usersPerPattern]
		folder := data.rootFolder
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := srv.Check(ctx, newCheckReq(user, utils.VerbGet, benchFolderGroup, benchFolderResource, "", folder))
			if err != nil {
				b.Fatal(err)
			}
			_ = res.GetAllowed()
		}
	})
}
// BenchmarkBatchCheck measures the performance of BatchCheck requests (many
// items per call) across the same permission patterns as BenchmarkCheck:
// direct grants, folder inheritance at several depths, team membership, the
// denial path, and a batch that mixes folders at different depths.
func BenchmarkBatchCheck(b *testing.B) {
	srv, data := setupBenchmarkServer(b)
	ctx := newContextWithNamespace()
	// newBatchCheckReq builds a BatchCheckRequest in the benchmark namespace.
	newBatchCheckReq := func(subject string, items []*authzextv1.BatchCheckItem) *authzextv1.BatchCheckRequest {
		return &authzextv1.BatchCheckRequest{
			Namespace: benchNamespace,
			Subject:   subject,
			Items:     items,
		}
	}
	// createBatchItems builds up to batchCheckSize items, one per resource,
	// each resolved against the folder that contains it.
	createBatchItems := func(resources []string, resourceFolders map[string]string) []*authzextv1.BatchCheckItem {
		items := make([]*authzextv1.BatchCheckItem, 0, batchCheckSize)
		for i := 0; i < batchCheckSize && i < len(resources); i++ {
			resource := resources[i]
			items = append(items, &authzextv1.BatchCheckItem{
				Verb:     utils.VerbGet,
				Group:    benchDashboardGroup,
				Resource: benchDashboardResource,
				Name:     resource,
				Folder:   resourceFolders[resource],
			})
		}
		return items
	}
	// createFolderBatchItems builds up to batchCheckSize items for folders at
	// a specific depth, padding with repeated folders if the depth has fewer
	// than batchCheckSize entries.
	createFolderBatchItems := func(folders []string, depth int, folderDepths map[string]int) []*authzextv1.BatchCheckItem {
		items := make([]*authzextv1.BatchCheckItem, 0, batchCheckSize)
		for _, folder := range folders {
			if len(items) == batchCheckSize {
				break // batch is full; no need to scan the remaining folders
			}
			if folderDepths[folder] == depth {
				items = append(items, &authzextv1.BatchCheckItem{
					Verb:     utils.VerbGet,
					Group:    benchDashboardGroup,
					Resource: benchDashboardResource,
					Name:     fmt.Sprintf("resource-in-%s", folder),
					Folder:   folder,
				})
			}
		}
		// Fill remaining slots if needed
		for len(items) < batchCheckSize && len(folders) > 0 {
			folder := folders[len(items)%len(folders)]
			items = append(items, &authzextv1.BatchCheckItem{
				Verb:     utils.VerbGet,
				Group:    benchDashboardGroup,
				Resource: benchDashboardResource,
				Name:     fmt.Sprintf("resource-%d", len(items)),
				Folder:   folder,
			})
		}
		return items
	}
	usersPerPattern := len(data.users) / numPermissionPatterns
	b.Run("GroupResourceDirect", func(b *testing.B) {
		// User with group_resource permission - should have access to everything
		user := data.users[0]
		items := createBatchItems(data.resources, data.resourceFolders)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := srv.BatchCheck(ctx, newBatchCheckReq(user, items))
			if err != nil {
				b.Fatal(err)
			}
			_ = res.Groups
		}
	})
	b.Run("FolderInheritance/Depth1", func(b *testing.B) {
		// User with folder permission on shallow folder
		user := data.users[usersPerPattern]
		items := createFolderBatchItems(data.folders, 1, data.folderDepths)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := srv.BatchCheck(ctx, newBatchCheckReq(user, items))
			if err != nil {
				b.Fatal(err)
			}
			_ = res.Groups
		}
	})
	b.Run("FolderInheritance/Depth4", func(b *testing.B) {
		// User with folder permission on mid-depth folder
		user := data.users[2*usersPerPattern]
		items := createFolderBatchItems(data.folders, 4, data.folderDepths)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := srv.BatchCheck(ctx, newBatchCheckReq(user, items))
			if err != nil {
				b.Fatal(err)
			}
			_ = res.Groups
		}
	})
	// Check access on the deepest folders (worst case for inheritance
	// traversal). Name the sub-benchmark from data.maxDepth instead of
	// hardcoding "Depth7" so the label stays truthful if the generated tree
	// depth changes.
	b.Run(fmt.Sprintf("FolderInheritance/Depth%d", data.maxDepth), func(b *testing.B) {
		user := data.users[usersPerPattern]
		items := createFolderBatchItems(data.folders, data.maxDepth, data.folderDepths)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := srv.BatchCheck(ctx, newBatchCheckReq(user, items))
			if err != nil {
				b.Fatal(err)
			}
			_ = res.Groups
		}
	})
	b.Run("DirectResource", func(b *testing.B) {
		// User with direct resource permission
		user := data.users[4*usersPerPattern]
		items := createBatchItems(data.resources, data.resourceFolders)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := srv.BatchCheck(ctx, newBatchCheckReq(user, items))
			if err != nil {
				b.Fatal(err)
			}
			_ = res.Groups
		}
	})
	b.Run("TeamMembership", func(b *testing.B) {
		// User who is a team member, team has folder permission
		user := data.users[5*usersPerPattern]
		items := createBatchItems(data.resources, data.resourceFolders)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := srv.BatchCheck(ctx, newBatchCheckReq(user, items))
			if err != nil {
				b.Fatal(err)
			}
			_ = res.Groups
		}
	})
	b.Run("NoAccess", func(b *testing.B) {
		// User with no permissions - tests denial path
		user := data.users[len(data.users)-1]
		items := createBatchItems(data.resources, data.resourceFolders)
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := srv.BatchCheck(ctx, newBatchCheckReq(user, items))
			if err != nil {
				b.Fatal(err)
			}
			_ = res.Groups
		}
	})
	b.Run("MixedFolders", func(b *testing.B) {
		// Batch of items spread across folders at different depths
		user := data.users[usersPerPattern]
		items := make([]*authzextv1.BatchCheckItem, 0, batchCheckSize)
		for i := 0; i < batchCheckSize; i++ {
			folder := data.folders[i%len(data.folders)]
			items = append(items, &authzextv1.BatchCheckItem{
				Verb:     utils.VerbGet,
				Group:    benchDashboardGroup,
				Resource: benchDashboardResource,
				Name:     fmt.Sprintf("resource-%d", i),
				Folder:   folder,
			})
		}
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			res, err := srv.BatchCheck(ctx, newBatchCheckReq(user, items))
			if err != nil {
				b.Fatal(err)
			}
			_ = res.Groups
		}
	})
}
// BenchmarkList measures the performance of List requests (Compile
// equivalent): the fast All=true path for group_resource grants, folder- and
// resource-scoped listings, the empty denial case, listing under a large root
// folder, and a depth sweep that probes where ListObjects gets slow. Each
// request runs under a listTimeout-bounded context.
func BenchmarkList(b *testing.B) {
	srv, data := setupBenchmarkServer(b)
	baseCtx := newContextWithNamespace()
	// newListReq builds a ListRequest in the benchmark namespace.
	newListReq := func(subject, verb, group, resource string) *authzv1.ListRequest {
		return &authzv1.ListRequest{
			Namespace: benchNamespace,
			Subject:   subject,
			Verb:      verb,
			Group:     group,
			Resource:  resource,
		}
	}
	// ctxWithTimeout bounds each List call so a pathological case fails fast
	// instead of hanging the benchmark run.
	ctxWithTimeout := func() (context.Context, context.CancelFunc) {
		return context.WithTimeout(baseCtx, listTimeout)
	}
	// Users are assigned to permission patterns in contiguous chunks. Use the
	// shared constant rather than a magic 7 so this stays in sync with the
	// tuple generator (BenchmarkBatchCheck already uses the constant).
	usersPerPattern := len(data.users) / numPermissionPatterns
	b.Run("AllAccess", func(b *testing.B) {
		// User with group_resource permission - should return All=true quickly
		user := data.users[0]
		b.Logf("Test: User with group_resource permission (access to ALL dashboards)")
		b.Logf("Expected: All=true returned immediately without ListObjects call")
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			ctx, cancel := ctxWithTimeout()
			res, err := srv.List(ctx, newListReq(user, utils.VerbGet, benchDashboardGroup, benchDashboardResource))
			cancel()
			if err != nil {
				b.Fatalf("Error: %v", err)
			}
			if !res.GetAll() {
				b.Fatal("expected All=true for user with group_resource permission")
			}
		}
	})
	b.Run("FolderScoped", func(b *testing.B) {
		// User with folder permissions - should return folder list
		user := data.users[usersPerPattern]
		b.Logf("Test: User with direct folder permission on a single folder")
		b.Logf("Expected: Returns list of folders user has access to")
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			ctx, cancel := ctxWithTimeout()
			res, err := srv.List(ctx, newListReq(user, utils.VerbGet, benchDashboardGroup, benchDashboardResource))
			cancel()
			if err != nil {
				b.Fatalf("Error: %v", err)
			}
			// Log the shape of the result once for context in benchmark output.
			if i == 0 {
				b.Logf("Result: %d folders, %d items, All=%v", len(res.GetFolders()), len(res.GetItems()), res.GetAll())
			}
		}
	})
	b.Run("DirectResources", func(b *testing.B) {
		// User with direct resource permissions - should return items list
		user := data.users[4*usersPerPattern]
		b.Logf("Test: User with direct permission on specific resources")
		b.Logf("Expected: Returns list of specific resources user has access to")
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			ctx, cancel := ctxWithTimeout()
			res, err := srv.List(ctx, newListReq(user, utils.VerbGet, benchDashboardGroup, benchDashboardResource))
			cancel()
			if err != nil {
				b.Fatalf("Error: %v", err)
			}
			if i == 0 {
				b.Logf("Result: %d folders, %d items, All=%v", len(res.GetFolders()), len(res.GetItems()), res.GetAll())
			}
		}
	})
	b.Run("NoAccess", func(b *testing.B) {
		// User with no permissions - should return empty results
		user := data.users[len(data.users)-1]
		b.Logf("Test: User with NO permissions (denial case)")
		b.Logf("Expected: Empty results")
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			ctx, cancel := ctxWithTimeout()
			res, err := srv.List(ctx, newListReq(user, utils.VerbGet, benchDashboardGroup, benchDashboardResource))
			cancel()
			if err != nil {
				b.Fatalf("Error: %v", err)
			}
			if i == 0 {
				b.Logf("Result: %d folders, %d items, All=%v", len(res.GetFolders()), len(res.GetItems()), res.GetAll())
			}
		}
	})
	b.Run("LargeRootFolder", func(b *testing.B) {
		// User with access to the root folder that has the most descendants
		user := "user:large-root-access"
		b.Logf("Test: User with permission on ROOT folder (folder-0)")
		b.Logf("Root folder %s has %d total descendants", data.largestRootFolder, data.largestRootDescCount)
		b.Logf("Expected: ListObjects should return folders through inheritance")
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			ctx, cancel := ctxWithTimeout()
			start := time.Now()
			res, err := srv.List(ctx, newListReq(user, utils.VerbGet, benchFolderGroup, benchFolderResource))
			elapsed := time.Since(start)
			cancel()
			if err != nil {
				b.Fatalf("Error after %v: %v", elapsed, err)
			}
			if i == 0 {
				b.Logf("Result: %d folders returned in %v (descendants: %d)",
					len(res.GetItems()), elapsed, data.largestRootDescCount)
			}
		}
	})
	// Test List at various folder depths to find the breaking point.
	b.Run("ByDepth", func(b *testing.B) {
		b.Logf("Testing List performance at various folder depths (timeout: %v)", listTimeout)
		b.Logf("Tree structure: %d folders per level, %d max depth", foldersPerLevel, data.maxDepth)
		for depth := 0; depth <= data.maxDepth; depth++ {
			if len(data.foldersByDepth[depth]) == 0 {
				continue
			}
			folder := data.foldersByDepth[depth][0]
			descendants := data.folderDescendants[folder]
			user := fmt.Sprintf("user:depth-%d-access", depth)
			b.Run(fmt.Sprintf("Depth%d_%dDescendants", depth, descendants), func(b *testing.B) {
				b.Logf("Test: User with permission on folder at depth %d", depth)
				b.Logf("Folder: %s, Descendants: %d", folder, descendants)
				// First, do a single timed probe run to report and to bail out
				// before committing to b.N iterations of a pathological case.
				ctx, cancel := ctxWithTimeout()
				start := time.Now()
				res, err := srv.List(ctx, newListReq(user, utils.VerbGet, benchFolderGroup, benchFolderResource))
				elapsed := time.Since(start)
				cancel()
				if err != nil {
					b.Logf("FAILED after %v: %v", elapsed, err)
					if elapsed >= listTimeout {
						b.Logf("TIMEOUT: List took longer than %v", listTimeout)
					}
					// b.Skip terminates the sub-benchmark; no return needed.
					b.Skip("Skipping benchmark iterations due to error")
				}
				b.Logf("Result: %d folders in %v", len(res.GetItems()), elapsed)
				if elapsed > 5*time.Second {
					b.Logf("WARNING: Single List took %v, skipping benchmark iterations", elapsed)
					b.Skip("Too slow for benchmark iterations")
				}
				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					ctx, cancel := ctxWithTimeout()
					_, err := srv.List(ctx, newListReq(user, utils.VerbGet, benchFolderGroup, benchFolderResource))
					cancel()
					if err != nil {
						b.Fatalf("Error: %v", err)
					}
				}
			})
		}
	})
}
@@ -126,14 +126,8 @@ func (s *Server) checkTyped(ctx context.Context, subject, relation string, resou
return &authzv1.CheckResponse{Allowed: false}, nil
}
// Use optimized folder permission relations for permission management
checkRelation := relation
if resource.Type() == common.TypeFolder {
checkRelation = common.FolderPermissionRelation(relation)
}
// Check if subject has direct access to resource
res, err := s.openfgaCheck(ctx, store, subject, checkRelation, resourceIdent, contextuals, nil)
res, err := s.openfgaCheck(ctx, store, subject, relation, resourceIdent, contextuals, nil)
if err != nil {
return nil, err
}
@@ -149,15 +143,14 @@ func (s *Server) checkGeneric(ctx context.Context, subject, relation string, res
defer span.End()
var (
folderIdent = resource.FolderIdent()
resourceCtx = resource.Context()
folderRelation = common.SubresourceRelation(relation)
folderCheckRelation = common.FolderPermissionRelation(relation)
folderIdent = resource.FolderIdent()
resourceCtx = resource.Context()
folderRelation = common.SubresourceRelation(relation)
)
if folderIdent != "" && isFolderPermissionBasedResource(resource.GroupResource()) {
// Check if resource inherits permissions from the folder (like dashboards in a folder)
res, err := s.openfgaCheck(ctx, store, subject, folderCheckRelation, folderIdent, contextuals, resourceCtx)
res, err := s.openfgaCheck(ctx, store, subject, relation, folderIdent, contextuals, resourceCtx)
if err != nil {
return nil, err
}
@@ -85,12 +85,6 @@ func (s *Server) listTyped(ctx context.Context, subject, relation string, resour
resourceCtx = resource.Context()
)
// Use optimized folder permission relations for permission management
listRelation := relation
if resource.Type() == common.TypeFolder {
listRelation = common.FolderPermissionRelation(relation)
}
var items []string
if resource.HasSubresource() && common.IsSubresourceRelation(subresourceRelation) {
// List requested subresources
@@ -116,7 +110,7 @@ func (s *Server) listTyped(ctx context.Context, subject, relation string, resour
StoreId: store.ID,
AuthorizationModelId: store.ModelID,
Type: resource.Type(),
Relation: listRelation,
Relation: relation,
User: subject,
ContextualTuples: contextuals,
})
@@ -135,9 +129,8 @@ func (s *Server) listGeneric(ctx context.Context, subject, relation string, reso
defer span.End()
var (
folderRelation = common.SubresourceRelation(relation)
folderListRelation = common.FolderPermissionRelation(relation) // Optimized for permission management
resourceCtx = resource.Context()
folderRelation = common.SubresourceRelation(relation)
resourceCtx = resource.Context()
)
// 1. List all folders subject has access to resource type in
@@ -166,7 +159,7 @@ func (s *Server) listGeneric(ctx context.Context, subject, relation string, reso
StoreId: store.ID,
AuthorizationModelId: store.ModelID,
Type: common.TypeFolder,
Relation: folderListRelation,
Relation: relation,
User: subject,
Context: resourceCtx,
ContextualTuples: contextuals,
-5
View File
@@ -44,11 +44,6 @@ type DashboardService interface {
GetDashboardsByLibraryPanelUID(ctx context.Context, libraryPanelUID string, orgID int64) ([]*DashboardRef, error)
}
type DashboardAccessService interface {
// The user as access to {VERB} the requested dashboard
HasDashboardAccess(ctx context.Context, user identity.Requester, verb string, namespace string, name string) (bool, error)
}
type PermissionsRegistrationService interface {
RegisterDashboardPermissions(service accesscontrol.DashboardPermissionsService)
@@ -5,9 +5,8 @@ package dashboards
import (
context "context"
mock "github.com/stretchr/testify/mock"
identity "github.com/grafana/grafana/pkg/apimachinery/identity"
mock "github.com/stretchr/testify/mock"
model "github.com/grafana/grafana/pkg/services/search/model"
@@ -530,11 +529,6 @@ func (_m *FakeDashboardService) ValidateDashboardRefreshInterval(minRefreshInter
return r0
}
// CanViewDashboard uses the access control service to check if the requested user can see a dashboard
func (_m *FakeDashboardService) HasDashboardAccess(ctx context.Context, user identity.Requester, verb string, namespace string, name string) (bool, error) {
return true, nil
}
// NewFakeDashboardService creates a new instance of FakeDashboardService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewFakeDashboardService(t interface {
@@ -67,7 +67,6 @@ var (
_ dashboards.DashboardService = (*DashboardServiceImpl)(nil)
_ dashboards.DashboardProvisioningService = (*DashboardServiceImpl)(nil)
_ dashboards.PluginService = (*DashboardServiceImpl)(nil)
_ dashboards.DashboardAccessService = (*DashboardServiceImpl)(nil)
daysInTrash = 24 * 30 * time.Hour
tracer = otel.Tracer("github.com/grafana/grafana/pkg/services/dashboards/service")
@@ -101,38 +100,6 @@ type DashboardServiceImpl struct {
dashboardPermissionsReady chan struct{}
}
// CanViewDashboard uses the access control service to check if the requested user can see a dashboard
func (dr *DashboardServiceImpl) HasDashboardAccess(ctx context.Context, user identity.Requester, verb string, namespace string, name string) (bool, error) {
ns, err := claims.ParseNamespace(namespace)
if err != nil {
return false, err
}
dash, err := dr.GetDashboard(ctx, &dashboards.GetDashboardQuery{
UID: name,
OrgID: ns.OrgID,
})
if err != nil || dash == nil {
return false, nil
}
var action string
switch verb {
case utils.VerbGet:
action = dashboards.ActionDashboardsRead
case utils.VerbUpdate:
action = dashboards.ActionDashboardsWrite
default:
return false, fmt.Errorf("unsupported verb")
}
evaluator := accesscontrol.EvalPermission(action,
dashboards.ScopeDashboardsProvider.GetResourceScopeUID(name))
canView, err := dr.ac.Evaluate(ctx, user, evaluator)
if err != nil || !canView {
return false, nil
}
return true, nil
}
func (dr *DashboardServiceImpl) startK8sDeletedDashboardsCleanupJob(ctx context.Context) chan struct{} {
done := make(chan struct{})
go func() {
@@ -23,9 +23,3 @@ func ProvideDashboardPluginService(
) dashboards.PluginService {
return orig
}
func ProvideDashboardAccessService(
features featuremgmt.FeatureToggles, orig *DashboardServiceImpl,
) dashboards.DashboardAccessService {
return orig
}
-7
View File
@@ -1962,13 +1962,6 @@ var (
RequiresRestart: false,
HideFromDocs: false,
},
{
Name: "elasticsearchRawDSLQuery",
Description: "Enables the raw DSL query editor in the Elasticsearch data source",
Stage: FeatureStageExperimental,
Owner: grafanaPartnerPluginsSquad,
Expression: "false",
},
{
Name: "kubernetesAnnotations",
Description: "Enables app platform API for annotations",
-1
View File
@@ -266,7 +266,6 @@ pluginStoreServiceLoading,experimental,@grafana/plugins-platform-backend,false,f
newPanelPadding,preview,@grafana/dashboards-squad,false,false,true
onlyStoreActionSets,GA,@grafana/identity-access-team,false,false,false
panelTimeSettings,experimental,@grafana/dashboards-squad,false,false,false
elasticsearchRawDSLQuery,experimental,@grafana/partner-datasources,false,false,false
kubernetesAnnotations,experimental,@grafana/grafana-backend-services-squad,false,false,false
awsDatasourcesHttpProxy,experimental,@grafana/aws-datasources,false,false,false
transformationsEmptyPlaceholder,preview,@grafana/datapro,false,false,true
1 Name Stage Owner requiresDevMode RequiresRestart FrontendOnly
266 newPanelPadding preview @grafana/dashboards-squad false false true
267 onlyStoreActionSets GA @grafana/identity-access-team false false false
268 panelTimeSettings experimental @grafana/dashboards-squad false false false
elasticsearchRawDSLQuery experimental @grafana/partner-datasources false false false
269 kubernetesAnnotations experimental @grafana/grafana-backend-services-squad false false false
270 awsDatasourcesHttpProxy experimental @grafana/aws-datasources false false false
271 transformationsEmptyPlaceholder preview @grafana/datapro false false true
-4
View File
@@ -758,10 +758,6 @@ const (
// Enables a new panel time settings drawer
FlagPanelTimeSettings = "panelTimeSettings"
// FlagElasticsearchRawDSLQuery
// Enables the raw DSL query editor in the Elasticsearch data source
FlagElasticsearchRawDSLQuery = "elasticsearchRawDSLQuery"
// FlagKubernetesAnnotations
// Enables app platform API for annotations
FlagKubernetesAnnotations = "kubernetesAnnotations"
-13
View File
@@ -1206,19 +1206,6 @@
"codeowner": "@grafana/partner-datasources"
}
},
{
"metadata": {
"name": "elasticsearchRawDSLQuery",
"resourceVersion": "1763508396079",
"creationTimestamp": "2025-11-18T23:26:36Z"
},
"spec": {
"description": "Enables the raw DSL query editor in the Elasticsearch data source",
"stage": "experimental",
"codeowner": "@grafana/partner-datasources",
"expression": "false"
}
},
{
"metadata": {
"name": "enableAppChromeExtensions",
+43 -22
View File
@@ -6,11 +6,10 @@ import (
"fmt"
"strings"
"github.com/grafana/authlib/types"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/grafana/grafana/pkg/cmd/grafana-cli/logger"
"github.com/grafana/grafana/pkg/services/accesscontrol"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/live/model"
)
@@ -33,9 +32,10 @@ type dashboardEvent struct {
// DashboardHandler manages all the `grafana/dashboard/*` channels
type DashboardHandler struct {
Publisher model.ChannelPublisher
ClientCount model.ChannelClientCount
AccessControl dashboards.DashboardAccessService
Publisher model.ChannelPublisher
ClientCount model.ChannelClientCount
DashboardService dashboards.DashboardService
AccessControl accesscontrol.AccessControl
}
// GetHandlerForPath called on init
@@ -49,15 +49,23 @@ func (h *DashboardHandler) OnSubscribe(ctx context.Context, user identity.Reques
// make sure can view this dashboard
if len(parts) == 2 && parts[0] == "uid" {
ns := types.OrgNamespaceFormatter(user.GetOrgID())
ok, err := h.AccessControl.HasDashboardAccess(ctx, user, utils.VerbGet, ns, parts[1])
if ok && err == nil {
return model.SubscribeReply{
Presence: true,
JoinLeave: true,
}, backend.SubscribeStreamStatusOK, nil
query := dashboards.GetDashboardQuery{UID: parts[1], OrgID: user.GetOrgID()}
_, err := h.DashboardService.GetDashboard(ctx, &query)
if err != nil {
logger.Error("Error getting dashboard", "query", query, "error", err)
return model.SubscribeReply{}, backend.SubscribeStreamStatusNotFound, nil
}
return model.SubscribeReply{}, backend.SubscribeStreamStatusPermissionDenied, err
evaluator := accesscontrol.EvalPermission(dashboards.ActionDashboardsRead, dashboards.ScopeDashboardsProvider.GetResourceScopeUID(parts[1]))
canView, err := h.AccessControl.Evaluate(ctx, user, evaluator)
if err != nil || !canView {
return model.SubscribeReply{}, backend.SubscribeStreamStatusPermissionDenied, err
}
return model.SubscribeReply{
Presence: true,
JoinLeave: true,
}, backend.SubscribeStreamStatusOK, nil
}
// Unknown path
@@ -80,16 +88,29 @@ func (h *DashboardHandler) OnPublish(ctx context.Context, requester identity.Req
// just ignore the event
return model.PublishReply{}, backend.PublishStreamStatusNotFound, fmt.Errorf("ignore???")
}
ns := types.OrgNamespaceFormatter(requester.GetOrgID())
ok, err := h.AccessControl.HasDashboardAccess(ctx, requester, utils.VerbUpdate, ns, parts[1])
if ok && err == nil {
msg, err := json.Marshal(event)
if err != nil {
return model.PublishReply{}, backend.PublishStreamStatusNotFound, fmt.Errorf("internal error")
}
return model.PublishReply{Data: msg}, backend.PublishStreamStatusOK, nil
query := dashboards.GetDashboardQuery{UID: parts[1], OrgID: requester.GetOrgID()}
_, err = h.DashboardService.GetDashboard(ctx, &query)
if err != nil {
logger.Error("Unknown dashboard", "query", query)
return model.PublishReply{}, backend.PublishStreamStatusNotFound, nil
}
evaluator := accesscontrol.EvalPermission(dashboards.ActionDashboardsWrite, dashboards.ScopeDashboardsProvider.GetResourceScopeUID(parts[1]))
canEdit, err := h.AccessControl.Evaluate(ctx, requester, evaluator)
if err != nil {
return model.PublishReply{}, backend.PublishStreamStatusNotFound, fmt.Errorf("internal error")
}
// Ignore edit events if the user can not edit
if !canEdit {
return model.PublishReply{}, backend.PublishStreamStatusNotFound, nil // NOOP
}
msg, err := json.Marshal(event)
if err != nil {
return model.PublishReply{}, backend.PublishStreamStatusNotFound, fmt.Errorf("internal error")
}
return model.PublishReply{Data: msg}, backend.PublishStreamStatusOK, nil
}
return model.PublishReply{}, backend.PublishStreamStatusNotFound, nil
+93 -10
View File
@@ -27,11 +27,13 @@ import (
"github.com/grafana/grafana/pkg/api/response"
"github.com/grafana/grafana/pkg/api/routing"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/infra/localcache"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/usagestats"
"github.com/grafana/grafana/pkg/middleware"
"github.com/grafana/grafana/pkg/middleware/requestmeta"
"github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/services/accesscontrol"
"github.com/grafana/grafana/pkg/services/apiserver"
contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model"
"github.com/grafana/grafana/pkg/services/dashboards"
@@ -50,6 +52,7 @@ import (
"github.com/grafana/grafana/pkg/services/org"
"github.com/grafana/grafana/pkg/services/pluginsintegration/plugincontext"
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore"
"github.com/grafana/grafana/pkg/services/secrets"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util"
"github.com/grafana/grafana/pkg/web"
@@ -69,23 +72,28 @@ type CoreGrafanaScope struct {
Dashboards DashboardActivityChannel
}
func ProvideService(cfg *setting.Cfg, routeRegister routing.RouteRegister, plugCtxProvider *plugincontext.Provider,
pluginStore pluginstore.Store, pluginClient plugins.Client, dataSourceCache datasources.CacheService,
func ProvideService(plugCtxProvider *plugincontext.Provider, cfg *setting.Cfg, routeRegister routing.RouteRegister,
pluginStore pluginstore.Store, pluginClient plugins.Client, cacheService *localcache.CacheService,
dataSourceCache datasources.CacheService, secretsService secrets.Service,
usageStatsService usagestats.Service, toggles featuremgmt.FeatureToggles,
dashboardService dashboards.DashboardAccessService,
configProvider apiserver.RestConfigProvider) (*GrafanaLive, error) {
accessControl accesscontrol.AccessControl, dashboardService dashboards.DashboardService,
orgService org.Service, configProvider apiserver.RestConfigProvider) (*GrafanaLive, error) {
g := &GrafanaLive{
Cfg: cfg,
Features: toggles,
PluginContextProvider: plugCtxProvider,
RouteRegister: routeRegister,
pluginStore: pluginStore,
pluginClient: pluginClient,
CacheService: cacheService,
DataSourceCache: dataSourceCache,
SecretsService: secretsService,
channels: make(map[string]model.ChannelHandler),
GrafanaScope: CoreGrafanaScope{
Features: make(map[string]model.ChannelHandlerFactory),
},
usageStatsService: usageStatsService,
orgService: orgService,
keyPrefix: "gf_live",
}
@@ -168,13 +176,19 @@ func ProvideService(cfg *setting.Cfg, routeRegister routing.RouteRegister, plugC
// Initialize the main features
dash := &features.DashboardHandler{
Publisher: g.Publish,
ClientCount: g.ClientCount,
AccessControl: dashboardService,
Publisher: g.Publish,
ClientCount: g.ClientCount,
DashboardService: dashboardService,
AccessControl: accessControl,
}
g.GrafanaScope.Dashboards = dash
g.GrafanaScope.Features["dashboard"] = dash
g.GrafanaScope.Features["watch"] = features.NewWatchRunner(g.Publish, configProvider)
// Testing watch with just the provisioning support -- this will be removed when it is well validated
//nolint:staticcheck // not yet migrated to OpenFeature
if toggles.IsEnabledGlobally(featuremgmt.FlagProvisioning) {
g.GrafanaScope.Features["watch"] = features.NewWatchRunner(g.Publish, configProvider)
}
g.surveyCaller = survey.NewCaller(managedStreamRunner, node)
err = g.surveyCaller.SetupHandlers()
@@ -384,11 +398,11 @@ func ProvideService(cfg *setting.Cfg, routeRegister routing.RouteRegister, plugC
pushPipelineWSHandler.ServeHTTP(ctx.Resp, r)
}
routeRegister.Group("/api/live", func(group routing.RouteRegister) {
g.RouteRegister.Group("/api/live", func(group routing.RouteRegister) {
group.Get("/ws", g.websocketHandler)
}, middleware.ReqSignedIn, requestmeta.SetSLOGroup(requestmeta.SLOGroupNone))
routeRegister.Group("/api/live", func(group routing.RouteRegister) {
g.RouteRegister.Group("/api/live", func(group routing.RouteRegister) {
group.Get("/push/:streamId", g.pushWebsocketHandler)
group.Get("/pipeline/push/*", g.pushPipelineWebsocketHandler)
}, middleware.ReqOrgAdmin, requestmeta.SetSLOGroup(requestmeta.SLOGroupNone))
@@ -447,9 +461,13 @@ type GrafanaLive struct {
PluginContextProvider *plugincontext.Provider
Cfg *setting.Cfg
Features featuremgmt.FeatureToggles
RouteRegister routing.RouteRegister
CacheService *localcache.CacheService
DataSourceCache datasources.CacheService
SecretsService secrets.Service
pluginStore pluginstore.Store
pluginClient plugins.Client
orgService org.Service
keyPrefix string // HA prefix for grafana cloud (since the org is always 1)
@@ -1338,6 +1356,71 @@ func (g *GrafanaLive) HandleWriteConfigsPostHTTP(c *contextmodel.ReqContext) res
})
}
// HandleWriteConfigsPutHTTP ...
func (g *GrafanaLive) HandleWriteConfigsPutHTTP(c *contextmodel.ReqContext) response.Response {
body, err := io.ReadAll(c.Req.Body)
if err != nil {
return response.Error(http.StatusInternalServerError, "Error reading body", err)
}
var cmd pipeline.WriteConfigUpdateCmd
err = json.Unmarshal(body, &cmd)
if err != nil {
return response.Error(http.StatusBadRequest, "Error decoding write config update command", err)
}
if cmd.UID == "" {
return response.Error(http.StatusBadRequest, "UID required", nil)
}
existingBackend, ok, err := g.pipelineStorage.GetWriteConfig(c.Req.Context(), c.GetOrgID(), pipeline.WriteConfigGetCmd{
UID: cmd.UID,
})
if err != nil {
return response.Error(http.StatusInternalServerError, "Failed to get write config", err)
}
if ok {
if cmd.SecureSettings == nil {
cmd.SecureSettings = map[string]string{}
}
secureJSONData, err := g.SecretsService.DecryptJsonData(c.Req.Context(), existingBackend.SecureSettings)
if err != nil {
logger.Error("Error decrypting secure settings", "error", err)
return response.Error(http.StatusInternalServerError, "Error decrypting secure settings", err)
}
for k, v := range secureJSONData {
if _, ok := cmd.SecureSettings[k]; !ok {
cmd.SecureSettings[k] = v
}
}
}
result, err := g.pipelineStorage.UpdateWriteConfig(c.Req.Context(), c.GetOrgID(), cmd)
if err != nil {
return response.Error(http.StatusInternalServerError, "Failed to update write config", err)
}
return response.JSON(http.StatusOK, util.DynMap{
"writeConfig": pipeline.WriteConfigToDto(result),
})
}
// HandleWriteConfigsDeleteHTTP ...
func (g *GrafanaLive) HandleWriteConfigsDeleteHTTP(c *contextmodel.ReqContext) response.Response {
body, err := io.ReadAll(c.Req.Body)
if err != nil {
return response.Error(http.StatusInternalServerError, "Error reading body", err)
}
var cmd pipeline.WriteConfigDeleteCmd
err = json.Unmarshal(body, &cmd)
if err != nil {
return response.Error(http.StatusBadRequest, "Error decoding write config delete command", err)
}
if cmd.UID == "" {
return response.Error(http.StatusBadRequest, "UID required", nil)
}
err = g.pipelineStorage.DeleteWriteConfig(c.Req.Context(), c.GetOrgID(), cmd)
if err != nil {
return response.Error(http.StatusInternalServerError, "Failed to delete write config", err)
}
return response.JSON(http.StatusOK, util.DynMap{})
}
// Write to the standard log15 logger
func handleLog(msg centrifuge.LogEntry) {
arr := make([]interface{}, 0)
+6 -3
View File
@@ -19,6 +19,7 @@ import (
"github.com/grafana/grafana/pkg/api/routing"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/infra/usagestats"
"github.com/grafana/grafana/pkg/services/accesscontrol/acimpl"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/setting"
@@ -339,14 +340,16 @@ func setupLiveService(cfg *setting.Cfg, t *testing.T) (*GrafanaLive, error) {
cfg = setting.NewCfg()
}
return ProvideService(cfg,
return ProvideService(nil,
cfg,
routing.NewRouteRegister(),
nil, nil, nil,
nil, nil, nil, nil,
nil,
&usagestats.UsageStatsMock{T: t},
featuremgmt.WithFeatures(),
acimpl.ProvideAccessControl(featuremgmt.WithFeatures()),
&dashboards.FakeDashboardService{},
nil)
nil, nil)
}
type dummyTransport struct {
@@ -457,7 +457,6 @@ type paginationContext struct {
labelOptions []ngmodels.LabelOption
limitAlertsPerRule int64
limitRulesPerGroup int64
compact bool
}
// pageResult is the result of fetching and filtering of one page
@@ -493,7 +492,6 @@ func (ctx *paginationContext) fetchAndFilterPage(log log.Logger, store ListAlert
Limit: remainingGroups,
RuleLimit: remainingRules,
ContinueToken: token,
Compact: ctx.compact,
}
ruleList, newToken, err := store.ListAlertRulesByGroup(ctx.opts.Ctx, &byGroupQuery)
@@ -521,7 +519,7 @@ func (ctx *paginationContext) fetchAndFilterPage(log log.Logger, store ListAlert
log, rg.GroupKey, rg.Folder, rg.Rules,
ctx.provenanceRecords, ctx.limitAlertsPerRule,
ctx.stateFilterSet, ctx.matchers, ctx.labelOptions,
ctx.ruleStatusMutator, ctx.alertStateMutator, ctx.compact,
ctx.ruleStatusMutator, ctx.alertStateMutator,
)
ruleGroup.Totals = totals
accumulateTotals(result.totalsDelta, totals)
@@ -787,8 +785,6 @@ func PrepareRuleGroupStatusesV2(log log.Logger, store ListAlertRulesStoreV2, opt
}
span.SetAttributes(attribute.Int("rule_name_count", len(ruleNamesSet)))
compact := getBoolWithDefault(opts.Query, "compact", false)
span.SetAttributes(attribute.Bool("compact", compact))
pagCtx := &paginationContext{
opts: opts,
provenanceRecords: provenanceRecords,
@@ -811,7 +807,6 @@ func PrepareRuleGroupStatusesV2(log log.Logger, store ListAlertRulesStoreV2, opt
labelOptions: labelOptions,
limitAlertsPerRule: limitAlertsPerRule,
limitRulesPerGroup: limitRulesPerGroup,
compact: compact,
}
groups, rulesTotals, continueToken, err := paginateRuleGroups(log, store, pagCtx, span, maxGroups, maxRules, nextToken)
@@ -964,7 +959,7 @@ func PrepareRuleGroupStatuses(log log.Logger, store ListAlertRulesStore, opts Ru
break
}
ruleGroup, totals := toRuleGroup(log, rg.GroupKey, rg.Folder, rg.Rules, provenanceRecords, limitAlertsPerRule, stateFilterSet, matchers, labelOptions, ruleStatusMutator, alertStateMutator, false)
ruleGroup, totals := toRuleGroup(log, rg.GroupKey, rg.Folder, rg.Rules, provenanceRecords, limitAlertsPerRule, stateFilterSet, matchers, labelOptions, ruleStatusMutator, alertStateMutator)
ruleGroup.Totals = totals
for k, v := range totals {
rulesTotals[k] += v
@@ -1115,7 +1110,7 @@ func matchersMatch(matchers []*labels.Matcher, labels map[string]string) bool {
return true
}
func toRuleGroup(log log.Logger, groupKey ngmodels.AlertRuleGroupKey, folderFullPath string, rules []*ngmodels.AlertRule, provenanceRecords map[string]ngmodels.Provenance, limitAlerts int64, stateFilterSet map[eval.State]struct{}, matchers labels.Matchers, labelOptions []ngmodels.LabelOption, ruleStatusMutator RuleStatusMutator, ruleAlertStateMutator RuleAlertStateMutator, compact bool) (*apimodels.RuleGroup, map[string]int64) {
func toRuleGroup(log log.Logger, groupKey ngmodels.AlertRuleGroupKey, folderFullPath string, rules []*ngmodels.AlertRule, provenanceRecords map[string]ngmodels.Provenance, limitAlerts int64, stateFilterSet map[eval.State]struct{}, matchers labels.Matchers, labelOptions []ngmodels.LabelOption, ruleStatusMutator RuleStatusMutator, ruleAlertStateMutator RuleAlertStateMutator) (*apimodels.RuleGroup, map[string]int64) {
newGroup := &apimodels.RuleGroup{
Name: groupKey.RuleGroup,
// file is what Prometheus uses for provisioning, we replace it with namespace which is the folder in Grafana.
@@ -1131,14 +1126,10 @@ func toRuleGroup(log log.Logger, groupKey ngmodels.AlertRuleGroupKey, folderFull
if prov, exists := provenanceRecords[rule.ResourceID()]; exists {
provenance = prov
}
var query string
if !compact {
query = ruleToQuery(log, rule)
}
alertingRule := apimodels.AlertingRule{
State: "inactive",
Name: rule.Title,
Query: query,
Query: ruleToQuery(log, rule),
QueriedDatasourceUIDs: extractDatasourceUIDs(rule),
Duration: rule.For.Seconds(),
KeepFiringFor: rule.KeepFiringFor.Seconds(),
@@ -110,12 +110,6 @@ func (aq *AlertQuery) String() string {
}
func (aq *AlertQuery) setModelProps() error {
if aq.Model == nil {
// No data to extract, use an empty map.
aq.modelProps = map[string]any{}
return nil
}
aq.modelProps = make(map[string]any)
err := json.Unmarshal(aq.Model, &aq.modelProps)
if err != nil {
@@ -1022,7 +1022,6 @@ type ListAlertRulesExtendedQuery struct {
Limit int64
RuleLimit int64
ContinueToken string
Compact bool
}
// CountAlertRulesQuery is the query for counting alert rules
+1 -14
View File
@@ -12,7 +12,6 @@ import (
"github.com/grafana/alerting/models"
alertingNotify "github.com/grafana/alerting/notify"
"github.com/grafana/alerting/notify/nfstatus"
alertingTemplates "github.com/grafana/alerting/templates"
"github.com/prometheus/alertmanager/config"
amv2 "github.com/prometheus/alertmanager/api/v2/models"
@@ -59,7 +58,6 @@ type alertmanager struct {
decryptFn alertingNotify.GetDecryptedValueFn
crypto Crypto
features featuremgmt.FeatureToggles
dynamicLimits alertingNotify.DynamicLimits
}
// maintenanceOptions represent the options for components that need maintenance on a frequency within the Alertmanager.
@@ -150,16 +148,6 @@ func NewAlertmanager(ctx context.Context, orgID int64, cfg *setting.Cfg, store A
return nil, err
}
limits := alertingNotify.DynamicLimits{
Dispatcher: nilLimits{},
Templates: alertingTemplates.Limits{
MaxTemplateOutputSize: cfg.UnifiedAlerting.AlertmanagerMaxTemplateOutputSize,
},
}
if err := limits.Templates.Validate(); err != nil {
return nil, fmt.Errorf("invalid template limits: %w", err)
}
am := &alertmanager{
Base: gam,
ConfigMetrics: m.AlertmanagerConfigMetrics,
@@ -170,7 +158,6 @@ func NewAlertmanager(ctx context.Context, orgID int64, cfg *setting.Cfg, store A
decryptFn: decryptFn,
crypto: crypto,
features: featureToggles,
dynamicLimits: limits,
}
return am, nil
@@ -395,7 +382,7 @@ func (am *alertmanager) applyConfig(ctx context.Context, cfg *apimodels.Postable
TimeIntervals: amConfig.TimeIntervals,
Templates: templates,
Receivers: receivers,
Limits: am.dynamicLimits,
DispatcherLimits: &nilLimits{},
Raw: rawConfig,
Hash: configHash,
})
+1 -7
View File
@@ -631,13 +631,7 @@ func (st DBstore) ListAlertRulesByGroup(ctx context.Context, query *ngmodels.Lis
continue
}
var converted ngmodels.AlertRule
if query.Compact {
converted, err = alertRuleToModelsAlertRuleCompact(*rule, st.Logger)
} else {
converted, err = alertRuleToModelsAlertRule(*rule, st.Logger)
}
converted, err := alertRuleToModelsAlertRule(*rule, st.Logger)
if err != nil {
st.Logger.Error("Invalid rule found in DB store, cannot convert, ignoring it", "func", "ListAlertRulesByGroup", "error", err)
continue
+5 -33
View File
@@ -10,38 +10,11 @@ import (
"github.com/grafana/grafana/pkg/services/ngalert/models"
)
// We only care about the data source UIDs.
type compactQuery struct {
DatasourceUID string `json:"datasourceUid"`
}
func alertRuleToModelsAlertRule(ar alertRule, l log.Logger) (models.AlertRule, error) {
return convertAlertRuleToModel(ar, l, false)
}
// alertRuleToModelsAlertRuleCompact transforms an alertRule to a models.AlertRule
// ignoring alert queries (except for data source UIDs), notification settings, and metadata.
func alertRuleToModelsAlertRuleCompact(ar alertRule, l log.Logger) (models.AlertRule, error) {
return convertAlertRuleToModel(ar, l, true)
}
// convertAlertRuleToModel creates a models.AlertRule from an alertRule.
// When 'compact' is set to 'true', it skips parsing the alert queries (except for the data source UID), notification
// settings, and metadata, thus reducing the number of JSON serializations needed.
func convertAlertRuleToModel(ar alertRule, l log.Logger, compact bool) (models.AlertRule, error) {
var data []models.AlertQuery
if compact {
var cqs []compactQuery
if err := json.Unmarshal([]byte(ar.Data), &cqs); err != nil {
return models.AlertRule{}, fmt.Errorf("failed to parse data: %w", err)
}
for _, cq := range cqs {
data = append(data, models.AlertQuery{DatasourceUID: cq.DatasourceUID})
}
} else {
if err := json.Unmarshal([]byte(ar.Data), &data); err != nil {
return models.AlertRule{}, fmt.Errorf("failed to parse data: %w", err)
}
err := json.Unmarshal([]byte(ar.Data), &data)
if err != nil {
return models.AlertRule{}, fmt.Errorf("failed to parse data: %w", err)
}
result := models.AlertRule{
@@ -79,7 +52,6 @@ func convertAlertRuleToModel(ar alertRule, l log.Logger, compact bool) (models.A
result.UpdatedBy = util.Pointer(models.UserUID(*ar.UpdatedBy))
}
var err error
if ar.NoDataState != "" {
result.NoDataState, err = models.NoDataStateFromString(ar.NoDataState)
if err != nil {
@@ -118,7 +90,7 @@ func convertAlertRuleToModel(ar alertRule, l log.Logger, compact bool) (models.A
}
}
if !compact && ar.NotificationSettings != "" {
if ar.NotificationSettings != "" {
ns, err := parseNotificationSettings(ar.NotificationSettings)
if err != nil {
return models.AlertRule{}, fmt.Errorf("failed to parse notification settings: %w", err)
@@ -126,7 +98,7 @@ func convertAlertRuleToModel(ar alertRule, l log.Logger, compact bool) (models.A
result.NotificationSettings = ns
}
if !compact && ar.Metadata != "" {
if ar.Metadata != "" {
err = json.Unmarshal([]byte(ar.Metadata), &result.Metadata)
if err != nil {
return models.AlertRule{}, fmt.Errorf("failed to metadata: %w", err)
-79
View File
@@ -65,85 +65,6 @@ func TestAlertRuleToModelsAlertRule(t *testing.T) {
})
}
func TestAlertRuleToModelsAlertRuleCompact(t *testing.T) {
t.Run("should only extract datasource UIDs in compact mode", func(t *testing.T) {
rule := alertRule{
ID: 1,
OrgID: 1,
UID: "test-uid",
Title: "Test Rule",
Condition: "A",
Data: `[{"datasourceUid":"ds1","refId":"A","queryType":"test","model":{"expr":"up"}},{"datasourceUid":"ds2","refId":"B","queryType":"test","model":{"expr":"down"}}]`,
IntervalSeconds: 60,
Version: 1,
NamespaceUID: "ns-uid",
RuleGroup: "test-group",
NoDataState: "NoData",
ExecErrState: "Error",
NotificationSettings: `[{"receiver":"test-receiver"}]`,
Metadata: `{"editor_settings":{"simplified_query_and_expressions_section":true}}`,
}
compactResult, err := alertRuleToModelsAlertRuleCompact(rule, &logtest.Fake{})
require.NoError(t, err)
// Should have datasource UIDs.
require.Len(t, compactResult.Data, 2)
require.Equal(t, "ds1", compactResult.Data[0].DatasourceUID)
require.Equal(t, "ds2", compactResult.Data[1].DatasourceUID)
// But should not have full query data (RefID, QueryType, Model should be empty).
require.Empty(t, compactResult.Data[0].RefID)
require.Empty(t, compactResult.Data[0].QueryType)
require.Nil(t, compactResult.Data[0].Model)
require.Empty(t, compactResult.Data[1].RefID)
require.Empty(t, compactResult.Data[1].QueryType)
require.Nil(t, compactResult.Data[1].Model)
// Should not have notification settings.
require.Empty(t, compactResult.NotificationSettings)
// Should not have metadata (should be zero value).
require.Equal(t, ngmodels.AlertRuleMetadata{}, compactResult.Metadata)
})
t.Run("should parse full data in non-compact mode", func(t *testing.T) {
rule := alertRule{
ID: 1,
OrgID: 1,
UID: "test-uid",
Title: "Test Rule",
Condition: "A",
Data: `[{"datasourceUid":"ds1","refId":"A","queryType":"test","model":{"expr":"up"}},{"datasourceUid":"ds2","refId":"B","queryType":"test","model":{"expr":"down"}}]`,
IntervalSeconds: 60,
Version: 1,
NamespaceUID: "ns-uid",
RuleGroup: "test-group",
NoDataState: "NoData",
ExecErrState: "Error",
NotificationSettings: `[{"receiver":"test-receiver"}]`,
Metadata: `{"editor_settings":{"simplified_query_and_expressions_section":true}}`,
}
fullResult, err := alertRuleToModelsAlertRule(rule, &logtest.Fake{})
require.NoError(t, err)
// Should have full query data.
require.Len(t, fullResult.Data, 2)
require.Equal(t, "ds1", fullResult.Data[0].DatasourceUID)
require.Equal(t, "A", fullResult.Data[0].RefID)
require.Equal(t, "test", fullResult.Data[0].QueryType)
require.NotNil(t, fullResult.Data[0].Model)
// Should have notification settings.
require.Len(t, fullResult.NotificationSettings, 1)
require.Equal(t, "test-receiver", fullResult.NotificationSettings[0].Receiver)
// Should have metadata (metadata is parsed from JSON to struct).
require.NotEqual(t, ngmodels.AlertRuleMetadata{}, fullResult.Metadata)
})
}
func TestAlertRuleVersionToAlertRule(t *testing.T) {
g := ngmodels.RuleGen
-2
View File
@@ -188,8 +188,6 @@ type SearchOrgUsersQuery struct {
SortOpts []model.SortOption
// Flag used to allow oss edition to query users without access control
DontEnforceAccessControl bool
// Flag used to exclude hidden users from the result
ExcludeHiddenUsers bool
User identity.Requester
}
-1
View File
@@ -27,7 +27,6 @@ func ProvideService(db db.DB, cfg *setting.Cfg, quotaService quota.Service) (org
db: db,
dialect: db.GetDialect(),
log: log,
cfg: cfg,
},
cfg: cfg,
log: log,
-31
View File
@@ -8,7 +8,6 @@ import (
"strings"
"time"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/accesscontrol"
@@ -17,7 +16,6 @@ import (
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util"
)
@@ -55,7 +53,6 @@ type sqlStore struct {
//TODO: moved to service
log log.Logger
deletes []string
cfg *setting.Cfg
}
func (ss *sqlStore) Get(ctx context.Context, orgID int64) (*org.Org, error) {
@@ -563,14 +560,6 @@ func (ss *sqlStore) SearchOrgUsers(ctx context.Context, query *org.SearchOrgUser
whereParams = append(whereParams, acFilter.Args...)
}
if query.ExcludeHiddenUsers {
cond, params := buildHiddenUsersFilter(query.User, ss.cfg.HiddenUsers)
if cond != "" {
whereConditions = append(whereConditions, cond)
whereParams = append(whereParams, params...)
}
}
if query.Query != "" {
sql1, param1 := ss.dialect.LikeOperator("email", true, query.Query, true)
sql2, param2 := ss.dialect.LikeOperator("name", true, query.Query, true)
@@ -836,23 +825,3 @@ func removeUserOrg(sess *db.Session, userID int64) error {
func (ss *sqlStore) RegisterDelete(query string) {
ss.deletes = append(ss.deletes, query)
}
func buildHiddenUsersFilter(requester identity.Requester, hiddenUsersMap map[string]struct{}) (string, []any) {
if requester != nil && requester.GetIsGrafanaAdmin() {
return "", nil
}
hiddenUsers := make([]any, 0)
for user := range hiddenUsersMap {
if requester != nil && user == requester.GetLogin() {
continue
}
hiddenUsers = append(hiddenUsers, user)
}
if len(hiddenUsers) > 0 {
return "u.login NOT IN (?" + strings.Repeat(",?", len(hiddenUsers)-1) + ")", hiddenUsers
}
return "", nil
}
+6 -109
View File
@@ -820,9 +820,8 @@ func TestIntegration_SQLStore_SearchOrgUsers(t *testing.T) {
db: store,
dialect: store.GetDialect(),
log: log.NewNopLogger(),
cfg: cfg,
}
// orgUserStore.cfg.Skip
orgSvc, userSvc := createOrgAndUserSvc(t, store, cfg)
o, err := orgSvc.CreateWithMember(context.Background(), &org.CreateOrgCommand{Name: "test org"})
@@ -830,14 +829,6 @@ func TestIntegration_SQLStore_SearchOrgUsers(t *testing.T) {
seedOrgUsers(t, &orgUserStore, 10, userSvc, o.ID)
user1, err := userSvc.GetByLogin(context.Background(), &user.GetUserByLoginQuery{LoginOrEmail: "user-1"})
require.NoError(t, err)
cfg.HiddenUsers = map[string]struct{}{
"user-1": {},
"user-2": {},
}
tests := []struct {
desc string
query *org.SearchOrgUsersQuery
@@ -849,7 +840,7 @@ func TestIntegration_SQLStore_SearchOrgUsers(t *testing.T) {
OrgID: o.ID,
User: &user.SignedInUser{
OrgID: o.ID,
Permissions: map[int64]map[string][]string{o.ID: {accesscontrol.ActionOrgUsersRead: {accesscontrol.ScopeUsersAll}}},
Permissions: map[int64]map[string][]string{1: {accesscontrol.ActionOrgUsersRead: {accesscontrol.ScopeUsersAll}}},
},
},
expectedNumUsers: 10,
@@ -860,7 +851,7 @@ func TestIntegration_SQLStore_SearchOrgUsers(t *testing.T) {
OrgID: o.ID,
User: &user.SignedInUser{
OrgID: o.ID,
Permissions: map[int64]map[string][]string{o.ID: {accesscontrol.ActionOrgUsersRead: {""}}},
Permissions: map[int64]map[string][]string{1: {accesscontrol.ActionOrgUsersRead: {""}}},
},
},
expectedNumUsers: 0,
@@ -871,8 +862,8 @@ func TestIntegration_SQLStore_SearchOrgUsers(t *testing.T) {
OrgID: o.ID,
User: &user.SignedInUser{
OrgID: o.ID,
Permissions: map[int64]map[string][]string{o.ID: {accesscontrol.ActionOrgUsersRead: {
"users:id:2",
Permissions: map[int64]map[string][]string{1: {accesscontrol.ActionOrgUsersRead: {
"users:id:1",
"users:id:5",
"users:id:9",
}}},
@@ -880,55 +871,6 @@ func TestIntegration_SQLStore_SearchOrgUsers(t *testing.T) {
},
expectedNumUsers: 3,
},
{
desc: "should exclude hidden users when ExcludeHiddenUsers is true and user is nil",
query: &org.SearchOrgUsersQuery{
OrgID: o.ID,
ExcludeHiddenUsers: true,
User: nil,
DontEnforceAccessControl: true,
},
expectedNumUsers: 8,
},
{
desc: "should not exclude hidden users when ExcludeHiddenUsers is true and user is Grafana Admin",
query: &org.SearchOrgUsersQuery{
OrgID: o.ID,
ExcludeHiddenUsers: true,
User: &user.SignedInUser{
OrgID: o.ID,
IsGrafanaAdmin: true,
Permissions: map[int64]map[string][]string{o.ID: {accesscontrol.ActionOrgUsersRead: {accesscontrol.ScopeUsersAll}}},
},
},
expectedNumUsers: 10,
},
{
desc: "should return all users if ExcludeHiddenUsers is false",
query: &org.SearchOrgUsersQuery{
OrgID: o.ID,
ExcludeHiddenUsers: false,
User: &user.SignedInUser{
OrgID: o.ID,
Permissions: map[int64]map[string][]string{o.ID: {accesscontrol.ActionOrgUsersRead: {accesscontrol.ScopeUsersAll}}},
},
},
expectedNumUsers: 10,
},
{
desc: "should include the hidden user when the request is made by the hidden user and ExcludeHiddenUsers is true",
query: &org.SearchOrgUsersQuery{
OrgID: o.ID,
ExcludeHiddenUsers: true,
User: &user.SignedInUser{
UserID: user1.ID,
Login: user1.Login,
OrgID: o.ID,
Permissions: map[int64]map[string][]string{o.ID: {accesscontrol.ActionOrgUsersRead: {accesscontrol.ScopeUsersAll}}},
},
},
expectedNumUsers: 9,
},
}
for _, tt := range tests {
@@ -937,58 +879,13 @@ func TestIntegration_SQLStore_SearchOrgUsers(t *testing.T) {
require.NoError(t, err)
assert.Len(t, result.OrgUsers, tt.expectedNumUsers)
// No pagination is applied, so TotalCount should equal to number of returned users
assert.Equal(t, int64(tt.expectedNumUsers), result.TotalCount)
if tt.query.User != nil && !hasWildcardScope(tt.query.User, accesscontrol.ActionOrgUsersRead) && !tt.query.User.GetIsGrafanaAdmin() {
if !hasWildcardScope(tt.query.User, accesscontrol.ActionOrgUsersRead) {
for _, u := range result.OrgUsers {
assert.Contains(t, tt.query.User.GetPermissions()[accesscontrol.ActionOrgUsersRead], fmt.Sprintf("users:id:%d", u.UserID))
}
}
})
}
t.Run("should paginate correctly when ExcludeHiddenUsers is true", func(t *testing.T) {
query := &org.SearchOrgUsersQuery{
OrgID: o.ID,
ExcludeHiddenUsers: true,
User: &user.SignedInUser{
OrgID: o.ID,
Permissions: map[int64]map[string][]string{o.ID: {accesscontrol.ActionOrgUsersRead: {accesscontrol.ScopeUsersAll}}},
},
Limit: 5,
Page: 1,
}
result, err := orgUserStore.SearchOrgUsers(context.Background(), query)
require.NoError(t, err)
assert.Len(t, result.OrgUsers, 5)
assert.Equal(t, int64(8), result.TotalCount)
query.Page = 2
result, err = orgUserStore.SearchOrgUsers(context.Background(), query)
require.NoError(t, err)
assert.Len(t, result.OrgUsers, 3)
assert.Equal(t, int64(8), result.TotalCount)
})
t.Run("should return all users if HiddenUsers is empty", func(t *testing.T) {
oldHiddenUsers := cfg.HiddenUsers
cfg.HiddenUsers = make(map[string]struct{})
defer func() { cfg.HiddenUsers = oldHiddenUsers }()
query := &org.SearchOrgUsersQuery{
OrgID: o.ID,
ExcludeHiddenUsers: true,
User: &user.SignedInUser{
OrgID: o.ID,
Permissions: map[int64]map[string][]string{o.ID: {accesscontrol.ActionOrgUsersRead: {accesscontrol.ScopeUsersAll}}},
},
}
result, err := orgUserStore.SearchOrgUsers(context.Background(), query)
require.NoError(t, err)
assert.Len(t, result.OrgUsers, 10)
assert.Equal(t, int64(10), result.TotalCount)
})
}
func TestIntegration_SQLStore_RemoveOrgUser(t *testing.T) {
-8
View File
@@ -153,9 +153,6 @@ type UnifiedAlertingSettings struct {
// DeletedRuleRetention defines the maximum duration to retain deleted alerting rules before permanent removal.
DeletedRuleRetention time.Duration
// AlertmanagerMaxTemplateOutputSize specifies the maximum allowed size for rendered template output in bytes.
AlertmanagerMaxTemplateOutputSize int64
}
type RecordingRuleSettings struct {
@@ -586,11 +583,6 @@ func (cfg *Cfg) ReadUnifiedAlertingSettings(iniFile *ini.File) error {
return fmt.Errorf("setting 'deleted_rule_retention' is invalid, only 0 or a positive duration are allowed")
}
uaCfg.AlertmanagerMaxTemplateOutputSize = ua.Key("alertmanager_max_template_output_bytes").MustInt64(10485760)
if uaCfg.AlertmanagerMaxTemplateOutputSize < 0 {
return fmt.Errorf("setting 'alertmanager_max_template_output_bytes' is invalid, only 0 or a positive integer are allowed")
}
cfg.UnifiedAlerting = uaCfg
return nil
}
-30
View File
@@ -1346,34 +1346,4 @@ Key metrics for monitoring Unified Search:
- `unified_search_shadow_requests_total`: Shadow traffic request counts
- `unified_search_ring_members`: Number of active search server instances
## Data migrations
Unified storage includes an automated migration system that transfers resources from legacy SQL tables to unified storage. Migrations run automatically during Grafana startup when enabled.
### Supported resources
- Folders
- Dashboards
- Library panels
- Playlists
### Validation
Built-in validators ensure data integrity after migration:
- **CountValidator**: Verifies resource counts match between legacy and unified storage
- **FolderTreeValidator**: Validates folder parent-child relationships are preserved
### Configuration
Enable migrations in `grafana.ini`:
```ini
[unified_storage]
disable_data_migrations = false
```
### Documentation
For detailed information about migration architecture, validators, and troubleshooting, refer to [migrations/README.md](./migrations/README.md).
-122
View File
@@ -1,122 +0,0 @@
# Unified storage data migrations
Automated migration system for moving Grafana resources from legacy SQL storage to unified storage.
## Overview
The migration system transfers resources from legacy SQL tables to Grafana's unified storage backend. It runs automatically during Grafana startup and validates data integrity after each migration.
### Supported resources
| Resource | API Group | Legacy table |
|----------|-----------|--------------|
| Folders | `folder.grafana.app` | `dashboard` |
| Dashboards | `dashboard.grafana.app` | `dashboard` |
| Library panels | `dashboard.grafana.app` | `library_element` |
| Playlists | `playlist.grafana.app` | `playlist` |
## Architecture
```
┌─────────────────────────────────────────────────────────────┐
│ ResourceMigration │
│ (Orchestrates per-organization migration) │
└──────────────────────────┬──────────────────────────────────┘
┌───────────────────┼───────────────────┐
▼ ▼ ▼
UnifiedMigrator Validators BulkProcess API
(Stream legacy (Validate after (Write to unified
resources) migration) storage)
```
### Components
- **`service.go`**: Migration service entry point and registration
- **`migrator.go`**: Core migration logic using streaming BulkProcess API
- **`resource_migration.go`**: Per-organization migration execution
- **`validator.go`**: Post-migration validation (CountValidator, FolderTreeValidator)
- **`resources.go`**: Registry of migratable resource types
## How migrations work
### Migration flow
1. Grafana starts and checks migration status in `unifiedstorage_migration_log` table
2. For each organization, the migrator:
- Reads resources from legacy SQL tables
- Streams resources to unified storage via BulkProcess API
- Runs validators to verify data integrity
3. Records migration result in `unifiedstorage_migration_log` table
### Per-organization execution
Migrations run independently for each organization using namespace format `org-{orgId}`.
## Validators
### CountValidator
Compares resource counts between legacy SQL and unified storage. Accounts for rejected items during validation.
### FolderTreeValidator
Verifies folder parent-child relationships are preserved after migration.
## Configuration
To enable migrations, set the following in your Grafana configuration:
```ini
[unified_storage]
disable_data_migrations = false
```
## Monitoring
### Log messages
Successful migration:
```
info: storage.unified.resource_migration Starting migration for all organizations
info: storage.unified.resource_migration Migration completed successfully for all organizations
```
Failed migration:
```
error: storage.unified.resource_migration Migration validation failed
```
### Migration status
Query the migration log table to check status:
```sql
SELECT * FROM unifiedstorage_migration_log WHERE migration_id LIKE '%folders-dashboards%';
```
The `migration_id` is defined in `service.go` during registration. Ideally, it should be the resource type(s) being migrated.
## Development
### Adding a new validator
Implement the `Validator` interface:
```go
type Validator interface {
Name() string
Validate(ctx context.Context, sess *xorm.Session, response *resourcepb.BulkResponse, log log.Logger) error
}
```
Register the validator in `service.go` when creating the `ResourceMigration`.
### Adding a new resource type
1. Add the resource definition to `registeredResources` in `resources.go`
2. Implement the migrator function in the `MigrationDashboardAccessor` interface
3. Register the migration in `service.go`
@@ -867,86 +867,3 @@ func TestIntegrationProvisioning_DeleteRepositoryAndReleaseResources(t *testing.
}
}, time.Second*20, time.Millisecond*10, "Expected folders to be released")
}
// TestIntegrationProvisioning_JobPermissions verifies which org roles may
// enqueue jobs on a repository through the "jobs" subresource: editors and
// admins are accepted (202), viewers are rejected (403).
func TestIntegrationProvisioning_JobPermissions(t *testing.T) {
	testutil.SkipIntegrationTestInShortMode(t)

	helper := runGrafana(t)
	ctx := context.Background()

	const repo = "job-permissions-test"
	helper.CreateRepo(t, TestRepo{
		Name:               repo,
		Target:             "folder",
		Copies:             map[string]string{}, // No files needed for this test
		ExpectedDashboards: 0,
		ExpectedFolders:    1, // Repository creates a folder
	})

	// A minimal pull job; the same payload is posted by each role below.
	payload := asJSON(provisioning.JobSpec{
		Action: provisioning.JobActionPull,
		Pull:   &provisioning.SyncJobOptions{},
	})

	t.Run("editor can POST jobs", func(t *testing.T) {
		var code int
		result := helper.EditorREST.Post().
			Namespace("default").
			Resource("repositories").
			Name(repo).
			SubResource("jobs").
			Body(payload).
			SetHeader("Content-Type", "application/json").
			Do(ctx).StatusCode(&code)
		require.NoError(t, result.Error(), "editor should be able to POST jobs")
		require.Equal(t, http.StatusAccepted, code, "should return 202 Accepted")

		// The accepted response carries the created job object.
		obj, err := result.Get()
		require.NoError(t, err, "should get job object")
		unstruct, ok := obj.(*unstructured.Unstructured)
		require.True(t, ok, "expecting unstructured object")
		require.NotEmpty(t, unstruct.GetName(), "job should have a name")
	})

	t.Run("viewer cannot POST jobs", func(t *testing.T) {
		var code int
		result := helper.ViewerREST.Post().
			Namespace("default").
			Resource("repositories").
			Name(repo).
			SubResource("jobs").
			Body(payload).
			SetHeader("Content-Type", "application/json").
			Do(ctx).StatusCode(&code)
		require.Error(t, result.Error(), "viewer should not be able to POST jobs")
		require.Equal(t, http.StatusForbidden, code, "should return 403 Forbidden")
		require.True(t, apierrors.IsForbidden(result.Error()), "error should be forbidden")
	})

	t.Run("admin can POST jobs", func(t *testing.T) {
		var code int
		result := helper.AdminREST.Post().
			Namespace("default").
			Resource("repositories").
			Name(repo).
			SubResource("jobs").
			Body(payload).
			SetHeader("Content-Type", "application/json").
			Do(ctx).StatusCode(&code)
		// Job might already exist from previous test, which is acceptable
		if apierrors.IsAlreadyExists(result.Error()) {
			// Wait for the existing job to complete
			helper.AwaitJobs(t, repo)
			return
		}
		require.NoError(t, result.Error(), "admin should be able to POST jobs")
		require.Equal(t, http.StatusAccepted, code, "should return 202 Accepted")
	})
}
-8
View File
@@ -20,18 +20,10 @@ type SearchRequest struct {
Aggs AggArray
CustomProps map[string]interface{}
TimeRange backend.TimeRange
// RawBody contains the raw Elasticsearch Query DSL JSON for raw DSL queries
// When set, this takes precedence over all other fields during marshaling
RawBody map[string]interface{}
}
// MarshalJSON returns the JSON encoding of the request.
func (r *SearchRequest) MarshalJSON() ([]byte, error) {
// If RawBody is set, use it directly for raw DSL queries
if len(r.RawBody) > 0 {
return json.Marshal(r.RawBody)
}
root := make(map[string]interface{})
root["size"] = r.Size
@@ -3,7 +3,6 @@ package es
import (
"bytes"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
@@ -26,9 +25,6 @@ func newRequestEncoder(logger log.Logger) *requestEncoder {
// encodeBatchRequests encodes multiple requests into NDJSON format
func (e *requestEncoder) encodeBatchRequests(requests []*multiRequest) ([]byte, error) {
start := time.Now()
defer func() {
e.logger.Debug("Completed encoding of batch requests to json", "duration", time.Since(start))
}()
payload := bytes.Buffer{}
for _, r := range requests {
@@ -38,25 +34,20 @@ func (e *requestEncoder) encodeBatchRequests(requests []*multiRequest) ([]byte,
}
payload.WriteString(string(reqHeader) + "\n")
body := ""
switch r.body.(type) {
case *SearchRequest:
reqBody, err := json.Marshal(r.body)
if err != nil {
return nil, err
}
body = string(reqBody)
case string:
body = r.body.(string)
default:
return nil, fmt.Errorf("unknown request type: %T", r.body)
reqBody, err := json.Marshal(r.body)
if err != nil {
return nil, err
}
body := string(reqBody)
body = strings.ReplaceAll(body, "$__interval_ms", strconv.FormatInt(r.interval.Milliseconds(), 10))
body = strings.ReplaceAll(body, "$__interval", r.interval.String())
payload.WriteString(body + "\n")
}
elapsed := time.Since(start)
e.logger.Debug("Completed encoding of batch requests to json", "duration", elapsed)
return payload.Bytes(), nil
}
@@ -30,8 +30,6 @@ type SearchRequestBuilder struct {
aggBuilders []AggBuilder
customProps map[string]any
timeRange backend.TimeRange
// rawBody contains the raw Elasticsearch Query DSL JSON for raw DSL queries
rawBody map[string]any
}
// NewSearchRequestBuilder create a new search request builder
@@ -55,12 +53,6 @@ func (b *SearchRequestBuilder) Build() (*SearchRequest, error) {
Size: b.size,
Sort: b.sort,
CustomProps: b.customProps,
RawBody: b.rawBody,
}
// If RawBody is set, skip building query and aggs as they're in the raw body
if len(b.rawBody) > 0 {
return &sr, nil
}
if b.queryBuilder != nil {
@@ -149,19 +141,6 @@ func (b *SearchRequestBuilder) AddSearchAfter(value any) *SearchRequestBuilder {
return b
}
// AddCustomProp records an arbitrary key/value pair that is emitted as-is on
// the marshaled search request, and returns the builder for chaining.
func (b *SearchRequestBuilder) AddCustomProp(key string, value any) *SearchRequestBuilder {
	b.customProps[key] = value
	return b
}
// SetRawBody installs a pre-built Elasticsearch Query DSL document on the
// builder. The body is sent to Elasticsearch verbatim, bypassing the query
// and aggregation builders entirely. Returns the builder for chaining.
func (b *SearchRequestBuilder) SetRawBody(rawBody map[string]any) *SearchRequestBuilder {
	b.rawBody = rawBody
	return b
}
// Query creates and return a query builder
func (b *SearchRequestBuilder) Query() *QueryBuilder {
if b.queryBuilder == nil {
+5 -8
View File
@@ -20,12 +20,11 @@ const (
)
type elasticsearchDataQuery struct {
client es.Client
dataQueries []backend.DataQuery
logger log.Logger
ctx context.Context
keepLabelsInResponse bool
aggregationParserDSLRawQuery AggregationParser
client es.Client
dataQueries []backend.DataQuery
logger log.Logger
ctx context.Context
keepLabelsInResponse bool
}
var newElasticsearchDataQuery = func(ctx context.Context, client es.Client, req *backend.QueryDataRequest, logger log.Logger) *elasticsearchDataQuery {
@@ -40,8 +39,6 @@ var newElasticsearchDataQuery = func(ctx context.Context, client es.Client, req
// To maintain backward compatibility, it is necessary to keep labels in responses for alerting and expressions queries.
// Historically, these labels have been used in alerting rules and transformations.
keepLabelsInResponse: fromAlert || fromExpression,
aggregationParserDSLRawQuery: NewAggregationParser(),
}
}
@@ -1,7 +1,6 @@
package elasticsearch
import (
"encoding/json"
"fmt"
"strconv"
@@ -24,17 +23,6 @@ func (e *elasticsearchDataQuery) processQuery(q *Query, ms *es.MultiSearchReques
filters.AddDateRangeFilter(defaultTimeField, to, from, es.DateFormatEpochMS)
filters.AddQueryStringFilter(q.RawQuery, true)
if q.EditorType != nil && *q.EditorType == "code" && q.RawDSLQuery != "" {
cfg := backend.GrafanaConfigFromContext(e.ctx)
if !cfg.FeatureToggles().IsEnabled("elasticsearchRawDSLQuery") {
return backend.DownstreamError(fmt.Errorf("raw DSL query feature is disabled. Enable the elasticsearchRawDSLQuery feature toggle to use this query type"))
}
if err := e.processRawDSLQuery(q, b); err != nil {
return err
}
}
if isLogsQuery(q) {
processLogsQuery(q, b, from, to, defaultTimeField)
} else if isDocumentQuery(q) {
@@ -196,46 +184,6 @@ func processTimeSeriesQuery(q *Query, b *es.SearchRequestBuilder, from, to int64
}
}
// processRawDSLQuery handles queries where the user supplied raw
// Elasticsearch Query DSL JSON (code editor) instead of the visual builder.
//
// When the first metric is a time-series type (i.e. not logs, raw data, or
// raw document), the aggregations are parsed back out of the raw JSON so the
// regular response processing can build frames from them, and the "query"
// part (if present) is stored on q.RawQuery. For all other query types the
// parsed body is handed to the builder verbatim via SetRawBody.
//
// Returns a downstream error when the raw JSON is empty, invalid, or its
// aggregations cannot be parsed.
func (e *elasticsearchDataQuery) processRawDSLQuery(q *Query, b *es.SearchRequestBuilder) error {
	if q.RawDSLQuery == "" {
		return backend.DownstreamError(fmt.Errorf("raw DSL query is empty"))
	}
	// Parse the raw DSL query JSON
	var queryBody map[string]any
	if err := json.Unmarshal([]byte(q.RawDSLQuery), &queryBody); err != nil {
		return backend.DownstreamError(fmt.Errorf("invalid raw DSL query JSON: %w", err))
	}
	if len(q.Metrics) > 0 {
		firstMetricType := q.Metrics[0].Type
		if firstMetricType != logsType && firstMetricType != rawDataType && firstMetricType != rawDocumentType {
			bucketAggs, metricAggs, err := e.aggregationParserDSLRawQuery.Parse(q.RawDSLQuery)
			if err != nil {
				return backend.DownstreamError(fmt.Errorf("failed to parse aggregations: %w", err))
			}
			// If there is no metric agg in the query, it is a count agg
			if len(metricAggs) == 0 {
				metricAggs = append(metricAggs, &MetricAgg{Type: "count"})
			}
			q.BucketAggs = bucketAggs
			q.Metrics = metricAggs
			if queryPart, ok := queryBody["query"].(map[string]any); ok {
				// Fix: the error was previously discarded with `_`, which on
				// failure would silently leave q.RawQuery empty. Marshaling a
				// value that just came from json.Unmarshal should not fail,
				// but surface it if it ever does.
				queryJSON, err := json.Marshal(queryPart)
				if err != nil {
					return backend.DownstreamError(fmt.Errorf("invalid raw DSL query JSON: %w", err))
				}
				q.RawQuery = string(queryJSON)
			}
			return nil
		}
	}
	// For non-time-series queries (logs, raw data), pass through the raw body directly
	b.SetRawBody(queryBody)
	return nil
}
// getPipelineAggField returns the pipeline aggregation field
func getPipelineAggField(m *MetricAgg) string {
// In frontend we are using Field as pipelineAggField
+1 -99
View File
@@ -8,7 +8,6 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/backend/log"
"github.com/grafana/grafana-plugin-sdk-go/experimental/featuretoggles"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -1888,11 +1887,6 @@ func newDataQuery(body string) (backend.QueryDataRequest, error) {
}
// executeElasticsearchDataQuery is a convenience wrapper around
// executeElasticsearchDataQueryWithContext that runs the query with a
// background context.
func executeElasticsearchDataQuery(c es.Client, body string, from, to time.Time) (
	*backend.QueryDataResponse, error) {
	ctx := context.Background()
	return executeElasticsearchDataQueryWithContext(c, body, from, to, ctx)
}
func executeElasticsearchDataQueryWithContext(c es.Client, body string, from, to time.Time, ctx context.Context) (
*backend.QueryDataResponse, error) {
timeRange := backend.TimeRange{
From: from,
@@ -1907,98 +1901,6 @@ func executeElasticsearchDataQueryWithContext(c es.Client, body string, from, to
},
},
}
query := newElasticsearchDataQuery(ctx, c, &dataRequest, log.New())
query := newElasticsearchDataQuery(context.Background(), c, &dataRequest, log.New())
return query.execute()
}
// TestRawDSLQuery covers the code-editor path where the user supplies raw
// Elasticsearch Query DSL. The elasticsearchRawDSLQuery feature toggle is
// enabled through the Grafana config placed on the context.
func TestRawDSLQuery(t *testing.T) {
	from := time.Date(2018, 5, 15, 17, 50, 0, 0, time.UTC)
	to := time.Date(2018, 5, 15, 17, 55, 0, 0, time.UTC)

	// Context carrying a Grafana config with the raw DSL feature enabled.
	cfg := backend.NewGrafanaCfg(map[string]string{
		featuretoggles.EnabledFeatures: "elasticsearchRawDSLQuery",
	})
	ctx := backend.WithGrafanaConfig(context.Background(), cfg)

	t.Run("With raw DSL query", func(t *testing.T) {
		t.Run("Basic raw DSL query with aggregations", func(t *testing.T) {
			client := newFakeClient()
			_, err := executeElasticsearchDataQueryWithContext(client, `{
				"editorType": "code",
				"rawDSLQuery": "{\"query\":{\"bool\":{\"filter\":[{\"range\":{\"@timestamp\":{\"gte\":1526405400000,\"lte\":1526405700000,\"format\":\"epoch_millis\"}}}]}},\"aggs\":{\"date_histogram\":{\"date_histogram\":{\"field\":\"@timestamp\",\"interval\":\"1m\"}}},\"size\":0}"
			}`, from, to, ctx)
			require.NoError(t, err)
			require.Len(t, client.multisearchRequests, 1)
			require.Len(t, client.multisearchRequests[0].Requests, 1)

			req := client.multisearchRequests[0].Requests[0]
			// The entire DSL document must be carried in RawBody.
			require.NotNil(t, req.RawBody)
			require.Contains(t, req.RawBody, "query")
			require.Contains(t, req.RawBody, "aggs")

			// Size must come from the raw body, not builder defaults.
			gotSize, ok := req.RawBody["size"].(float64)
			require.True(t, ok)
			require.Equal(t, float64(0), gotSize)
		})

		t.Run("Raw DSL query with query_string", func(t *testing.T) {
			client := newFakeClient()
			_, err := executeElasticsearchDataQueryWithContext(client, `{
				"editorType": "code",
				"rawDSLQuery": "{\"query\":{\"query_string\":{\"query\":\"status:200\",\"analyze_wildcard\":true}},\"size\":100}"
			}`, from, to, ctx)
			require.NoError(t, err)
			require.Len(t, client.multisearchRequests, 1)

			req := client.multisearchRequests[0].Requests[0]
			require.NotNil(t, req.RawBody)
			require.Contains(t, req.RawBody, "query")

			gotSize, ok := req.RawBody["size"].(float64)
			require.True(t, ok)
			require.Equal(t, float64(100), gotSize)

			// The query object itself must survive the round trip.
			queryPart, ok := req.RawBody["query"].(map[string]any)
			require.True(t, ok)
			require.Contains(t, queryPart, "query_string")
		})

		t.Run("Raw DSL query with sort", func(t *testing.T) {
			client := newFakeClient()
			_, err := executeElasticsearchDataQueryWithContext(client, `{
				"editorType": "code",
				"rawDSLQuery": "{\"query\":{\"match_all\":{}},\"sort\":[{\"@timestamp\":{\"order\":\"desc\"}}],\"size\":50}"
			}`, from, to, ctx)
			require.NoError(t, err)
			require.Len(t, client.multisearchRequests, 1)

			req := client.multisearchRequests[0].Requests[0]
			require.NotNil(t, req.RawBody)
			require.Contains(t, req.RawBody, "query")
			require.Contains(t, req.RawBody, "sort")

			sortClause, ok := req.RawBody["sort"].([]any)
			require.True(t, ok)
			require.NotEmpty(t, sortClause)
		})

		t.Run("Invalid JSON in raw DSL query returns error", func(t *testing.T) {
			client := newFakeClient()
			response, err := executeElasticsearchDataQueryWithContext(client, `{
				"editorType": "code",
				"rawDSLQuery": "{ invalid json }"
			}`, from, to, ctx)
			// The transport layer succeeds; the failure is reported per-refID.
			require.NoError(t, err)
			require.NotNil(t, response.Responses["A"].Error)
			require.Contains(t, response.Responses["A"].Error.Error(), "invalid raw DSL query JSON")
		})
	})
}
@@ -6,10 +6,6 @@ import (
// isQueryWithError validates the query and returns an error if invalid
func isQueryWithError(query *Query) error {
// Skip validation for raw DSL queries because no easy way to see it is valid without just running it
if query.EditorType != nil && *query.EditorType == "code" && query.RawDSLQuery != "" {
return nil
}
if len(query.BucketAggs) == 0 {
// If no aggregations, only document and logs queries are valid
if len(query.Metrics) == 0 || (!isLogsQuery(query) && !isDocumentQuery(query)) {

Some files were not shown because too many files have changed in this diff Show More