Compare commits

..

10 Commits

Author SHA1 Message Date
Alexander Zobnin 0aae7e01bc Zanzana: Add remote client metrics (#116012)
* Zanzana: Add remote client metrics

* fix linter
2026-01-08 15:24:54 +01:00
Will Assis 58e9e4a56d unified-storage: fixes for sqlkv to work with postgres (#115961)
* unified-storage: fixes for sqlkv to work with postgres
2026-01-08 08:21:35 -05:00
Matheus Macabu dff9bea3e8 Reporting: Add feature toggle for CSV encoding options (#115584) 2026-01-08 13:56:54 +01:00
Galen Kistler 19cfab89f3 Explore: Traces query that will work with either logs drilldown or explore (#115837)
* fix: use query that will work with either logs drilldown or explore
2026-01-08 06:55:01 -06:00
Mustafa Sencer Özcan 088bab8b38 feat: enable auto migration based on resource count (#115619)
* feat(unified): migration at startup based on resource count

-- draft

* feat: introduce auto migration enablement for dashboards & folders

* feat: enable auto migration based on threshold

* fix: improve

* fix: pass in the auto migrate per migration definition

* fix: minor

* fix: only use one options

* fix: test

* fix: test

* fix: tests

* fix: simplify configs

* chore: rename

* fix: add integration test

* fix: add integration test

* fix: integration tests

* chore: add comments

* fix: address comment

* fix: address comments

* fix: test and auto migration flow

* fix: test

---------

Co-authored-by: Rafael Paulovic <rafael.paulovic@grafana.com>
2026-01-08 13:30:40 +01:00
Sonia Aguilar 9e8bdee283 Alerting: Hide DMA options when no manageAlerts datasources exist (#115952)
* hide data source managed options in the more menu in the list view

* Hide type selector in the new alert form when no data source has manageAlerts enabled
2026-01-08 13:17:37 +01:00
Gilles De Mey bb5bb00e4d Alerting: Rename alerts to alert activity (#115948)
rename alerts to alert activity
2026-01-08 11:48:27 +01:00
Misi 5fcc67837a IAM: Update ExternalGroupMapping authorizer (#115627)
* wip

* Add target resource authorizer to ExternalGroupMapping

* Regenerate OpenAPI snapshot

* Update pkg/registry/apis/iam/authorizer/external_group_mapping.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update pkg/registry/apis/iam/register.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Address feedback, reorganize

* Add tests to the public interface separately

* Address feedback

* Address feedback

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-01-08 11:47:00 +01:00
renovate-sh-app[bot] 79f2016a66 chore(deps): update dependency @openfeature/ofrep-web-provider to v0.3.5 (#115963)
| datasource | package                         | from  | to    |
| ---------- | ------------------------------- | ----- | ----- |
| npm        | @openfeature/ofrep-web-provider | 0.3.3 | 0.3.5 |

Signed-off-by: renovate-sh-app[bot] <219655108+renovate-sh-app[bot]@users.noreply.github.com>
Co-authored-by: renovate-sh-app[bot] <219655108+renovate-sh-app[bot]@users.noreply.github.com>
2026-01-08 09:49:04 +00:00
renovate-sh-app[bot] 7858dcb9c1 chore(deps): update dependency @openfeature/web-sdk to v1.7.2 (#115964)
| datasource | package              | from  | to    |
| ---------- | -------------------- | ----- | ----- |
| npm        | @openfeature/web-sdk | 1.7.1 | 1.7.2 |

Signed-off-by: renovate-sh-app[bot] <219655108+renovate-sh-app[bot]@users.noreply.github.com>
Co-authored-by: renovate-sh-app[bot] <219655108+renovate-sh-app[bot]@users.noreply.github.com>
2026-01-08 09:48:41 +00:00
48 changed files with 1433 additions and 248 deletions
+1 -1
View File
@@ -182,7 +182,7 @@ jobs:
with:
persist-credentials: false
- name: Setup Docker
uses: docker/setup-docker-action@e43656e248c0bd0647d3f5c195d116aacf6fcaf4 # v4
uses: docker/setup-docker-action@3fb92d6d9c634363128c8cce4bc3b2826526370a # v4
- name: Setup Node.js
uses: ./.github/actions/setup-node
- name: Install Tilt
+1 -1
View File
@@ -34,6 +34,6 @@ jobs:
uses: actions/checkout@v5
with:
persist-credentials: false
- uses: docker/setup-docker-action@e43656e248c0bd0647d3f5c195d116aacf6fcaf4 # v4
- uses: docker/setup-docker-action@3fb92d6d9c634363128c8cce4bc3b2826526370a # v4
- name: Build Dockerfile
run: make build-docker-full
@@ -5554,6 +5554,7 @@ export type ReportDashboard = {
};
export type Type = string;
export type ReportOptions = {
csvEncoding?: string;
layout?: string;
orientation?: string;
pdfCombineOneFile?: boolean;
+4
View File
@@ -207,6 +207,10 @@ export interface FeatureToggles {
*/
reportingRetries?: boolean;
/**
* Enables CSV encoding options in the reporting feature
*/
reportingCsvEncodingOptions?: boolean;
/**
* Send query to the same datasource in a single request when using server side expressions. The `cloudWatchBatchQueries` feature toggle should be enabled if this used with CloudWatch.
*/
sseGroupByDatasource?: boolean;
@@ -224,7 +224,7 @@ func (a *dashboardSqlAccess) CountResources(ctx context.Context, opts MigrateOpt
case "folder.grafana.app/folders":
summary := &resourcepb.BulkResponse_Summary{}
summary.Group = folders.GROUP
summary.Group = folders.RESOURCE
summary.Resource = folders.RESOURCE
_, err = sess.SQL("SELECT COUNT(*) FROM "+sql.Table("dashboard")+
" WHERE is_folder=TRUE AND org_id=?", orgId).Get(&summary.Count)
rsp.Summary = append(rsp.Summary, summary)
+1 -1
View File
@@ -53,7 +53,7 @@ func newIAMAuthorizer(
resourceAuthorizer[iamv0.RoleBindingInfo.GetName()] = authorizer
resourceAuthorizer[iamv0.ServiceAccountResourceInfo.GetName()] = authorizer
resourceAuthorizer[iamv0.UserResourceInfo.GetName()] = authorizer
resourceAuthorizer[iamv0.ExternalGroupMappingResourceInfo.GetName()] = authorizer
resourceAuthorizer[iamv0.ExternalGroupMappingResourceInfo.GetName()] = allowAuthorizer
resourceAuthorizer[iamv0.TeamResourceInfo.GetName()] = authorizer
resourceAuthorizer["searchUsers"] = serviceAuthorizer
resourceAuthorizer["searchTeams"] = serviceAuthorizer
@@ -0,0 +1,150 @@
package authorizer
import (
"context"
"fmt"
"github.com/grafana/authlib/types"
"k8s.io/apimachinery/pkg/runtime"
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
"github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/grafana/grafana/pkg/services/apiserver/auth/authorizer/storewrapper"
apierrors "k8s.io/apimachinery/pkg/api/errors"
)
// ExternalGroupMappingAuthorizer guards ExternalGroupMapping resources by
// checking the caller's permissions on the team each mapping references.
type ExternalGroupMappingAuthorizer struct {
	accessClient types.AccessClient
}

var _ storewrapper.ResourceStorageAuthorizer = (*ExternalGroupMappingAuthorizer)(nil)

// NewExternalGroupMappingAuthorizer returns an authorizer backed by the given access client.
func NewExternalGroupMappingAuthorizer(accessClient types.AccessClient) *ExternalGroupMappingAuthorizer {
	return &ExternalGroupMappingAuthorizer{accessClient: accessClient}
}
// AfterGet implements ResourceStorageAuthorizer. A mapping may be read only
// when the caller can read the permissions of the team it references.
func (r *ExternalGroupMappingAuthorizer) AfterGet(ctx context.Context, obj runtime.Object) error {
	caller, ok := types.AuthInfoFrom(ctx)
	if !ok {
		return storewrapper.ErrUnauthenticated
	}
	mapping, ok := obj.(*iamv0.ExternalGroupMapping)
	if !ok {
		return apierrors.NewInternalError(fmt.Errorf("expected ExternalGroupMapping, got %T: %w", obj, storewrapper.ErrUnexpectedType))
	}

	team := mapping.Spec.TeamRef.Name
	res, err := r.accessClient.Check(ctx, caller, types.CheckRequest{
		Namespace: caller.GetNamespace(),
		Group:     iamv0.GROUP,
		Resource:  iamv0.TeamResourceInfo.GetName(),
		Verb:      utils.VerbGetPermissions,
		Name:      team,
	}, "")
	if err != nil {
		return apierrors.NewInternalError(err)
	}
	if res.Allowed {
		return nil
	}
	return apierrors.NewForbidden(
		iamv0.ExternalGroupMappingResourceInfo.GroupResource(),
		mapping.Name,
		fmt.Errorf("user cannot access team %s", team),
	)
}
// BeforeCreate implements ResourceStorageAuthorizer.
// Creating a mapping requires write access on the referenced team (see beforeWrite).
func (r *ExternalGroupMappingAuthorizer) BeforeCreate(ctx context.Context, obj runtime.Object) error {
	return r.beforeWrite(ctx, obj)
}
// BeforeDelete implements ResourceStorageAuthorizer.
// Deleting a mapping requires write access on the referenced team (see beforeWrite).
func (r *ExternalGroupMappingAuthorizer) BeforeDelete(ctx context.Context, obj runtime.Object) error {
	return r.beforeWrite(ctx, obj)
}
// BeforeUpdate implements ResourceStorageAuthorizer.
// Update is not supported for ExternalGroupMapping resources and update attempts are blocked at a lower level,
// so this is just a safeguard: it always rejects with MethodNotSupported.
func (r *ExternalGroupMappingAuthorizer) BeforeUpdate(ctx context.Context, obj runtime.Object) error {
	return apierrors.NewMethodNotSupported(iamv0.ExternalGroupMappingResourceInfo.GroupResource(), "PUT/PATCH")
}
// beforeWrite is the shared create/delete gate: the caller must be allowed to
// set permissions on the team the mapping references.
func (r *ExternalGroupMappingAuthorizer) beforeWrite(ctx context.Context, obj runtime.Object) error {
	caller, ok := types.AuthInfoFrom(ctx)
	if !ok {
		return storewrapper.ErrUnauthenticated
	}
	mapping, ok := obj.(*iamv0.ExternalGroupMapping)
	if !ok {
		return apierrors.NewInternalError(fmt.Errorf("expected ExternalGroupMapping, got %T: %w", obj, storewrapper.ErrUnexpectedType))
	}

	team := mapping.Spec.TeamRef.Name
	res, err := r.accessClient.Check(ctx, caller, types.CheckRequest{
		Namespace: caller.GetNamespace(),
		Group:     iamv0.GROUP,
		Resource:  iamv0.TeamResourceInfo.GetName(),
		Verb:      utils.VerbSetPermissions,
		Name:      team,
	}, "")
	if err != nil {
		return apierrors.NewInternalError(err)
	}
	if res.Allowed {
		return nil
	}
	return apierrors.NewForbidden(
		iamv0.ExternalGroupMappingResourceInfo.GroupResource(),
		mapping.Name,
		fmt.Errorf("user cannot write team %s", team),
	)
}
// FilterList implements ResourceStorageAuthorizer. It drops every mapping
// whose referenced team the caller cannot read permissions for, mutating and
// returning the same list object.
func (r *ExternalGroupMappingAuthorizer) FilterList(ctx context.Context, list runtime.Object) (runtime.Object, error) {
	caller, ok := types.AuthInfoFrom(ctx)
	if !ok {
		return nil, storewrapper.ErrUnauthenticated
	}
	mappings, ok := list.(*iamv0.ExternalGroupMappingList)
	if !ok {
		return nil, apierrors.NewInternalError(fmt.Errorf("expected ExternalGroupMappingList, got %T: %w", list, storewrapper.ErrUnexpectedType))
	}

	// Compile a single team-level checker instead of issuing one Check per item.
	canView, _, err := r.accessClient.Compile(ctx, caller, types.ListRequest{
		Namespace: caller.GetNamespace(),
		Group:     iamv0.GROUP,
		Resource:  iamv0.TeamResourceInfo.GetName(),
		Verb:      utils.VerbGetPermissions,
	})
	if err != nil {
		return nil, apierrors.NewInternalError(err)
	}

	var kept []iamv0.ExternalGroupMapping
	for _, m := range mappings.Items {
		if canView(m.Spec.TeamRef.Name, "") {
			kept = append(kept, m)
		}
	}
	mappings.Items = kept
	return mappings, nil
}
@@ -0,0 +1,229 @@
package authorizer
import (
"context"
"testing"
"github.com/stretchr/testify/require"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/grafana/authlib/types"
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
"github.com/grafana/grafana/pkg/apimachinery/utils"
)
// newExternalGroupMapping builds a minimal mapping fixture in namespace
// "org-2" that points at the given team.
func newExternalGroupMapping(teamName, name string) *iamv0.ExternalGroupMapping {
	m := &iamv0.ExternalGroupMapping{}
	m.ObjectMeta = metav1.ObjectMeta{Namespace: "org-2", Name: name}
	m.Spec = iamv0.ExternalGroupMappingSpec{
		TeamRef: iamv0.ExternalGroupMappingTeamRef{Name: teamName},
	}
	return m
}
// TestExternalGroupMapping_AfterGet verifies that reading a mapping performs a
// GetPermissions check on the referenced team and that the result follows the
// access client's answer.
func TestExternalGroupMapping_AfterGet(t *testing.T) {
	mapping := newExternalGroupMapping("team-1", "mapping-1")
	tests := []struct {
		name        string
		shouldAllow bool
	}{
		{
			name:        "allow access",
			shouldAllow: true,
		},
		{
			name:        "deny access",
			shouldAllow: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// checkFunc asserts the exact CheckRequest AfterGet must issue
			// before returning the configured allow/deny answer.
			checkFunc := func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error) {
				require.NotNil(t, id)
				require.Equal(t, "user:u001", id.GetUID())
				require.Equal(t, "org-2", id.GetNamespace())
				require.Equal(t, "org-2", req.Namespace)
				require.Equal(t, iamv0.GROUP, req.Group)
				require.Equal(t, iamv0.TeamResourceInfo.GetName(), req.Resource)
				require.Equal(t, "team-1", req.Name)
				require.Equal(t, utils.VerbGetPermissions, req.Verb)
				require.Equal(t, "", folder)
				return types.CheckResponse{Allowed: tt.shouldAllow}, nil
			}
			accessClient := &fakeAccessClient{checkFunc: checkFunc}
			authz := NewExternalGroupMappingAuthorizer(accessClient)
			ctx := types.WithAuthInfo(context.Background(), user)
			err := authz.AfterGet(ctx, mapping)
			if tt.shouldAllow {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
			}
			// The authorizer must consult the access client in both branches.
			require.True(t, accessClient.checkCalled)
		})
	}
}
// TestExternalGroupMapping_FilterList verifies that listing compiles a single
// team-level GetPermissions checker and keeps only the mappings whose team
// the checker allows.
func TestExternalGroupMapping_FilterList(t *testing.T) {
	list := &iamv0.ExternalGroupMappingList{
		Items: []iamv0.ExternalGroupMapping{
			*newExternalGroupMapping("team-1", "mapping-1"),
			*newExternalGroupMapping("team-2", "mapping-2"),
		},
		ListMeta: metav1.ListMeta{
			SelfLink: "/apis/iam.grafana.app/v0alpha1/namespaces/org-2/externalgroupmappings",
		},
	}
	// compileFunc asserts the ListRequest shape, then only grants team-1.
	compileFunc := func(id types.AuthInfo, req types.ListRequest) (types.ItemChecker, types.Zookie, error) {
		require.NotNil(t, id)
		require.Equal(t, "user:u001", id.GetUID())
		require.Equal(t, "org-2", id.GetNamespace())
		require.Equal(t, "org-2", req.Namespace)
		require.Equal(t, iamv0.GROUP, req.Group)
		require.Equal(t, iamv0.TeamResourceInfo.GetName(), req.Resource)
		require.Equal(t, utils.VerbGetPermissions, req.Verb)
		return func(name, folder string) bool {
			return name == "team-1"
		}, &types.NoopZookie{}, nil
	}
	accessClient := &fakeAccessClient{compileFunc: compileFunc}
	authz := NewExternalGroupMappingAuthorizer(accessClient)
	ctx := types.WithAuthInfo(context.Background(), user)
	obj, err := authz.FilterList(ctx, list)
	require.NoError(t, err)
	require.NotNil(t, list)
	require.True(t, accessClient.compileCalled)
	// Only the mapping pointing at the allowed team survives.
	filtered, ok := obj.(*iamv0.ExternalGroupMappingList)
	require.True(t, ok)
	require.Len(t, filtered.Items, 1)
	require.Equal(t, "mapping-1", filtered.Items[0].Name)
}
// TestExternalGroupMapping_BeforeCreate verifies that creating a mapping
// performs a SetPermissions check on the referenced team and that the result
// follows the access client's answer.
func TestExternalGroupMapping_BeforeCreate(t *testing.T) {
	mapping := newExternalGroupMapping("team-1", "mapping-1")
	tests := []struct {
		name        string
		shouldAllow bool
	}{
		{
			name:        "allow create",
			shouldAllow: true,
		},
		{
			name:        "deny create",
			shouldAllow: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// checkFunc asserts the write-verb CheckRequest BeforeCreate must issue.
			checkFunc := func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error) {
				require.NotNil(t, id)
				require.Equal(t, "user:u001", id.GetUID())
				require.Equal(t, "org-2", id.GetNamespace())
				require.Equal(t, "org-2", req.Namespace)
				require.Equal(t, iamv0.GROUP, req.Group)
				require.Equal(t, iamv0.TeamResourceInfo.GetName(), req.Resource)
				require.Equal(t, "team-1", req.Name)
				require.Equal(t, utils.VerbSetPermissions, req.Verb)
				require.Equal(t, "", folder)
				return types.CheckResponse{Allowed: tt.shouldAllow}, nil
			}
			accessClient := &fakeAccessClient{checkFunc: checkFunc}
			authz := NewExternalGroupMappingAuthorizer(accessClient)
			ctx := types.WithAuthInfo(context.Background(), user)
			err := authz.BeforeCreate(ctx, mapping)
			if tt.shouldAllow {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
			}
			require.True(t, accessClient.checkCalled)
		})
	}
}
// TestExternalGroupMapping_BeforeUpdate verifies that updates are rejected
// outright with MethodNotSupported and that no access check is performed.
func TestExternalGroupMapping_BeforeUpdate(t *testing.T) {
	mapping := newExternalGroupMapping("team-1", "mapping-1")
	accessClient := &fakeAccessClient{
		checkFunc: func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error) {
			require.Fail(t, "check should not be called")
			return types.CheckResponse{}, nil
		},
	}
	authz := NewExternalGroupMappingAuthorizer(accessClient)
	ctx := types.WithAuthInfo(context.Background(), user)
	err := authz.BeforeUpdate(ctx, mapping)
	require.Error(t, err)
	require.True(t, apierrors.IsMethodNotSupported(err))
	require.Contains(t, err.Error(), "PUT/PATCH")
	require.False(t, accessClient.checkCalled)
}
// TestExternalGroupMapping_BeforeDelete verifies that deleting a mapping
// performs a SetPermissions check on the referenced team and that the result
// follows the access client's answer.
func TestExternalGroupMapping_BeforeDelete(t *testing.T) {
	mapping := newExternalGroupMapping("team-1", "mapping-1")
	tests := []struct {
		name        string
		shouldAllow bool
	}{
		{
			name:        "allow delete",
			shouldAllow: true,
		},
		{
			name:        "deny delete",
			shouldAllow: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// checkFunc asserts the write-verb CheckRequest BeforeDelete must issue.
			checkFunc := func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error) {
				require.NotNil(t, id)
				require.Equal(t, "user:u001", id.GetUID())
				require.Equal(t, "org-2", id.GetNamespace())
				require.Equal(t, "org-2", req.Namespace)
				require.Equal(t, iamv0.GROUP, req.Group)
				require.Equal(t, iamv0.TeamResourceInfo.GetName(), req.Resource)
				require.Equal(t, "team-1", req.Name)
				require.Equal(t, utils.VerbSetPermissions, req.Verb)
				require.Equal(t, "", folder)
				return types.CheckResponse{Allowed: tt.shouldAllow}, nil
			}
			accessClient := &fakeAccessClient{checkFunc: checkFunc}
			authz := NewExternalGroupMappingAuthorizer(accessClient)
			ctx := types.WithAuthInfo(context.Background(), user)
			err := authz.BeforeDelete(ctx, mapping)
			if tt.shouldAllow {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
			}
			require.True(t, accessClient.checkCalled)
		})
	}
}
@@ -4,35 +4,15 @@ import (
"context"
"testing"
"github.com/go-jose/go-jose/v4/jwt"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/grafana/authlib/authn"
"github.com/grafana/authlib/types"
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/apimachinery/utils"
)
var (
user = authn.NewIDTokenAuthInfo(
authn.Claims[authn.AccessTokenClaims]{
Claims: jwt.Claims{Issuer: "grafana",
Subject: types.NewTypeID(types.TypeAccessPolicy, "grafana"), Audience: []string{"iam.grafana.app"}},
Rest: authn.AccessTokenClaims{
Namespace: "*",
Permissions: identity.ServiceIdentityClaims.Rest.Permissions,
DelegatedPermissions: identity.ServiceIdentityClaims.Rest.DelegatedPermissions,
},
}, &authn.Claims[authn.IDTokenClaims]{
Claims: jwt.Claims{Subject: types.NewTypeID(types.TypeUser, "u001")},
Rest: authn.IDTokenClaims{Namespace: "org-2", Identifier: "u001", Type: types.TypeUser},
},
)
)
func newResourcePermission(apiGroup, resource, name string) *iamv0.ResourcePermission {
return &iamv0.ResourcePermission{
ObjectMeta: metav1.ObjectMeta{Namespace: "org-2"},
@@ -222,26 +202,6 @@ func TestResourcePermissions_beforeWrite(t *testing.T) {
}
}
// fakeAccessClient is a mock implementation of claims.AccessClient
type fakeAccessClient struct {
checkCalled bool
checkFunc func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error)
compileCalled bool
compileFunc func(id types.AuthInfo, req types.ListRequest) (types.ItemChecker, types.Zookie, error)
}
func (m *fakeAccessClient) Check(ctx context.Context, id types.AuthInfo, req types.CheckRequest, folder string) (types.CheckResponse, error) {
m.checkCalled = true
return m.checkFunc(id, &req, folder)
}
func (m *fakeAccessClient) Compile(ctx context.Context, id types.AuthInfo, req types.ListRequest) (types.ItemChecker, types.Zookie, error) {
m.compileCalled = true
return m.compileFunc(id, req)
}
var _ types.AccessClient = (*fakeAccessClient)(nil)
type fakeParentProvider struct {
hasParent bool
getParentCalled bool
@@ -0,0 +1,48 @@
package authorizer
import (
"context"
"github.com/go-jose/go-jose/v4/jwt"
"github.com/grafana/authlib/authn"
"github.com/grafana/authlib/types"
"github.com/grafana/grafana/pkg/apimachinery/identity"
)
var (
	// Shared test user identity: a service access token (wildcard namespace,
	// service identity permissions) carrying an ID token for user "u001"
	// scoped to namespace "org-2".
	user = authn.NewIDTokenAuthInfo(
		authn.Claims[authn.AccessTokenClaims]{
			Claims: jwt.Claims{Issuer: "grafana",
				Subject: types.NewTypeID(types.TypeAccessPolicy, "grafana"), Audience: []string{"iam.grafana.app"}},
			Rest: authn.AccessTokenClaims{
				Namespace:            "*",
				Permissions:          identity.ServiceIdentityClaims.Rest.Permissions,
				DelegatedPermissions: identity.ServiceIdentityClaims.Rest.DelegatedPermissions,
			},
		}, &authn.Claims[authn.IDTokenClaims]{
			Claims: jwt.Claims{Subject: types.NewTypeID(types.TypeUser, "u001")},
			Rest:   authn.IDTokenClaims{Namespace: "org-2", Identifier: "u001", Type: types.TypeUser},
		},
	)
)
// fakeAccessClient is a test double for types.AccessClient. Each call is
// delegated to the corresponding injected func and recorded in a *Called flag
// so tests can assert the client was (or was not) consulted.
type fakeAccessClient struct {
	checkCalled   bool
	checkFunc     func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error)
	compileCalled bool
	compileFunc   func(id types.AuthInfo, req types.ListRequest) (types.ItemChecker, types.Zookie, error)
}

var _ types.AccessClient = (*fakeAccessClient)(nil)

// Check records the call and forwards to checkFunc.
func (f *fakeAccessClient) Check(ctx context.Context, id types.AuthInfo, req types.CheckRequest, folder string) (types.CheckResponse, error) {
	f.checkCalled = true
	return f.checkFunc(id, &req, folder)
}

// Compile records the call and forwards to compileFunc.
func (f *fakeAccessClient) Compile(ctx context.Context, id types.AuthInfo, req types.ListRequest) (types.ItemChecker, types.Zookie, error) {
	f.compileCalled = true
	return f.compileFunc(id, req)
}
+11 -2
View File
@@ -353,7 +353,8 @@ func (b *IdentityAccessManagementAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *ge
if err != nil {
return err
}
storage[extGroupMappingResource.StoragePath()] = extGroupMappingUniStore
var extGroupMappingStore storewrapper.K8sStorage = extGroupMappingUniStore
if b.externalGroupMappingStorage != nil {
extGroupMappingLegacyStore, err := NewLocalStore(extGroupMappingResource, apiGroupInfo.Scheme, opts.OptsGetter, b.reg, b.accessClient, b.externalGroupMappingStorage)
@@ -365,9 +366,17 @@ func (b *IdentityAccessManagementAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *ge
if err != nil {
return err
}
storage[extGroupMappingResource.StoragePath()] = dw
var ok bool
extGroupMappingStore, ok = dw.(storewrapper.K8sStorage)
if !ok {
return fmt.Errorf("expected storewrapper.K8sStorage, got %T", dw)
}
}
authzWrapper := storewrapper.New(extGroupMappingStore, iamauthorizer.NewExternalGroupMappingAuthorizer(b.accessClient))
storage[extGroupMappingResource.StoragePath()] = authzWrapper
//nolint:staticcheck // not yet migrated to OpenFeature
if b.features.IsEnabledGlobally(featuremgmt.FlagKubernetesAuthzApis) {
// v0alpha1
-21
View File
@@ -182,25 +182,6 @@ func newFolderTranslation() translation {
return folderTranslation
}
func newExternalGroupMappingTranslation() translation {
return translation{
resource: "teams.permissions",
attribute: "uid",
verbMapping: map[string]string{
utils.VerbGet: "teams.permissions:read",
utils.VerbList: "teams.permissions:read",
utils.VerbWatch: "teams.permissions:read",
utils.VerbCreate: "teams.permissions:write",
utils.VerbUpdate: "teams.permissions:write",
utils.VerbPatch: "teams.permissions:write",
utils.VerbDelete: "teams.permissions:write",
utils.VerbGetPermissions: "teams.permissions:write",
utils.VerbSetPermissions: "teams.permissions:write",
},
folderSupport: false,
}
}
func NewMapperRegistry() MapperRegistry {
skipScopeOnAllVerbs := map[string]bool{
utils.VerbCreate: true,
@@ -229,8 +210,6 @@ func NewMapperRegistry() MapperRegistry {
"serviceaccounts": newResourceTranslation("serviceaccounts", "uid", false, map[string]bool{utils.VerbCreate: true}),
// Teams is a special case. We translate user permissions from id to uid based.
"teams": newResourceTranslation("teams", "uid", false, map[string]bool{utils.VerbCreate: true}),
// ExternalGroupMappings is a special case. We translate team permissions from id to uid based.
"externalgroupmappings": newExternalGroupMappingTranslation(),
"coreroles": translation{
resource: "roles",
attribute: "uid",
+2 -2
View File
@@ -90,7 +90,7 @@ func ProvideZanzanaClient(cfg *setting.Cfg, db db.DB, tracer tracing.Tracer, fea
authzv1.RegisterAuthzServiceServer(channel, srv)
authzextv1.RegisterAuthzExtentionServiceServer(channel, srv)
client, err := zClient.New(channel)
client, err := zClient.New(channel, reg)
if err != nil {
return nil, fmt.Errorf("failed to initialize zanzana client: %w", err)
}
@@ -169,7 +169,7 @@ func NewRemoteZanzanaClient(cfg ZanzanaClientConfig, reg prometheus.Registerer)
return nil, fmt.Errorf("failed to create zanzana client to remote server: %w", err)
}
client, err := zClient.New(conn)
client, err := zClient.New(conn, reg)
if err != nil {
return nil, fmt.Errorf("failed to initialize zanzana client: %w", err)
}
+22 -1
View File
@@ -9,6 +9,7 @@ import (
authzlib "github.com/grafana/authlib/authz"
authzv1 "github.com/grafana/authlib/authz/proto/v1"
authlib "github.com/grafana/authlib/types"
"github.com/prometheus/client_golang/prometheus"
"github.com/grafana/grafana/pkg/infra/log"
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
@@ -25,15 +26,17 @@ type Client struct {
authz authzv1.AuthzServiceClient
authzext authzextv1.AuthzExtentionServiceClient
authzlibclient *authzlib.ClientImpl
metrics *clientMetrics
}
func New(cc grpc.ClientConnInterface) (*Client, error) {
func New(cc grpc.ClientConnInterface, reg prometheus.Registerer) (*Client, error) {
authzlibclient := authzlib.NewClient(cc, authzlib.WithTracerClientOption(tracer))
c := &Client{
authzlibclient: authzlibclient,
authz: authzv1.NewAuthzServiceClient(cc),
authzext: authzextv1.NewAuthzExtentionServiceClient(cc),
logger: log.New("zanzana.client"),
metrics: newClientMetrics(reg),
}
return c, nil
@@ -43,6 +46,9 @@ func (c *Client) Check(ctx context.Context, id authlib.AuthInfo, req authlib.Che
ctx, span := tracer.Start(ctx, "authlib.zanzana.client.Check")
defer span.End()
timer := prometheus.NewTimer(c.metrics.requestDurationSeconds.WithLabelValues("Check", req.Namespace))
defer timer.ObserveDuration()
return c.authzlibclient.Check(ctx, id, req, folder)
}
@@ -50,6 +56,9 @@ func (c *Client) Compile(ctx context.Context, id authlib.AuthInfo, req authlib.L
ctx, span := tracer.Start(ctx, "authlib.zanzana.client.Compile")
defer span.End()
timer := prometheus.NewTimer(c.metrics.requestDurationSeconds.WithLabelValues("Compile", req.Namespace))
defer timer.ObserveDuration()
return c.authzlibclient.Compile(ctx, id, req)
}
@@ -64,6 +73,9 @@ func (c *Client) Write(ctx context.Context, req *authzextv1.WriteRequest) error
ctx, span := tracer.Start(ctx, "authlib.zanzana.client.Write")
defer span.End()
timer := prometheus.NewTimer(c.metrics.requestDurationSeconds.WithLabelValues("Write", req.Namespace))
defer timer.ObserveDuration()
_, err := c.authzext.Write(ctx, req)
return err
}
@@ -72,6 +84,9 @@ func (c *Client) BatchCheck(ctx context.Context, req *authzextv1.BatchCheckReque
ctx, span := tracer.Start(ctx, "authlib.zanzana.client.Check")
defer span.End()
timer := prometheus.NewTimer(c.metrics.requestDurationSeconds.WithLabelValues("BatchCheck", req.Namespace))
defer timer.ObserveDuration()
return c.authzext.BatchCheck(ctx, req)
}
@@ -87,6 +102,9 @@ func (c *Client) Mutate(ctx context.Context, req *authzextv1.MutateRequest) erro
ctx, span := tracer.Start(ctx, "authlib.zanzana.client.Mutate")
defer span.End()
timer := prometheus.NewTimer(c.metrics.requestDurationSeconds.WithLabelValues("Mutate", req.Namespace))
defer timer.ObserveDuration()
_, err := c.authzext.Mutate(ctx, req)
return err
}
@@ -95,5 +113,8 @@ func (c *Client) Query(ctx context.Context, req *authzextv1.QueryRequest) (*auth
ctx, span := tracer.Start(ctx, "authlib.zanzana.client.Query")
defer span.End()
timer := prometheus.NewTimer(c.metrics.requestDurationSeconds.WithLabelValues("Query", req.Namespace))
defer timer.ObserveDuration()
return c.authzext.Query(ctx, req)
}
+24 -4
View File
@@ -7,10 +7,10 @@ import (
const (
metricsNamespace = "iam"
metricsSubSystem = "authz_zanzana"
metricsSubSystem = "authz_zanzana_client"
)
type metrics struct {
type shadowClientMetrics struct {
// evaluationsSeconds is a summary for evaluating access for a specific engine (RBAC and zanzana)
evaluationsSeconds *prometheus.HistogramVec
// compileSeconds is a summary for compiling item checker for a specific engine (RBAC and zanzana)
@@ -19,8 +19,13 @@ type metrics struct {
evaluationStatusTotal *prometheus.CounterVec
}
func newShadowClientMetrics(reg prometheus.Registerer) *metrics {
return &metrics{
type clientMetrics struct {
// requestDurationSeconds is a summary for zanzana client request duration
requestDurationSeconds *prometheus.HistogramVec
}
func newShadowClientMetrics(reg prometheus.Registerer) *shadowClientMetrics {
return &shadowClientMetrics{
evaluationsSeconds: promauto.With(reg).NewHistogramVec(
prometheus.HistogramOpts{
Name: "engine_evaluations_seconds",
@@ -52,3 +57,18 @@ func newShadowClientMetrics(reg prometheus.Registerer) *metrics {
),
}
}
// newClientMetrics registers and returns the zanzana client request metrics.
func newClientMetrics(reg prometheus.Registerer) *clientMetrics {
	return &clientMetrics{
		requestDurationSeconds: promauto.With(reg).NewHistogramVec(
			prometheus.HistogramOpts{
				Name:      "request_duration_seconds",
				Help:      "Histogram for zanzana client request duration",
				Namespace: metricsNamespace,
				Subsystem: metricsSubSystem,
				// Exponential buckets from 10µs, factor 4, 10 buckets
				// (~10µs up to ~2.6s) — covers fast in-process and slow remote calls.
				Buckets: prometheus.ExponentialBuckets(0.00001, 4, 10),
			},
			[]string{"method", "request_namespace"},
		),
	}
}
@@ -20,7 +20,7 @@ type ShadowClient struct {
logger log.Logger
accessClient authlib.AccessClient
zanzanaClient authlib.AccessClient
metrics *metrics
metrics *shadowClientMetrics
}
// WithShadowClient returns a new access client that runs zanzana checks in the background.
+7
View File
@@ -322,6 +322,13 @@ var (
Owner: grafanaOperatorExperienceSquad,
RequiresRestart: true,
},
{
Name: "reportingCsvEncodingOptions",
Description: "Enables CSV encoding options in the reporting feature",
Stage: FeatureStageExperimental,
FrontendOnly: false,
Owner: grafanaOperatorExperienceSquad,
},
{
Name: "sseGroupByDatasource",
Description: "Send query to the same datasource in a single request when using server side expressions. The `cloudWatchBatchQueries` feature toggle should be enabled if this used with CloudWatch.",
+1
View File
@@ -43,6 +43,7 @@ configurableSchedulerTick,experimental,@grafana/alerting-squad,false,true,false
dashgpt,GA,@grafana/dashboards-squad,false,false,true
aiGeneratedDashboardChanges,experimental,@grafana/dashboards-squad,false,false,true
reportingRetries,preview,@grafana/grafana-operator-experience-squad,false,true,false
reportingCsvEncodingOptions,experimental,@grafana/grafana-operator-experience-squad,false,false,false
sseGroupByDatasource,experimental,@grafana/grafana-datasources-core-services,false,false,false
lokiRunQueriesInParallel,privatePreview,@grafana/observability-logs,false,false,false
externalServiceAccounts,preview,@grafana/identity-access-team,false,false,false
1 Name Stage Owner requiresDevMode RequiresRestart FrontendOnly
43 dashgpt GA @grafana/dashboards-squad false false true
44 aiGeneratedDashboardChanges experimental @grafana/dashboards-squad false false true
45 reportingRetries preview @grafana/grafana-operator-experience-squad false true false
46 reportingCsvEncodingOptions experimental @grafana/grafana-operator-experience-squad false false false
47 sseGroupByDatasource experimental @grafana/grafana-datasources-core-services false false false
48 lokiRunQueriesInParallel privatePreview @grafana/observability-logs false false false
49 externalServiceAccounts preview @grafana/identity-access-team false false false
+4
View File
@@ -135,6 +135,10 @@ const (
// Enables rendering retries for the reporting feature
FlagReportingRetries = "reportingRetries"
// FlagReportingCsvEncodingOptions
// Enables CSV encoding options in the reporting feature
FlagReportingCsvEncodingOptions = "reportingCsvEncodingOptions"
// FlagSseGroupByDatasource
// Send query to the same datasource in a single request when using server side expressions. The `cloudWatchBatchQueries` feature toggle should be enabled if this used with CloudWatch.
FlagSseGroupByDatasource = "sseGroupByDatasource"
+12
View File
@@ -3137,6 +3137,18 @@
"hideFromDocs": true
}
},
{
"metadata": {
"name": "reportingCsvEncodingOptions",
"resourceVersion": "1766080709938",
"creationTimestamp": "2025-12-18T17:58:29Z"
},
"spec": {
"description": "Enables CSV encoding options in the reporting feature",
"stage": "experimental",
"codeowner": "@grafana/grafana-operator-experience-squad"
}
},
{
"metadata": {
"name": "reportingRetries",
+1 -1
View File
@@ -440,7 +440,7 @@ func (s *ServiceImpl) buildAlertNavLinks(c *contextmodel.ReqContext) *navtree.Na
if s.features.IsEnabled(c.Req.Context(), featuremgmt.FlagAlertingTriage) {
if hasAccess(ac.EvalAny(ac.EvalPermission(ac.ActionAlertingRuleRead), ac.EvalPermission(ac.ActionAlertingRuleExternalRead))) {
alertChildNavs = append(alertChildNavs, &navtree.NavLink{
Text: "Alerts", SubTitle: "Visualize active and pending alerts", Id: "alert-alerts", Url: s.cfg.AppSubURL + "/alerting/alerts", Icon: "bell", IsNew: true,
Text: "Alert activity", SubTitle: "Visualize active and pending alerts", Id: "alert-alerts", Url: s.cfg.AppSubURL + "/alerting/alerts", Icon: "bell", IsNew: true,
})
}
}
+2
View File
@@ -637,6 +637,8 @@ type UnifiedStorageConfig struct {
// EnableMigration indicates whether migration is enabled for the resource.
// If not set, will use the default from MigratedUnifiedResources.
EnableMigration bool
// AutoMigrationThreshold is the threshold below which a resource is automatically migrated.
AutoMigrationThreshold int
}
type InstallPlugin struct {
+44 -4
View File
@@ -8,6 +8,10 @@ import (
"github.com/grafana/grafana/pkg/util/osutil"
)
// DefaultAutoMigrationThreshold is the default threshold for auto migration switching.
// If a resource has entries at or below this count, it will be migrated.
const DefaultAutoMigrationThreshold = 10
const (
PlaylistResource = "playlists.playlist.grafana.app"
FolderResource = "folders.folder.grafana.app"
@@ -21,6 +25,13 @@ var MigratedUnifiedResources = map[string]bool{
DashboardResource: false,
}
// AutoMigratedUnifiedResources maps resources that support auto-migration
// TODO: remove this before Grafana 13 GA: https://github.com/grafana/search-and-storage-team/issues/613
var AutoMigratedUnifiedResources = map[string]bool{
FolderResource: true,
DashboardResource: true,
}
// read storage configs from ini file. They look like:
// [unified_storage.<group>.<resource>]
// <field> = <value>
@@ -59,6 +70,13 @@ func (cfg *Cfg) setUnifiedStorageConfig() {
enableMigration = section.Key("enableMigration").MustBool(MigratedUnifiedResources[resourceName])
}
// parse autoMigrationThreshold from resource section
autoMigrationThreshold := 0
autoMigrate := AutoMigratedUnifiedResources[resourceName]
if autoMigrate {
autoMigrationThreshold = section.Key("autoMigrationThreshold").MustInt(DefaultAutoMigrationThreshold)
}
storageConfig[resourceName] = UnifiedStorageConfig{
DualWriterMode: rest.DualWriterMode(dualWriterMode),
DualWriterPeriodicDataSyncJobEnabled: dualWriterPeriodicDataSyncJobEnabled,
@@ -66,6 +84,7 @@ func (cfg *Cfg) setUnifiedStorageConfig() {
DataSyncerRecordsLimit: dataSyncerRecordsLimit,
DataSyncerInterval: dataSyncerInterval,
EnableMigration: enableMigration,
AutoMigrationThreshold: autoMigrationThreshold,
}
}
cfg.UnifiedStorage = storageConfig
@@ -73,13 +92,13 @@ func (cfg *Cfg) setUnifiedStorageConfig() {
// Set indexer config for unified storage
section := cfg.Raw.Section("unified_storage")
cfg.DisableDataMigrations = section.Key("disable_data_migrations").MustBool(false)
if !cfg.DisableDataMigrations && cfg.getUnifiedStorageType() == "unified" {
if !cfg.DisableDataMigrations && cfg.UnifiedStorageType() == "unified" {
// Helper log to find instances running migrations in the future
cfg.Logger.Info("Unified migration configs enforced")
cfg.enforceMigrationToUnifiedConfigs()
} else {
// Helper log to find instances disabling migration
cfg.Logger.Info("Unified migration configs enforcement disabled", "storage_type", cfg.getUnifiedStorageType(), "disable_data_migrations", cfg.DisableDataMigrations)
cfg.Logger.Info("Unified migration configs enforcement disabled", "storage_type", cfg.UnifiedStorageType(), "disable_data_migrations", cfg.DisableDataMigrations)
}
cfg.EnableSearch = section.Key("enable_search").MustBool(false)
cfg.MaxPageSizeBytes = section.Key("max_page_size_bytes").MustInt(0)
@@ -147,14 +166,15 @@ func (cfg *Cfg) enforceMigrationToUnifiedConfigs() {
DualWriterMode: 5,
DualWriterMigrationDataSyncDisabled: true,
EnableMigration: true,
AutoMigrationThreshold: resourceCfg.AutoMigrationThreshold,
}
}
}
// getUnifiedStorageType returns the configured storage type without creating or mutating keys.
// UnifiedStorageType returns the configured storage type without creating or mutating keys.
// Precedence: env > ini > default ("unified").
// Used to decide unified storage behavior early without side effects.
func (cfg *Cfg) getUnifiedStorageType() string {
func (cfg *Cfg) UnifiedStorageType() string {
const (
grafanaAPIServerSectionName = "grafana-apiserver"
storageTypeKeyName = "storage_type"
@@ -168,3 +188,23 @@ func (cfg *Cfg) getUnifiedStorageType() string {
}
return defaultStorageType
}
// UnifiedStorageConfig returns the UnifiedStorageConfig for a resource.
// Indexing a nil map yields the zero value in Go, so no explicit nil check
// is required; unknown resources likewise return the zero UnifiedStorageConfig.
func (cfg *Cfg) UnifiedStorageConfig(resource string) UnifiedStorageConfig {
	return cfg.UnifiedStorage[resource]
}
// EnableMode5 enables migration and sets mode 5 for a resource.
// It forces DualWriterMode to rest.Mode5, disables the migration data sync,
// and marks the resource's migration as enabled.
func (cfg *Cfg) EnableMode5(resource string) {
	// Lazily initialize the map: writing to a nil map would panic.
	if cfg.UnifiedStorage == nil {
		cfg.UnifiedStorage = make(map[string]UnifiedStorageConfig)
	}
	// Read-modify-write: UnifiedStorageConfig is stored by value in the map.
	config := cfg.UnifiedStorage[resource]
	config.DualWriterMode = rest.Mode5
	config.DualWriterMigrationDataSyncDisabled = true
	config.EnableMigration = true
	cfg.UnifiedStorage[resource] = config
}
@@ -43,10 +43,16 @@ func TestCfg_setUnifiedStorageConfig(t *testing.T) {
}
assert.Equal(t, exists, true, migratedResource)
expectedThreshold := 0
if AutoMigratedUnifiedResources[migratedResource] {
expectedThreshold = DefaultAutoMigrationThreshold
}
assert.Equal(t, UnifiedStorageConfig{
DualWriterMode: 5,
DualWriterMigrationDataSyncDisabled: true,
EnableMigration: isEnabled,
AutoMigrationThreshold: expectedThreshold,
}, resourceCfg, migratedResource)
}
}
@@ -71,6 +77,7 @@ func TestCfg_setUnifiedStorageConfig(t *testing.T) {
DualWriterPeriodicDataSyncJobEnabled: true,
DataSyncerRecordsLimit: 1001,
DataSyncerInterval: time.Minute * 10,
AutoMigrationThreshold: 0,
})
validateMigratedResources(false)
@@ -214,8 +214,18 @@ func runMigrationTestSuite(t *testing.T, testCases []resourceMigratorTestCase) {
for _, state := range testStates {
t.Run(state.tc.name(), func(t *testing.T) {
// Verify resources now exist in unified storage after migration
state.tc.verify(t, helper, true)
shouldExist := true
for _, gvr := range state.tc.resources() {
resourceKey := fmt.Sprintf("%s.%s", gvr.Resource, gvr.Group)
// Resources exist if they're either:
// 1. In MigratedUnifiedResources (enabled by default), OR
// 2. In AutoMigratedUnifiedResources (auto-migrated because count is below threshold)
if !setting.MigratedUnifiedResources[resourceKey] && !setting.AutoMigratedUnifiedResources[resourceKey] {
shouldExist = false
break
}
}
state.tc.verify(t, helper, shouldExist)
})
}
@@ -270,7 +280,7 @@ const (
var migrationIDsToDefault = map[string]bool{
playlistsID: true,
foldersAndDashboardsID: false,
foldersAndDashboardsID: true, // Auto-migrated when resource count is below threshold
}
func verifyRegisteredMigrations(t *testing.T, helper *apis.K8sTestHelper, onlyDefault bool, optOut bool) {
@@ -10,9 +10,11 @@ import (
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/registry/apis/dashboard/legacy"
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/grafana/grafana/pkg/storage/unified/resourcepb"
"github.com/grafana/grafana/pkg/util/xorm"
"github.com/grafana/grafana/pkg/util/xorm/core"
"k8s.io/apimachinery/pkg/runtime/schema"
)
@@ -31,6 +33,20 @@ type ResourceMigration struct {
migrationID string
validators []Validator // Optional: custom validation logic for this migration
log log.Logger
cfg *setting.Cfg
autoMigrate bool // If true, auto-migrate resource if count is below threshold
hadErrors bool // Tracks if errors occurred during migration (used with ignoreErrors)
}
// ResourceMigrationOption is a functional option for configuring ResourceMigration.
type ResourceMigrationOption func(*ResourceMigration)

// WithAutoMigrate configures the migration to auto-migrate resource if count is below threshold.
// The cfg is retained so the migration can enable mode 5 for its resources
// after a successful run (see Exec).
func WithAutoMigrate(cfg *setting.Cfg) ResourceMigrationOption {
	return func(m *ResourceMigration) {
		m.cfg = cfg
		m.autoMigrate = true
	}
}
// NewResourceMigration creates a new migration for the specified resources.
@@ -39,14 +55,24 @@ func NewResourceMigration(
resources []schema.GroupResource,
migrationID string,
validators []Validator,
opts ...ResourceMigrationOption,
) *ResourceMigration {
return &ResourceMigration{
m := &ResourceMigration{
migrator: migrator,
resources: resources,
migrationID: migrationID,
validators: validators,
log: log.New("storage.unified.resource_migration." + migrationID),
}
for _, opt := range opts {
opt(m)
}
return m
}
// SkipMigrationLog reports whether recording this migration in the log table
// should be skipped.
func (m *ResourceMigration) SkipMigrationLog() bool {
	// Skip populating the log table if auto-migrate is enabled and errors occurred,
	// so the migration can be retried on the next startup.
	return m.autoMigrate && m.hadErrors
}
var _ migrator.CodeMigration = (*ResourceMigration)(nil)
@@ -57,7 +83,23 @@ func (m *ResourceMigration) SQL(_ migrator.Dialect) string {
}
// Exec implements migrator.CodeMigration interface. Executes the migration across all organizations.
func (m *ResourceMigration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
func (m *ResourceMigration) Exec(sess *xorm.Session, mg *migrator.Migrator) (err error) {
// Track any errors that occur during migration
defer func() {
if err != nil {
if m.autoMigrate {
m.log.Warn(
`[WARN] Resource migration failed and is currently skipped.
This migration will be enforced in the next major Grafana release, where failures will block startup or resource loading.
This warning is intended to help you detect and report issues early.
Please investigate the failure and report it to the Grafana team so it can be addressed before the next major release.`,
"error", err)
}
m.hadErrors = true
}
}()
ctx := context.Background()
orgs, err := m.getAllOrgs(sess)
@@ -75,7 +117,8 @@ func (m *ResourceMigration) Exec(sess *xorm.Session, mg *migrator.Migrator) erro
if mg.Dialect.DriverName() == migrator.SQLite {
// reuse transaction in SQLite to avoid "database is locked" errors
tx, err := sess.Tx()
var tx *core.Tx
tx, err = sess.Tx()
if err != nil {
m.log.Error("Failed to get transaction from session", "error", err)
return fmt.Errorf("failed to get transaction: %w", err)
@@ -85,12 +128,22 @@ func (m *ResourceMigration) Exec(sess *xorm.Session, mg *migrator.Migrator) erro
}
for _, org := range orgs {
if err := m.migrateOrg(ctx, sess, org); err != nil {
if err = m.migrateOrg(ctx, sess, org); err != nil {
return err
}
}
// Auto-enable mode 5 for resources after successful migration
// TODO: remove this before Grafana 13 GA: https://github.com/grafana/search-and-storage-team/issues/613
if m.autoMigrate {
for _, gr := range m.resources {
m.log.Info("Auto-enabling mode 5 for resource", "resource", gr.Resource+"."+gr.Group)
m.cfg.EnableMode5(gr.Resource + "." + gr.Group)
}
}
m.log.Info("Migration completed successfully for all organizations", "org_count", len(orgs))
return nil
}
+222 -36
View File
@@ -1,11 +1,13 @@
package migrations
import (
"context"
"fmt"
v1beta1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1"
folders "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1"
playlists "github.com/grafana/grafana/apps/playlist/pkg/apis/playlist/v0alpha1"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/registry/apis/dashboard/legacy"
sqlstoremigrator "github.com/grafana/grafana/pkg/services/sqlstore/migrator"
"github.com/grafana/grafana/pkg/setting"
@@ -14,69 +16,70 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
)
type ResourceDefinition struct {
GroupResource schema.GroupResource
MigratorFunc string // Name of the method: "MigrateFolders", "MigrateDashboards", etc.
type resourceDefinition struct {
groupResource schema.GroupResource
migratorFunc string // Name of the method: "MigrateFolders", "MigrateDashboards", etc.
}
type migrationDefinition struct {
name string
migrationID string // The ID stored in the migration log table (e.g., "playlists migration")
resources []string
registerFunc func(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient)
registerFunc func(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient, opts ...ResourceMigrationOption)
}
var resourceRegistry = []ResourceDefinition{
var resourceRegistry = []resourceDefinition{
{
GroupResource: schema.GroupResource{Group: folders.GROUP, Resource: folders.RESOURCE},
MigratorFunc: "MigrateFolders",
groupResource: schema.GroupResource{Group: folders.GROUP, Resource: folders.RESOURCE},
migratorFunc: "MigrateFolders",
},
{
GroupResource: schema.GroupResource{Group: v1beta1.GROUP, Resource: v1beta1.LIBRARY_PANEL_RESOURCE},
MigratorFunc: "MigrateLibraryPanels",
groupResource: schema.GroupResource{Group: v1beta1.GROUP, Resource: v1beta1.LIBRARY_PANEL_RESOURCE},
migratorFunc: "MigrateLibraryPanels",
},
{
GroupResource: schema.GroupResource{Group: v1beta1.GROUP, Resource: v1beta1.DASHBOARD_RESOURCE},
MigratorFunc: "MigrateDashboards",
groupResource: schema.GroupResource{Group: v1beta1.GROUP, Resource: v1beta1.DASHBOARD_RESOURCE},
migratorFunc: "MigrateDashboards",
},
{
GroupResource: schema.GroupResource{Group: playlists.APIGroup, Resource: "playlists"},
MigratorFunc: "MigratePlaylists",
groupResource: schema.GroupResource{Group: playlists.APIGroup, Resource: "playlists"},
migratorFunc: "MigratePlaylists",
},
}
var migrationRegistry = []migrationDefinition{
{
name: "playlists",
migrationID: "playlists migration",
resources: []string{setting.PlaylistResource},
registerFunc: registerPlaylistMigration,
},
{
name: "folders and dashboards",
migrationID: "folders and dashboards migration",
resources: []string{setting.FolderResource, setting.DashboardResource},
registerFunc: registerDashboardAndFolderMigration,
},
}
func registerMigrations(cfg *setting.Cfg, mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient) error {
func registerMigrations(ctx context.Context,
cfg *setting.Cfg,
mg *sqlstoremigrator.Migrator,
migrator UnifiedMigrator,
client resource.ResourceClient,
sqlStore db.DB,
) error {
for _, migration := range migrationRegistry {
var (
hasValue bool
allEnabled bool
)
for _, res := range migration.resources {
enabled := cfg.UnifiedStorage[res].EnableMigration
if !hasValue {
allEnabled = enabled
hasValue = true
continue
}
if enabled != allEnabled {
return fmt.Errorf("cannot migrate resources separately: %v migration must be either all enabled or all disabled", migration.resources)
}
if shouldAutoMigrate(ctx, migration, cfg, sqlStore) {
migration.registerFunc(mg, migrator, client, WithAutoMigrate(cfg))
continue
}
if !allEnabled {
enabled, err := isMigrationEnabled(migration, cfg)
if err != nil {
return err
}
if !enabled {
logger.Info("Migration is disabled in config, skipping", "migration", migration.name)
continue
}
@@ -85,10 +88,193 @@ func registerMigrations(cfg *setting.Cfg, mg *sqlstoremigrator.Migrator, migrato
return nil
}
func getResourceDefinition(group, resource string) *ResourceDefinition {
// registerDashboardAndFolderMigration wires up the combined folders +
// dashboards migration, validated by per-resource count checks and a folder
// tree consistency check.
func registerDashboardAndFolderMigration(mg *sqlstoremigrator.Migrator,
	migrator UnifiedMigrator,
	client resource.ResourceClient,
	opts ...ResourceMigrationOption,
) {
	folderGR := getResourceDefinition("folder.grafana.app", "folders").groupResource
	dashGR := getResourceDefinition("dashboard.grafana.app", "dashboards").groupResource
	driver := mg.Dialect.DriverName()

	// Folders and dashboards both live in the legacy "dashboard" table,
	// split by the is_folder flag.
	validators := []Validator{
		NewCountValidator(client, folderGR, "dashboard", "org_id = ? and is_folder = true", driver),
		NewCountValidator(client, dashGR, "dashboard", "org_id = ? and is_folder = false", driver),
		NewFolderTreeValidator(client, folderGR, driver),
	}

	migration := NewResourceMigration(
		migrator,
		[]schema.GroupResource{folderGR, dashGR},
		"folders-dashboards",
		validators,
		opts...,
	)
	mg.AddMigration("folders and dashboards migration", migration)
}
// registerPlaylistMigration wires up the playlists migration with a count
// validator comparing legacy "playlist" rows against unified storage.
func registerPlaylistMigration(mg *sqlstoremigrator.Migrator,
	migrator UnifiedMigrator,
	client resource.ResourceClient,
	opts ...ResourceMigrationOption,
) {
	def := getResourceDefinition("playlist.grafana.app", "playlists")
	gr := def.groupResource

	validators := []Validator{
		NewCountValidator(client, gr, "playlist", "org_id = ?", mg.Dialect.DriverName()),
	}

	migration := NewResourceMigration(
		migrator,
		[]schema.GroupResource{gr},
		"playlists",
		validators,
		opts...,
	)
	mg.AddMigration("playlists migration", migration)
}
// shouldAutoMigrate reports whether this migration should be registered with
// auto-migration enabled. It returns false when any covered resource is
// already in dual-writer mode 5, when a relevant legacy row count exceeds its
// threshold, or when no resource opts in via setting.AutoMigratedUnifiedResources.
// If the migration log already records a completed run, mode 5 is enabled for
// all resources and true is returned immediately.
// TODO: remove this before Grafana 13 GA: https://github.com/grafana/search-and-storage-team/issues/613
func shouldAutoMigrate(ctx context.Context, migration migrationDefinition, cfg *setting.Cfg, sqlStore db.DB) bool {
	autoMigrate := false
	// checkIfAlreadyMigrated issues a database query whose arguments do not
	// depend on the resource being iterated, so evaluate it at most once
	// instead of once per auto-migratable resource.
	migratedChecked := false
	for _, res := range migration.resources {
		config := cfg.UnifiedStorageConfig(res)
		// Already running mode 5: nothing left to auto-migrate.
		if config.DualWriterMode == 5 {
			return false
		}
		if !setting.AutoMigratedUnifiedResources[res] {
			continue
		}
		if !migratedChecked {
			migratedChecked = true
			if checkIfAlreadyMigrated(ctx, migration, sqlStore) {
				for _, migratedRes := range migration.resources {
					cfg.EnableMode5(migratedRes)
				}
				logger.Info("Auto-migration already completed, enabling mode 5 for resources", "migration", migration.name)
				return true
			}
		}
		autoMigrate = true
		threshold := int64(setting.DefaultAutoMigrationThreshold)
		if config.AutoMigrationThreshold > 0 {
			threshold = int64(config.AutoMigrationThreshold)
		}
		count, err := countResource(ctx, sqlStore, res)
		if err != nil {
			logger.Warn("Failed to count resource for auto migration check", "resource", res, "error", err)
			return false
		}
		logger.Info("Resource count for auto migration check", "resource", res, "count", count, "threshold", threshold)
		if count > threshold {
			return false
		}
	}
	if !autoMigrate {
		return false
	}
	logger.Info("Auto-migration enabled for migration", "migration", migration.name)
	return true
}
// checkIfAlreadyMigrated reports whether a completed run of this migration is
// already recorded in the migration log table. Lookup failures are logged and
// treated as "not migrated" so the caller falls back to the normal path.
func checkIfAlreadyMigrated(ctx context.Context, migration migrationDefinition, sqlStore db.DB) bool {
	id := migration.migrationID
	if id == "" {
		return false
	}
	found, lookupErr := migrationExists(ctx, sqlStore, id)
	if lookupErr != nil {
		logger.Warn("Failed to check if migration exists", "migration", migration.name, "error", lookupErr)
		return false
	}
	return found
}
// isMigrationEnabled reports whether the migration is enabled in config.
// All resources covered by one migration must agree: a mix of enabled and
// disabled resources is a configuration error. An empty resource list yields
// (false, nil).
func isMigrationEnabled(migration migrationDefinition, cfg *setting.Cfg) (bool, error) {
	if len(migration.resources) == 0 {
		return false, nil
	}
	// Use the nil-safe accessor, consistent with shouldAutoMigrate.
	allEnabled := cfg.UnifiedStorageConfig(migration.resources[0]).EnableMigration
	for _, res := range migration.resources[1:] {
		if cfg.UnifiedStorageConfig(res).EnableMigration != allEnabled {
			return false, fmt.Errorf("cannot migrate resources separately: %v migration must be either all enabled or all disabled", migration.resources)
		}
	}
	return allEnabled, nil
}
// TODO: remove this before Grafana 13 GA: https://github.com/grafana/search-and-storage-team/issues/613
// countResource returns how many rows of the given resource exist in legacy
// storage. Dashboards and folders share the "dashboard" table, distinguished
// by the is_folder column.
func countResource(ctx context.Context, sqlStore db.DB, resourceName string) (int64, error) {
	var count int64
	err := sqlStore.WithDbSession(ctx, func(sess *db.Session) error {
		var isFolder bool
		switch resourceName {
		case setting.DashboardResource:
			isFolder = false
		case setting.FolderResource:
			isFolder = true
		default:
			return fmt.Errorf("unknown resource: %s", resourceName)
		}
		var countErr error
		count, countErr = sess.Table("dashboard").Where("is_folder = ?", isFolder).Count()
		return countErr
	})
	return count, err
}
// migrationLogTableName is the table where completed unified-storage
// migrations are recorded.
const migrationLogTableName = "unifiedstorage_migration_log"

// migrationExists reports whether a row for migrationID is present in the
// unified-storage migration log table.
func migrationExists(ctx context.Context, sqlStore db.DB, migrationID string) (bool, error) {
	var matches int64
	queryErr := sqlStore.WithDbSession(ctx, func(sess *db.Session) error {
		n, countErr := sess.Table(migrationLogTableName).Where("migration_id = ?", migrationID).Count()
		matches = n
		return countErr
	})
	if queryErr != nil {
		return false, fmt.Errorf("failed to check migration existence: %w", queryErr)
	}
	return matches > 0, nil
}
func getResourceDefinition(group, resource string) *resourceDefinition {
for i := range resourceRegistry {
r := &resourceRegistry[i]
if r.GroupResource.Group == group && r.GroupResource.Resource == resource {
if r.groupResource.Group == group && r.groupResource.Resource == resource {
return r
}
}
@@ -102,8 +288,8 @@ func buildResourceKey(group, resource, namespace string) *resourcepb.ResourceKey
}
return &resourcepb.ResourceKey{
Namespace: namespace,
Group: def.GroupResource.Group,
Resource: def.GroupResource.Resource,
Group: def.groupResource.Group,
Resource: def.groupResource.Resource,
}
}
@@ -113,7 +299,7 @@ func getMigratorFunc(accessor legacy.MigrationDashboardAccessor, group, resource
return nil
}
switch def.MigratorFunc {
switch def.migratorFunc {
case "MigrateFolders":
return accessor.MigrateFolders
case "MigrateLibraryPanels":
@@ -130,7 +316,7 @@ func getMigratorFunc(accessor legacy.MigrationDashboardAccessor, group, resource
func validateRegisteredResources() error {
registeredMap := make(map[string]bool)
for _, gr := range resourceRegistry {
key := fmt.Sprintf("%s.%s", gr.GroupResource.Resource, gr.GroupResource.Group)
key := fmt.Sprintf("%s.%s", gr.groupResource.Resource, gr.groupResource.Group)
registeredMap[key] = true
}
@@ -1,12 +1,15 @@
package migrations
import (
"context"
"strings"
"testing"
sqlstoremigrator "github.com/grafana/grafana/pkg/services/sqlstore/migrator"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// TestRegisterMigrations exercises registerMigrations with various EnableMigration configs using a table-driven test.
@@ -14,20 +17,28 @@ func TestRegisterMigrations(t *testing.T) {
origRegistry := migrationRegistry
t.Cleanup(func() { migrationRegistry = origRegistry })
// Use fake resource names that are NOT in setting.AutoMigratedUnifiedResources
// to avoid triggering the auto-migrate code path which requires a non-nil sqlStore.
const (
fakePlaylistResource = "fake.playlists.resource"
fakeFolderResource = "fake.folders.resource"
fakeDashboardResource = "fake.dashboards.resource"
)
// helper to build a fake registry with custom register funcs that bump counters
makeFakeRegistry := func(migrationCalls map[string]int) []migrationDefinition {
return []migrationDefinition{
{
name: "playlists",
resources: []string{setting.PlaylistResource},
registerFunc: func(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient) {
resources: []string{fakePlaylistResource},
registerFunc: func(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient, opts ...ResourceMigrationOption) {
migrationCalls["playlists"]++
},
},
{
name: "folders and dashboards",
resources: []string{setting.FolderResource, setting.DashboardResource},
registerFunc: func(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient) {
resources: []string{fakeFolderResource, fakeDashboardResource},
registerFunc: func(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient, opts ...ResourceMigrationOption) {
migrationCalls["folders and dashboards"]++
},
},
@@ -38,7 +49,9 @@ func TestRegisterMigrations(t *testing.T) {
makeCfg := func(vals map[string]bool) *setting.Cfg {
cfg := &setting.Cfg{UnifiedStorage: make(map[string]setting.UnifiedStorageConfig)}
for k, v := range vals {
cfg.UnifiedStorage[k] = setting.UnifiedStorageConfig{EnableMigration: v}
cfg.UnifiedStorage[k] = setting.UnifiedStorageConfig{
EnableMigration: v,
}
}
return cfg
}
@@ -71,13 +84,13 @@ func TestRegisterMigrations(t *testing.T) {
migrationRegistry = makeFakeRegistry(migrationCalls)
cfg := makeCfg(map[string]bool{
setting.PlaylistResource: tt.enablePlaylist,
setting.FolderResource: tt.enableFolder,
setting.DashboardResource: tt.enableDashboard,
fakePlaylistResource: tt.enablePlaylist,
fakeFolderResource: tt.enableFolder,
fakeDashboardResource: tt.enableDashboard,
})
// We pass nils for migrator dependencies because our fake registerFuncs don't use them
err := registerMigrations(cfg, nil, nil, nil)
err := registerMigrations(context.Background(), cfg, nil, nil, nil, nil)
if tt.wantErr {
require.Error(t, err, "expected error for mismatched enablement")
@@ -90,3 +103,176 @@ func TestRegisterMigrations(t *testing.T) {
})
}
}
// TestResourceMigration_AutoMigrateEnablesMode5 verifies the autoMigrate behavior:
// - When autoMigrate=true AND cfg is set AND storage type is "unified", mode 5 should be enabled
// - In all other cases, mode 5 should NOT be enabled
func TestResourceMigration_AutoMigrateEnablesMode5(t *testing.T) {
// Helper to create a cfg with unified storage type
makeUnifiedCfg := func() *setting.Cfg {
cfg := setting.NewCfg()
cfg.Raw.Section("grafana-apiserver").Key("storage_type").SetValue("unified")
cfg.UnifiedStorage = make(map[string]setting.UnifiedStorageConfig)
return cfg
}
// Helper to create a cfg with legacy storage type
makeLegacyCfg := func() *setting.Cfg {
cfg := setting.NewCfg()
cfg.Raw.Section("grafana-apiserver").Key("storage_type").SetValue("legacy")
cfg.UnifiedStorage = make(map[string]setting.UnifiedStorageConfig)
return cfg
}
tests := []struct {
name string
autoMigrate bool
cfg *setting.Cfg
resources []string
wantMode5Enabled bool
description string
}{
{
name: "autoMigrate enabled with unified storage",
autoMigrate: true,
cfg: makeUnifiedCfg(),
resources: []string{setting.DashboardResource},
wantMode5Enabled: true,
description: "Should enable mode 5 when autoMigrate=true and storage type is unified",
},
{
name: "autoMigrate disabled with unified storage",
autoMigrate: false,
cfg: makeUnifiedCfg(),
resources: []string{setting.DashboardResource},
wantMode5Enabled: false,
description: "Should NOT enable mode 5 when autoMigrate=false",
},
{
name: "autoMigrate enabled with legacy storage",
autoMigrate: true,
cfg: makeLegacyCfg(),
resources: []string{setting.DashboardResource},
wantMode5Enabled: false,
description: "Should NOT enable mode 5 when storage type is legacy",
},
{
name: "autoMigrate enabled with nil cfg",
autoMigrate: true,
cfg: nil,
resources: []string{setting.DashboardResource},
wantMode5Enabled: false,
description: "Should NOT enable mode 5 when cfg is nil",
},
{
name: "autoMigrate enabled with multiple resources",
autoMigrate: true,
cfg: makeUnifiedCfg(),
resources: []string{setting.FolderResource, setting.DashboardResource},
wantMode5Enabled: true,
description: "Should enable mode 5 for all resources when autoMigrate=true",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Build schema.GroupResource from resource strings
resources := make([]schema.GroupResource, 0, len(tt.resources))
for _, r := range tt.resources {
parts := strings.SplitN(r, ".", 2)
resources = append(resources, schema.GroupResource{
Resource: parts[0],
Group: parts[1],
})
}
// Create the migration with options
var opts []ResourceMigrationOption
if tt.autoMigrate {
opts = append(opts, WithAutoMigrate(tt.cfg))
}
m := NewResourceMigration(nil, resources, "test-auto-migrate", nil, opts...)
// Simulate what happens at the end of a successful migration
// This is the logic from Exec() that we're testing
if m.autoMigrate && m.cfg != nil && m.cfg.UnifiedStorageType() == "unified" {
for _, gr := range m.resources {
m.cfg.EnableMode5(gr.Resource + "." + gr.Group)
}
}
// Verify mode 5 was enabled (or not) for each resource
for _, resourceName := range tt.resources {
if tt.cfg == nil {
// If cfg is nil, we can't check - just verify we didn't panic
continue
}
config := tt.cfg.UnifiedStorageConfig(resourceName)
if tt.wantMode5Enabled {
require.Equal(t, 5, int(config.DualWriterMode), "%s: %s", tt.description, resourceName)
require.True(t, config.EnableMigration, "%s: EnableMigration should be true for %s", tt.description, resourceName)
require.True(t, config.DualWriterMigrationDataSyncDisabled, "%s: DualWriterMigrationDataSyncDisabled should be true for %s", tt.description, resourceName)
} else {
require.Equal(t, 0, int(config.DualWriterMode), "%s: mode should be 0 for %s", tt.description, resourceName)
}
}
})
}
}
// TestResourceMigration_SkipMigrationLog verifies the SkipMigrationLog behavior:
//   - When autoMigrate=true AND errors occurred (hadErrors=true), skip writing to
//     the migration log. This allows the migration to be re-run on the next startup.
//   - In all other cases, write to migration log normally.
//
// This matters for migrations registered with WithAutoMigrate, which tolerate
// partial failures gracefully while still allowing retry on next startup.
// (The previous comment and case names referenced a nonexistent
// "ignoreErrors"/WithIgnoreErrors(); the actual mechanism is autoMigrate.)
func TestResourceMigration_SkipMigrationLog(t *testing.T) {
	tests := []struct {
		name        string
		autoMigrate bool
		hadErrors   bool
		want        bool
		description string
	}{
		{
			name:        "normal migration success",
			autoMigrate: false,
			hadErrors:   false,
			want:        false,
			description: "Normal successful migration should write to log",
		},
		{
			name:        "autoMigrate migration success",
			autoMigrate: true,
			hadErrors:   false,
			want:        false,
			description: "Migration with autoMigrate that succeeds should still write to log",
		},
		{
			name:        "normal migration with errors",
			autoMigrate: false,
			hadErrors:   true,
			want:        false,
			description: "Migration that fails without autoMigrate should write error to log",
		},
		{
			name:        "autoMigrate migration with errors - skip log",
			autoMigrate: true,
			hadErrors:   true,
			want:        true,
			description: "Migration with autoMigrate that has errors should SKIP log to allow retry",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			m := &ResourceMigration{
				autoMigrate: tt.autoMigrate,
				hadErrors:   tt.hadErrors,
			}
			require.Equal(t, tt.want, m.SkipMigrationLog(), tt.description)
		})
	}
}
+5 -57
View File
@@ -14,7 +14,6 @@ import (
"github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/otel"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var tracer = otel.Tracer("github.com/grafana/grafana/pkg/storage/unified/migrations")
@@ -54,6 +53,7 @@ func (p *UnifiedStorageMigrationServiceImpl) Run(ctx context.Context) error {
logger.Info("Data migrations are disabled, skipping")
return nil
}
logger.Info("Running migrations for unified storage")
metrics.MUnifiedStorageMigrationStatus.Set(3)
return RegisterMigrations(ctx, p.migrator, p.cfg, p.sqlStore, p.client)
@@ -79,7 +79,7 @@ func RegisterMigrations(
return err
}
if err := registerMigrations(cfg, mg, migrator, client); err != nil {
if err := registerMigrations(ctx, cfg, mg, migrator, client, sqlStore); err != nil {
return err
}
@@ -92,65 +92,13 @@ func RegisterMigrations(
db.SetMaxOpenConns(3)
defer db.SetMaxOpenConns(maxOpenConns)
}
if err := mg.RunMigrations(ctx,
err := mg.RunMigrations(ctx,
sec.Key("migration_locking").MustBool(true),
sec.Key("locking_attempt_timeout_sec").MustInt()); err != nil {
sec.Key("locking_attempt_timeout_sec").MustInt())
if err != nil {
return fmt.Errorf("unified storage data migration failed: %w", err)
}
logger.Info("Unified storage migrations completed successfully")
return nil
}
func registerDashboardAndFolderMigration(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient) {
foldersDef := getResourceDefinition("folder.grafana.app", "folders")
dashboardsDef := getResourceDefinition("dashboard.grafana.app", "dashboards")
driverName := mg.Dialect.DriverName()
folderCountValidator := NewCountValidator(
client,
foldersDef.GroupResource,
"dashboard",
"org_id = ? and is_folder = true",
driverName,
)
dashboardCountValidator := NewCountValidator(
client,
dashboardsDef.GroupResource,
"dashboard",
"org_id = ? and is_folder = false",
driverName,
)
folderTreeValidator := NewFolderTreeValidator(client, foldersDef.GroupResource, driverName)
dashboardsAndFolders := NewResourceMigration(
migrator,
[]schema.GroupResource{foldersDef.GroupResource, dashboardsDef.GroupResource},
"folders-dashboards",
[]Validator{folderCountValidator, dashboardCountValidator, folderTreeValidator},
)
mg.AddMigration("folders and dashboards migration", dashboardsAndFolders)
}
// registerPlaylistMigration wires up the playlists unified-storage migration,
// validated by a row-count check against the legacy "playlist" table.
func registerPlaylistMigration(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient) {
	playlistDef := getResourceDefinition("playlist.grafana.app", "playlists")
	driver := mg.Dialect.DriverName()

	validators := []Validator{
		NewCountValidator(client, playlistDef.GroupResource, "playlist", "org_id = ?", driver),
	}

	migration := NewResourceMigration(
		migrator,
		[]schema.GroupResource{playlistDef.GroupResource},
		"playlists",
		validators,
	)
	mg.AddMigration("playlists migration", migration)
}
@@ -0,0 +1,211 @@
package threshold
import (
"context"
"fmt"
"net/http"
"os"
"testing"
authlib "github.com/grafana/authlib/types"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/services/folder"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/tests/apis"
"github.com/grafana/grafana/pkg/tests/testinfra"
"github.com/grafana/grafana/pkg/tests/testsuite"
"github.com/grafana/grafana/pkg/util/testutil"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// TODO: remove this test before Grafana 13 GA

// TestMain delegates to the shared integration-test suite runner so database
// setup/teardown is handled consistently with the other tests in this package.
func TestMain(m *testing.M) {
	testsuite.Run(m)
}
// TestIntegrationAutoMigrateThresholdExceeded verifies that auto-migration is skipped when
// resource count exceeds the configured threshold.
//
// The test runs two Grafana instances against the same database: step 1 seeds
// legacy storage with more resources than the threshold allows, and step 2
// starts a fresh instance with migrations enabled and asserts the migration
// did NOT run (by inspecting the migration log table).
// TODO: remove this test before Grafana 13 GA
func TestIntegrationAutoMigrateThresholdExceeded(t *testing.T) {
	testutil.SkipIntegrationTestInShortMode(t)
	if db.IsTestDbSQLite() {
		// Share the same SQLite DB file between steps. SQLITE_TEST_DB is read
		// by the test infra when it creates the Grafana instance; the Cleanup
		// restores whatever value (or absence) was there before.
		tmpDir := t.TempDir()
		dbPath := tmpDir + "/shared-threshold-test.db"
		oldVal := os.Getenv("SQLITE_TEST_DB")
		require.NoError(t, os.Setenv("SQLITE_TEST_DB", dbPath))
		t.Cleanup(func() {
			if oldVal == "" {
				_ = os.Unsetenv("SQLITE_TEST_DB")
			} else {
				_ = os.Setenv("SQLITE_TEST_DB", oldVal)
			}
		})
		t.Logf("Using shared database path: %s", dbPath)
	}
	// Org/user state created by step 1 and reused by step 2 so both steps talk
	// to the same organizations.
	var org1 *apis.OrgUsers
	var orgB *apis.OrgUsers
	dashboardGVR := schema.GroupVersionResource{
		Group:    "dashboard.grafana.app",
		Version:  "v1beta1",
		Resource: "dashboards",
	}
	folderGVR := schema.GroupVersionResource{
		Group:    "folder.grafana.app",
		Version:  "v1beta1",
		Resource: "folders",
	}
	// Keys into the [unified_storage.<resource>.<group>] config sections.
	dashboardKey := fmt.Sprintf("%s.%s", dashboardGVR.Resource, dashboardGVR.Group)
	folderKey := fmt.Sprintf("%s.%s", folderGVR.Resource, folderGVR.Group)
	playlistKey := "playlists.playlist.grafana.app"
	// Step 1: Create resources exceeding the threshold (3 resources, threshold=1)
	t.Run("Step 1: Create resources exceeding threshold", func(t *testing.T) {
		unifiedConfig := map[string]setting.UnifiedStorageConfig{}
		helper := apis.NewK8sTestHelper(t, testinfra.GrafanaOpts{
			AppModeProduction:     true,
			DisableAnonymous:      true,
			DisableDataMigrations: true,
			DisableDBCleanup:      true,
			APIServerStorageType:  "unified",
			UnifiedStorageConfig:  unifiedConfig,
		})
		org1 = &helper.Org1
		orgB = &helper.OrgB
		// Create 3 dashboards
		for i := 1; i <= 3; i++ {
			createTestDashboard(t, helper, fmt.Sprintf("Threshold Dashboard %d", i))
		}
		// Create 3 folders
		for i := 1; i <= 3; i++ {
			createTestFolder(t, helper, fmt.Sprintf("folder-%d", i), fmt.Sprintf("Threshold Folder %d", i), "")
		}
		// Explicitly shutdown helper before Step 1 ends to ensure database is properly closed
		helper.Shutdown()
	})
	// Set SKIP_DB_TRUNCATE to prevent truncation in subsequent steps, so the
	// data seeded by step 1 survives into step 2. Restored on cleanup.
	oldSkipTruncate := os.Getenv("SKIP_DB_TRUNCATE")
	require.NoError(t, os.Setenv("SKIP_DB_TRUNCATE", "true"))
	t.Cleanup(func() {
		if oldSkipTruncate == "" {
			_ = os.Unsetenv("SKIP_DB_TRUNCATE")
		} else {
			_ = os.Setenv("SKIP_DB_TRUNCATE", oldSkipTruncate)
		}
	})
	// Step 2: Verify auto-migration is skipped due to threshold
	t.Run("Step 2: Verify auto-migration skipped (threshold exceeded)", func(t *testing.T) {
		// Set threshold=1, but we have 3 resources of each type, so migration should be skipped
		// Disable playlists migration since we're only testing dashboard/folder threshold behavior
		unifiedConfig := map[string]setting.UnifiedStorageConfig{
			dashboardKey: {AutoMigrationThreshold: 1, EnableMigration: false},
			folderKey:    {AutoMigrationThreshold: 1, EnableMigration: false},
			playlistKey:  {EnableMigration: false},
		}
		helper := apis.NewK8sTestHelperWithOpts(t, apis.K8sTestHelperOpts{
			GrafanaOpts: testinfra.GrafanaOpts{
				AppModeProduction:     true,
				DisableAnonymous:      true,
				DisableDataMigrations: false, // Allow migration system to run
				APIServerStorageType:  "unified",
				UnifiedStorageConfig:  unifiedConfig,
			},
			// Reuse the org/user fixtures from step 1 instead of creating new ones.
			Org1Users: org1,
			OrgBUsers: orgB,
		})
		t.Cleanup(helper.Shutdown)
		namespace := authlib.OrgNamespaceFormatter(helper.Org1.OrgID)
		// The resources from step 1 should still be readable through the
		// unified-storage API even though migration was skipped.
		dashCli := helper.GetResourceClient(apis.ResourceClientArgs{
			User:      helper.Org1.Admin,
			Namespace: namespace,
			GVR:       dashboardGVR,
		})
		verifyResourceCount(t, dashCli, 3)
		folderCli := helper.GetResourceClient(apis.ResourceClientArgs{
			User:      helper.Org1.Admin,
			Namespace: namespace,
			GVR:       folderGVR,
		})
		verifyResourceCount(t, folderCli, 3)
		// Verify migration did NOT run by checking the migration log
		count, err := helper.GetEnv().SQLStore.GetEngine().Table("unifiedstorage_migration_log").
			Where("migration_id = ?", "folders and dashboards migration").
			Count()
		require.NoError(t, err)
		require.Equal(t, int64(0), count, "Migration should not have run")
	})
}
// createTestDashboard creates a dashboard via the legacy /api/dashboards/db
// endpoint and returns its UID. Any failure aborts the test immediately.
//
// Note: the title is interpolated into a JSON literal, so titles containing
// double quotes would produce an invalid payload; callers pass plain titles.
func createTestDashboard(t *testing.T, helper *apis.K8sTestHelper, title string) string {
	t.Helper()

	payload := fmt.Sprintf(`{"dashboard": {"title": "%s", "panels": []}, "overwrite": false}`, title)
	result := apis.DoRequest(helper, apis.RequestParams{
		User:   helper.Org1.Admin,
		Method: http.MethodPost, // use the constant, consistent with createTestFolder
		Path:   "/api/dashboards/db",
		Body:   []byte(payload),
	}, &map[string]any{})
	require.NotNil(t, result.Response)
	require.Equal(t, http.StatusOK, result.Response.StatusCode)

	// Guard the type assertion: a malformed response should fail the test
	// with a clear message instead of panicking.
	uid, ok := (*result.Result)["uid"].(string)
	require.True(t, ok, "dashboard create response has no string uid field")
	require.NotEmpty(t, uid)
	return uid
}
// createTestFolder creates a folder via the legacy /api/folders endpoint,
// optionally nested under parentUID (empty means top-level), and returns the
// decoded folder. Any failure aborts the test immediately.
//
// Note: uid/title/parentUID are interpolated into a JSON literal, so values
// containing double quotes would produce an invalid payload.
func createTestFolder(t *testing.T, helper *apis.K8sTestHelper, uid, title, parentUID string) *folder.Folder {
	t.Helper()

	payload := fmt.Sprintf(`{
		"title": "%s",
		"uid": "%s"`, title, uid)
	if parentUID != "" {
		payload += fmt.Sprintf(`,
		"parentUid": "%s"`, parentUID)
	}
	payload += "}"

	folderCreate := apis.DoRequest(helper, apis.RequestParams{
		User:   helper.Org1.Admin,
		Method: http.MethodPost,
		Path:   "/api/folders",
		Body:   []byte(payload),
	}, &folder.Folder{})
	// Check the status code first: an error body decoded into an empty
	// folder.Folder would otherwise pass the NotNil check below.
	require.NotNil(t, folderCreate.Response)
	require.Equal(t, http.StatusOK, folderCreate.Response.StatusCode)
	require.NotNil(t, folderCreate.Result)
	return folderCreate.Result
}
// verifyResourceCount asserts that listing the resource through the K8s API
// returns exactly expectedCount items.
func verifyResourceCount(t *testing.T, client *apis.K8sResourceClient, expectedCount int) {
	t.Helper()

	listed, err := client.Resource.List(context.Background(), metav1.ListOptions{})
	require.NoError(t, err)

	items, err := meta.ExtractList(listed)
	require.NoError(t, err)
	require.Len(t, items, expectedCount)
}
@@ -12,7 +12,7 @@ INSERT INTO {{ .Ident .TableName }}
VALUES (
{{ .Arg .GUID }},
{{ .Arg .KeyPath }},
COALESCE({{ .Arg .Value }}, ""),
{{ .Arg .Value }},
{{ .Arg .Group }},
{{ .Arg .Resource }},
{{ .Arg .Namespace }},
@@ -10,7 +10,7 @@ INSERT INTO {{ .Ident "resource_history" }}
{{ .Ident "folder" }}
)
VALUES (
COALESCE({{ .Arg .Value }}, ""),
{{ .Arg .Value }},
{{ .Arg .GUID }},
{{ .Arg .Group }},
{{ .Arg .Resource }},
@@ -5,7 +5,7 @@ INSERT INTO {{ .Ident .TableName }}
)
VALUES (
{{ .Arg .KeyPath }},
COALESCE({{ .Arg .Value }}, "")
{{ .Arg .Value }}
)
{{- if eq .DialectName "mysql" }}
ON DUPLICATE KEY UPDATE {{ .Ident "value" }} = {{ .Arg .Value }}
+9 -4
View File
@@ -349,6 +349,11 @@ func (w *sqlWriteCloser) Close() error {
}
w.closed = true
value := w.buf.Bytes()
if value == nil {
// to prevent NOT NULL constraint violations
value = []byte{}
}
// do regular kv save: simple key_path + value insert with conflict check.
// can only do this on resource_events for now, until we drop the columns in resource_history
@@ -356,7 +361,7 @@ func (w *sqlWriteCloser) Close() error {
_, err := dbutil.Exec(w.ctx, w.kv.db, sqlKVSaveEvent, sqlKVSaveRequest{
SQLTemplate: sqltemplate.New(w.kv.dialect),
sqlKVSectionKey: w.sectionKey,
Value: w.buf.Bytes(),
Value: value,
})
if err != nil {
@@ -380,7 +385,7 @@ func (w *sqlWriteCloser) Close() error {
SQLTemplate: sqltemplate.New(w.kv.dialect),
sqlKVSectionKey: w.sectionKey,
GUID: uuid.New().String(),
Value: w.buf.Bytes(),
Value: value,
})
if err != nil {
@@ -397,7 +402,7 @@ func (w *sqlWriteCloser) Close() error {
_, err = dbutil.Exec(w.ctx, w.kv.db, sqlKVUpdateData, sqlKVSaveRequest{
SQLTemplate: sqltemplate.New(w.kv.dialect),
sqlKVSectionKey: w.sectionKey,
Value: w.buf.Bytes(),
Value: value,
})
if err != nil {
@@ -433,7 +438,7 @@ func (w *sqlWriteCloser) Close() error {
_, err = dbutil.Exec(w.ctx, tx, sqlKVInsertLegacyResourceHistory, sqlKVSaveRequest{
SQLTemplate: sqltemplate.New(w.kv.dialect),
sqlKVSectionKey: w.sectionKey,
Value: w.buf.Bytes(),
Value: value,
GUID: dataKey.GUID,
Group: dataKey.Group,
Resource: dataKey.Resource,
@@ -217,5 +217,8 @@ func initResourceTables(mg *migrator.Migrator) string {
migrator.ConvertUniqueKeyToPrimaryKey(mg, oldResourceVersionUniqueKey, updatedResourceVersionTable)
mg.AddMigration("Change key_path collation of resource_history in postgres", migrator.NewRawSQLMigration("").Postgres(`ALTER TABLE resource_history ALTER COLUMN key_path TYPE VARCHAR(2048) COLLATE "C";`))
mg.AddMigration("Change key_path collation of resource_events in postgres", migrator.NewRawSQLMigration("").Postgres(`ALTER TABLE resource_events ALTER COLUMN key_path TYPE VARCHAR(2048) COLLATE "C";`))
return marker
}
@@ -87,7 +87,7 @@
"tags": [
"ExternalGroupMapping"
],
"description": "list or watch objects of kind ExternalGroupMapping",
"description": "list objects of kind ExternalGroupMapping",
"operationId": "listExternalGroupMapping",
"parameters": [
{
@@ -8690,32 +8690,6 @@
"description": "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.",
"type": "string",
"format": "date-time"
},
"io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent": {
"description": "Event represents a single event to a watched resource.",
"type": "object",
"required": [
"type",
"object"
],
"properties": {
"object": {
"description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.",
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension"
}
]
},
"type": {
"type": "string",
"default": ""
}
}
},
"io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object"
}
}
}
+2
View File
@@ -557,6 +557,8 @@ func CreateGrafDir(t *testing.T, opts GrafanaOpts) (string, string) {
require.NoError(t, err)
_, err = section.NewKey("enableMigration", fmt.Sprintf("%t", v.EnableMigration))
require.NoError(t, err)
_, err = section.NewKey("autoMigrationThreshold", fmt.Sprintf("%d", v.AutoMigrationThreshold))
require.NoError(t, err)
}
}
if opts.UnifiedStorageEnableSearch {
+3
View File
@@ -6990,6 +6990,9 @@
"ReportOptions": {
"type": "object",
"properties": {
"csvEncoding": {
"type": "string"
},
"layout": {
"type": "string"
},
+3
View File
@@ -20504,6 +20504,9 @@
"ReportOptions": {
"type": "object",
"properties": {
"csvEncoding": {
"type": "string"
},
"layout": {
"type": "string"
},
@@ -36,7 +36,11 @@ import {
isExpressionQueryInAlert,
} from '../../../rule-editor/formProcessing';
import { RuleFormType, RuleFormValues } from '../../../types/rule-form';
import { GRAFANA_RULES_SOURCE_NAME, getDefaultOrFirstCompatibleDataSource } from '../../../utils/datasource';
import {
GRAFANA_RULES_SOURCE_NAME,
getDefaultOrFirstCompatibleDataSource,
getRulesDataSources,
} from '../../../utils/datasource';
import { PromOrLokiQuery, isPromOrLokiQuery } from '../../../utils/rule-form';
import {
isCloudAlertingRuleByType,
@@ -417,7 +421,9 @@ export const QueryAndExpressionsStep = ({ editingExistingRule, onDataChange, mod
]);
const { sectionTitle, helpLabel, helpContent, helpLink } = DESCRIPTIONS[type ?? RuleFormType.grafana];
// Only show the data source managed option if there are data sources with manageAlerts enabled
const hasAlertEnabledDataSources = useMemo(() => getRulesDataSources().length > 0, []);
const canSelectDataSourceManaged = onlyOneDSInQueries(queries) && hasAlertEnabledDataSources;
if (!type) {
return null;
}
@@ -437,8 +443,6 @@ export const QueryAndExpressionsStep = ({ editingExistingRule, onDataChange, mod
}
: undefined;
const canSelectDataSourceManaged = onlyOneDSInQueries(queries);
return (
<>
<RuleEditorSection
@@ -506,7 +510,7 @@ export const QueryAndExpressionsStep = ({ editingExistingRule, onDataChange, mod
}}
/>
</Field>
{mode === 'edit' && (
{mode === 'edit' && hasAlertEnabledDataSources && (
<>
<Divider />
<SmartAlertTypeDetector
@@ -194,4 +194,53 @@ describe('RuleEditor grafana managed rules', () => {
]),
});
});
it('should not show rule type switch when no data sources have manageAlerts enabled', async () => {
// Setup data source with manageAlerts explicitly disabled
setupDataSources(
mockDataSource(
{
type: 'prometheus',
name: 'Prom-disabled',
uid: 'prometheus-disabled',
isDefault: true,
jsonData: { manageAlerts: false },
},
{ alerting: true, module: 'core:plugin/prometheus' }
)
);
renderRuleEditor();
// Wait for the form to load
await screen.findByRole('textbox', { name: 'name' });
// The rule type switch should NOT be visible
expect(screen.queryByText('Rule type')).not.toBeInTheDocument();
expect(screen.queryByTestId('rule-type-radio-group')).not.toBeInTheDocument();
});
it('should show rule type switch when data sources have manageAlerts enabled', async () => {
// Setup data source with manageAlerts enabled
setupDataSources(
mockDataSource(
{
type: 'prometheus',
name: 'Prom-enabled',
uid: 'prometheus-enabled',
isDefault: true,
jsonData: { manageAlerts: true },
},
{ alerting: true, module: 'core:plugin/prometheus' }
)
);
renderRuleEditor();
// Wait for the form to load
await screen.findByRole('textbox', { name: 'name' });
// The rule type section should be visible
expect(await screen.findByText('Rule type')).toBeInTheDocument();
});
});
@@ -7,10 +7,11 @@ import { setPluginComponentsHook, setPluginLinksHook } from '@grafana/runtime';
import { AccessControlAction } from 'app/types/accessControl';
import { setupMswServer } from '../mockApi';
import { grantUserPermissions, grantUserRole } from '../mocks';
import { grantUserPermissions, grantUserRole, mockDataSource } from '../mocks';
import { setGrafanaRuleGroupExportResolver } from '../mocks/server/configure';
import { alertingFactory } from '../mocks/server/db';
import { RulesFilter } from '../search/rulesSearchParser';
import { setupDataSources } from '../testSetup/datasources';
import RuleListPage, { RuleListActions } from './RuleList.v2';
import { loadDefaultSavedSearch } from './filter/useSavedSearches';
@@ -365,6 +366,51 @@ describe('RuleListActions', () => {
expect(ui.exportDrawer.query()).toBeInTheDocument();
});
});
describe('Data source options visibility', () => {
it('should not show "New Data source recording rule" option when no data sources have manageAlerts enabled', async () => {
// Set up only data sources with manageAlerts explicitly set to false
// This replaces the default data sources that have manageAlerts defaulting to true
setupDataSources(
mockDataSource({
name: 'Prometheus-disabled',
uid: 'prometheus-disabled',
type: 'prometheus',
jsonData: { manageAlerts: false },
})
);
grantUserPermissions([AccessControlAction.AlertingRuleExternalWrite]);
const { user } = render(<RuleListActions />);
await user.click(ui.moreButton.get());
const menu = await ui.moreMenu.find();
expect(ui.menuOptions.newDataSourceRecordingRule.query(menu)).not.toBeInTheDocument();
});
it('should show "New Data source recording rule" option when data sources have manageAlerts enabled', async () => {
// Set up data source with manageAlerts enabled
setupDataSources(
mockDataSource({
name: 'Prometheus-enabled',
uid: 'prometheus-enabled',
type: 'prometheus',
jsonData: { manageAlerts: true },
})
);
grantUserPermissions([AccessControlAction.AlertingRuleExternalWrite]);
const { user } = render(<RuleListActions />);
await user.click(ui.moreButton.get());
const menu = await ui.moreMenu.find();
expect(ui.menuOptions.newDataSourceRecordingRule.query(menu)).toBeInTheDocument();
});
});
});
describe('RuleListPage v2 - View switching', () => {
@@ -13,6 +13,7 @@ import { useListViewMode } from '../components/rules/Filter/RulesViewModeSelecto
import { AIAlertRuleButtonComponent } from '../enterprise-components/AI/AIGenAlertRuleButton/addAIAlertRuleButton';
import { AlertingAction, useAlertingAbility } from '../hooks/useAbilities';
import { useRulesFilter } from '../hooks/useFilteredRules';
import { getRulesDataSources } from '../utils/datasource';
import { FilterView } from './FilterView';
import { GroupedView } from './GroupedView';
@@ -41,8 +42,11 @@ export function RuleListActions() {
const [createCloudRuleSupported, createCloudRuleAllowed] = useAlertingAbility(AlertingAction.CreateExternalAlertRule);
const [exportRulesSupported, exportRulesAllowed] = useAlertingAbility(AlertingAction.ExportGrafanaManagedRules);
// Check if there are any data sources with manageAlerts enabled
const hasAlertEnabledDataSources = useMemo(() => getRulesDataSources().length > 0, []);
const canCreateGrafanaRules = createGrafanaRuleSupported && createGrafanaRuleAllowed;
const canCreateCloudRules = createCloudRuleSupported && createCloudRuleAllowed;
const canCreateCloudRules = createCloudRuleSupported && createCloudRuleAllowed && hasAlertEnabledDataSources;
const canExportRules = exportRulesSupported && exportRulesAllowed;
const canCreateRules = canCreateGrafanaRules || canCreateCloudRules;
@@ -200,7 +200,7 @@ describe('createSpanLinkFactory', () => {
datasource: 'loki1_uid',
queries: [
{
expr: '{cluster="cluster1", hostname="hostname1", service_namespace="namespace1"} | label_format log_line_contains_trace_id=`{{ contains "7946b05c2e2e4e5a" __line__ }}` | log_line_contains_trace_id="true" or trace_id="7946b05c2e2e4e5a" | label_format log_line_contains_span_id=`{{ contains "6605c7b08e715d6c" __line__ }}` | log_line_contains_span_id="true" or span_id="6605c7b08e715d6c"',
expr: '{cluster="cluster1", hostname="hostname1", service_namespace="namespace1"} | logfmt | json | drop __error__, __error_details__ | trace_id="7946b05c2e2e4e5a" | span_id="6605c7b08e715d6c"',
refId: '',
datasource: { uid: 'loki1_uid' },
},
@@ -446,12 +446,12 @@ function getQueryForLoki(
let expr = '{${__tags}}';
if (filterByTraceID && span.traceID) {
expr +=
' | label_format log_line_contains_trace_id=`{{ contains "${__span.traceId}" __line__ }}` | log_line_contains_trace_id="true" or trace_id="${__span.traceId}"';
}
if (filterBySpanID && span.spanID) {
expr +=
' | label_format log_line_contains_span_id=`{{ contains "${__span.spanId}" __line__ }}` | log_line_contains_span_id="true" or span_id="${__span.spanId}"';
expr += ' | logfmt | json | drop __error__, __error_details__ | trace_id="${__span.traceId}"';
if (filterBySpanID && span.spanID) {
expr += ' | span_id="${__span.spanId}"';
}
} else if (filterBySpanID && span.spanID) {
expr += ' | logfmt | json | drop __error__, __error_details__ | span_id="${__span.spanId}"';
}
return {
+3
View File
@@ -10039,6 +10039,9 @@
},
"ReportOptions": {
"properties": {
"csvEncoding": {
"type": "string"
},
"layout": {
"type": "string"
},
+1
View File
@@ -16,6 +16,7 @@ queryLibrary=true
queryService=true
secretsManagementAppPlatform = true
secretsManagementAppPlatformUI = true
reportingCsvEncodingOptions = true
[environment]
stack_id = 12345
+11 -11
View File
@@ -6121,32 +6121,32 @@ __metadata:
languageName: node
linkType: hard
"@openfeature/ofrep-core@npm:^1.0.0":
version: 1.1.0
resolution: "@openfeature/ofrep-core@npm:1.1.0"
"@openfeature/ofrep-core@npm:^2.0.0":
version: 2.0.0
resolution: "@openfeature/ofrep-core@npm:2.0.0"
peerDependencies:
"@openfeature/core": ^1.6.0
checksum: 10/4198f2f1abf974822bf14530a7f514292d8235552d6e61465d29ffe42d092f675e7f56a9e9da5aa7d45dfbd98cc36316efbdaadaf83e8f510f35865612f5f24f
checksum: 10/598656fb35c517fec8abfc4cd8c5a9cb11d4c0f1698e6aa1dd1cf7c42ebb244a9e5be7c8f0fecdc0d2246747b1a51f7e2fe5b84e2f648b23cf40435d2e3dc176
languageName: node
linkType: hard
"@openfeature/ofrep-web-provider@npm:^0.3.3":
version: 0.3.3
resolution: "@openfeature/ofrep-web-provider@npm:0.3.3"
version: 0.3.5
resolution: "@openfeature/ofrep-web-provider@npm:0.3.5"
dependencies:
"@openfeature/ofrep-core": "npm:^1.0.0"
"@openfeature/ofrep-core": "npm:^2.0.0"
peerDependencies:
"@openfeature/web-sdk": ^1.4.0
checksum: 10/85f362e3ebaa9d421be91e4d966284e28850649e417d5db81181960b95ac693c9faa4a0bdc84eeb03fa694a8cd1e1f5d9de9bf0fba62f8e2669f9637836f0884
checksum: 10/699a9d2591e9d5834f02baa3d68972e52d37efed3866e2c06ad5deb071447c68fdd8afdc2e66e9808f7be162358a338f69cb7912fcb70320f7a7bc6abd00d3d6
languageName: node
linkType: hard
"@openfeature/web-sdk@npm:^1.6.1":
version: 1.7.1
resolution: "@openfeature/web-sdk@npm:1.7.1"
version: 1.7.2
resolution: "@openfeature/web-sdk@npm:1.7.2"
peerDependencies:
"@openfeature/core": ^1.9.0
checksum: 10/358cabfddda8bf67a9bd96ce28f0f84a3218416907f8df4f2a58750ee840d164d86a1c859e8c9aa92d132fa28fcf72a0f17ceece798ff8af297d3e1c1e30cdab
checksum: 10/3fc966d0523ca1941b44350d4c8a42c1b74cb035265d3574a2a6b16e87fa9edfb7211ec945b937399badad5de1e771c55ba7ce04d6eb02f265fc5a37e3af9395
languageName: node
linkType: hard