Compare commits

...

16 Commits

Author SHA1 Message Date
Michael Mandrus
74c21ce75c update workspace 2026-01-14 14:56:17 -05:00
Michael Mandrus
ad7a6e9a7a Merge branch 'main' into mmandrus/secrets/dek-cache 2026-01-14 14:38:57 -05:00
Ezequiel Victorero
72f7bd3900 Snapshots: Support public snapshot instance in latest version (#116086) 2026-01-14 15:28:16 -03:00
Will Assis
ba416eab4e unified-storage: dont use polling notifier with sqlite in sqlkv (#116283)
* unified-storage: dont use polling notifier with sqlite in sqlkv
2026-01-14 18:22:39 +00:00
Alan Martin
189d50d815 UI: Use react-table column header types in InteractiveTable with story and tests (#116091)
* feat(InteractiveTable): allow custom header rendering

* docs(InteractiveTable): add story for custom header rendering

* test(InteractiveTable): add tests for custom header rendering

* docs(InteractiveTable): add custom header rendering documentation

* fix: test failure from non-a11y code
2026-01-14 17:59:03 +00:00
Mariell Hoversholm
450eaba447 test: skip integration test in short mode (#116280) 2026-01-14 18:33:55 +01:00
Kristina Demeshchik
87f5d5e741 Dashboard: Hide export options in collapsible row (#116155)
* Introduce export options

* Reset keys

* Introduce a new key

* Generate new keys

* Rename the label

* re-generate key

* Fix the spacing

* Remove debuggers

* Add subtitle

* refactor component

* update labels

* faield tests

* Update tooltip

* Linting issue
2026-01-14 12:12:33 -05:00
Andrew Hackmann
5e68b07cac Elasticsearch: Make code editor look more like prometheus (#115461)
* Make code editor look more prometheus

* add warning when switching builders

* address adam's feedback

* yarn
2026-01-14 09:50:35 -07:00
Adela Almasan
99acd3766d Suggestions: Update empty state (#116172) 2026-01-14 10:37:42 -06:00
Michael Mandrus
b73869ea9c use noop cache 2025-11-18 12:10:56 -05:00
Michael Mandrus
3c2f629bb9 Merge remote-tracking branch 'origin/main' into mmandrus/secrets/dek-cache 2025-11-18 11:59:33 -05:00
Michael Mandrus
075761ec66 Merge remote-tracking branch 'origin/main' into mmandrus/secrets/dek-cache 2025-11-14 00:13:08 -05:00
Michael Mandrus
3974e88cbe flush the encryption cache during consolidation 2025-11-14 00:03:48 -05:00
Michael Mandrus
1da89b70a0 use the cache in most places 2025-11-13 23:55:31 -05:00
Michael Mandrus
197019f554 add namespace, plus unit tests for cache 2025-11-13 22:34:25 -05:00
Michael Mandrus
773baf47e1 pass dek cache into encryption manager 2025-11-13 15:33:25 -05:00
40 changed files with 2150 additions and 248 deletions

4
go.mod
View File

@@ -32,14 +32,14 @@ require (
github.com/armon/go-radix v1.0.0 // @grafana/grafana-app-platform-squad
github.com/aws/aws-sdk-go v1.55.7 // @grafana/aws-datasources
github.com/aws/aws-sdk-go-v2 v1.40.0 // @grafana/aws-datasources
github.com/aws/aws-sdk-go-v2/credentials v1.18.21 // indirect; @grafana/grafana-operator-experience-squad
github.com/aws/aws-sdk-go-v2/credentials v1.18.21 // @grafana/grafana-operator-experience-squad
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.45.3 // @grafana/aws-datasources
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.51.0 // @grafana/aws-datasources
github.com/aws/aws-sdk-go-v2/service/ec2 v1.225.2 // @grafana/aws-datasources
github.com/aws/aws-sdk-go-v2/service/oam v1.18.3 // @grafana/aws-datasources
github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6 // @grafana/aws-datasources
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.40.1 // @grafana/grafana-operator-experience-squad
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 // indirect; @grafana/grafana-operator-experience-squad
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 // @grafana/grafana-operator-experience-squad
github.com/aws/smithy-go v1.23.2 // @grafana/aws-datasources
github.com/beevik/etree v1.4.1 // @grafana/grafana-backend-group
github.com/benbjohnson/clock v1.3.5 // @grafana/alerting-backend

View File

@@ -117,6 +117,44 @@ export const MyComponent = () => {
};
```
### Custom Header Rendering
Column headers can be customized using strings, React elements, or renderer functions. The `header` property accepts any value that matches React Table's `Renderer` type.
**Important:** When using custom header content, prefer inline elements (like `<span>`) over block elements (like `<div>`) to avoid layout issues. Block-level elements can cause extra spacing and alignment problems in table headers because they disrupt the table's inline flow. Use `display: inline-flex` or `display: inline-block` when you need flexbox or block-like behavior.
```tsx
const columns: Array<Column<TableData>> = [
// React element header
{
id: 'checkbox',
header: (
<>
<label htmlFor="select-all" className="sr-only">
Select all rows
</label>
<Checkbox id="select-all" />
</>
),
cell: () => <Checkbox aria-label="Select row" />,
},
// Function renderer header
{
id: 'firstName',
header: () => (
<span style={{ display: 'inline-flex', alignItems: 'center', gap: '8px' }}>
<Icon name="user" size="sm" />
<span>First Name</span>
</span>
),
},
// String header
{ id: 'lastName', header: 'Last name' },
];
```
### Custom Cell Rendering
Individual cells can be rendered with custom content by defining a `cell` property on the column definition.

View File

@@ -3,8 +3,11 @@ import { useCallback, useMemo, useState } from 'react';
import { CellProps } from 'react-table';
import { LinkButton } from '../Button/Button';
import { Checkbox } from '../Forms/Checkbox';
import { Field } from '../Forms/Field';
import { Icon } from '../Icon/Icon';
import { Input } from '../Input/Input';
import { Text } from '../Text/Text';
import { FetchDataArgs, InteractiveTable, InteractiveTableHeaderTooltip } from './InteractiveTable';
import mdx from './InteractiveTable.mdx';
@@ -297,4 +300,40 @@ export const WithControlledSort: StoryFn<typeof InteractiveTable> = (args) => {
return <InteractiveTable {...args} data={data} pageSize={15} fetchData={fetchData} />;
};
// Story demonstrating the three supported `header` forms on a column:
// a React element, a renderer function, and a plain string.
export const WithCustomHeader: TableStoryObj = {
  args: {
    columns: [
      // React element header
      {
        id: 'checkbox',
        header: (
          <>
            {/* Visually hidden label keeps the select-all checkbox accessible */}
            <label htmlFor="select-all" className="sr-only">
              Select all rows
            </label>
            <Checkbox id="select-all" />
          </>
        ),
        cell: () => <Checkbox aria-label="Select row" />,
      },
      // Function renderer header
      {
        id: 'firstName',
        header: () => (
          // inline-flex keeps icon and text aligned; block elements can add
          // stray spacing inside table headers
          <span style={{ display: 'inline-flex', alignItems: 'center', gap: '8px' }}>
            <Icon name="user" size="sm" />
            <Text element="span">First Name</Text>
          </span>
        ),
        sortType: 'string',
      },
      // String header
      { id: 'lastName', header: 'Last name', sortType: 'string' },
      { id: 'car', header: 'Car', sortType: 'string' },
      { id: 'age', header: 'Age', sortType: 'number' },
    ],
    data: pageableData.slice(0, 10),
    getRowId: (r) => r.id,
  },
};
export default meta;

View File

@@ -2,6 +2,9 @@ import { render, screen, within } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import * as React from 'react';
import { Checkbox } from '../Forms/Checkbox';
import { Icon } from '../Icon/Icon';
import { InteractiveTable } from './InteractiveTable';
import { Column } from './types';
@@ -247,4 +250,104 @@ describe('InteractiveTable', () => {
expect(fetchData).toHaveBeenCalledWith({ sortBy: [{ id: 'id', desc: false }] });
});
});
// Covers the three `header` forms accepted by Column: plain string,
// React element, and renderer function — individually and combined.
describe('custom header rendering', () => {
  it('should render string headers', () => {
    const columns: Array<Column<TableData>> = [{ id: 'id', header: 'ID' }];
    const data: TableData[] = [{ id: '1', value: '1', country: 'Sweden' }];
    render(<InteractiveTable columns={columns} data={data} getRowId={getRowId} />);
    expect(screen.getByRole('columnheader', { name: 'ID' })).toBeInTheDocument();
  });

  it('should render React element headers', () => {
    const columns: Array<Column<TableData>> = [
      {
        id: 'checkbox',
        header: (
          <>
            <label htmlFor="select-all" className="sr-only">
              Select all rows
            </label>
            <Checkbox id="select-all" data-testid="header-checkbox" />
          </>
        ),
        cell: () => <Checkbox data-testid="cell-checkbox" aria-label="Select row" />,
      },
    ];
    const data: TableData[] = [{ id: '1', value: '1', country: 'Sweden' }];
    render(<InteractiveTable columns={columns} data={data} getRowId={getRowId} />);
    // Both the header and cell checkboxes render, each with an accessible name
    expect(screen.getByTestId('header-checkbox')).toBeInTheDocument();
    expect(screen.getByTestId('cell-checkbox')).toBeInTheDocument();
    expect(screen.getByLabelText('Select all rows')).toBeInTheDocument();
    expect(screen.getByLabelText('Select row')).toBeInTheDocument();
    expect(screen.getByText('Select all rows')).toBeInTheDocument();
  });

  it('should render function renderer headers', () => {
    const columns: Array<Column<TableData>> = [
      {
        id: 'firstName',
        header: () => (
          <span style={{ display: 'inline-flex', alignItems: 'center', gap: '8px' }}>
            <Icon name="user" size="sm" data-testid="header-icon" />
            <span>First Name</span>
          </span>
        ),
        sortType: 'string',
      },
    ];
    const data: TableData[] = [{ id: '1', value: '1', country: 'Sweden' }];
    render(<InteractiveTable columns={columns} data={data} getRowId={getRowId} />);
    expect(screen.getByTestId('header-icon')).toBeInTheDocument();
    // The rendered text still contributes to the column header's accessible name
    expect(screen.getByRole('columnheader', { name: /first name/i })).toBeInTheDocument();
  });

  it('should render all header types together', () => {
    const columns: Array<Column<TableData>> = [
      {
        id: 'checkbox',
        header: (
          <>
            <label htmlFor="select-all" className="sr-only">
              Select all rows
            </label>
            <Checkbox id="select-all" data-testid="header-checkbox" />
          </>
        ),
        cell: () => <Checkbox aria-label="Select row" />,
      },
      {
        id: 'id',
        header: () => (
          <span style={{ display: 'inline-flex', alignItems: 'center', gap: '8px' }}>
            <Icon name="user" size="sm" data-testid="header-icon" />
            <span>ID</span>
          </span>
        ),
        sortType: 'string',
      },
      { id: 'country', header: 'Country', sortType: 'string' },
      { id: 'value', header: 'Value' },
    ];
    const data: TableData[] = [
      { id: '1', value: 'Value 1', country: 'Sweden' },
      { id: '2', value: 'Value 2', country: 'Norway' },
    ];
    render(<InteractiveTable columns={columns} data={data} getRowId={getRowId} />);
    expect(screen.getByTestId('header-checkbox')).toBeInTheDocument();
    expect(screen.getByTestId('header-icon')).toBeInTheDocument();
    expect(screen.getByRole('columnheader', { name: 'Country' })).toBeInTheDocument();
    expect(screen.getByRole('columnheader', { name: 'Value' })).toBeInTheDocument();
    // Verify data is rendered
    expect(screen.getByText('Sweden')).toBeInTheDocument();
    expect(screen.getByText('Norway')).toBeInTheDocument();
    expect(screen.getByText('Value 1')).toBeInTheDocument();
    expect(screen.getByText('Value 2')).toBeInTheDocument();
  });
});
});

View File

@@ -1,5 +1,5 @@
import { ReactNode } from 'react';
import { CellProps, DefaultSortTypes, IdType, SortByFn } from 'react-table';
import { CellProps, DefaultSortTypes, HeaderProps, IdType, Renderer, SortByFn } from 'react-table';
export interface Column<TableData extends object> {
/**
@@ -11,9 +11,9 @@ export interface Column<TableData extends object> {
*/
cell?: (props: CellProps<TableData>) => ReactNode;
/**
* Header name. if `undefined` the header will be empty. Useful for action columns.
* Header name. Can be a string, renderer function, or undefined. If `undefined` the header will be empty. Useful for action columns.
*/
header?: string;
header?: Renderer<HeaderProps<TableData>>;
/**
* Column sort type. If `undefined` the column will not be sortable.
* */

View File

@@ -76,21 +76,27 @@ func (hs *HTTPServer) CreateDashboardSnapshot(c *contextmodel.ReqContext) {
return
}
// Do not check permissions when the instance snapshot public mode is enabled
if !hs.Cfg.SnapshotPublicMode {
evaluator := ac.EvalAll(ac.EvalPermission(dashboards.ActionSnapshotsCreate), ac.EvalPermission(dashboards.ActionDashboardsRead, dashboards.ScopeDashboardsProvider.GetResourceScopeUID(cmd.Dashboard.GetNestedString("uid"))))
if canSave, err := hs.AccessControl.Evaluate(c.Req.Context(), c.SignedInUser, evaluator); err != nil || !canSave {
c.JsonApiErr(http.StatusForbidden, "forbidden", err)
return
}
}
dashboardsnapshots.CreateDashboardSnapshot(c, snapshot.SnapshotSharingOptions{
cfg := snapshot.SnapshotSharingOptions{
SnapshotsEnabled: hs.Cfg.SnapshotEnabled,
ExternalEnabled: hs.Cfg.ExternalEnabled,
ExternalSnapshotName: hs.Cfg.ExternalSnapshotName,
ExternalSnapshotURL: hs.Cfg.ExternalSnapshotUrl,
}, cmd, hs.dashboardsnapshotsService)
}
if hs.Cfg.SnapshotPublicMode {
// Public mode: no user or dashboard validation needed
dashboardsnapshots.CreateDashboardSnapshotPublic(c, cfg, cmd, hs.dashboardsnapshotsService)
return
}
// Regular mode: check permissions
evaluator := ac.EvalAll(ac.EvalPermission(dashboards.ActionSnapshotsCreate), ac.EvalPermission(dashboards.ActionDashboardsRead, dashboards.ScopeDashboardsProvider.GetResourceScopeUID(cmd.Dashboard.GetNestedString("uid"))))
if canSave, err := hs.AccessControl.Evaluate(c.Req.Context(), c.SignedInUser, evaluator); err != nil || !canSave {
c.JsonApiErr(http.StatusForbidden, "forbidden", err)
return
}
dashboardsnapshots.CreateDashboardSnapshot(c, cfg, cmd, hs.dashboardsnapshotsService)
}
// GET /api/snapshots/:key
@@ -213,13 +219,6 @@ func (hs *HTTPServer) DeleteDashboardSnapshot(c *contextmodel.ReqContext) respon
return response.Error(http.StatusUnauthorized, "OrgID mismatch", nil)
}
if queryResult.External {
err := dashboardsnapshots.DeleteExternalDashboardSnapshot(queryResult.ExternalDeleteURL)
if err != nil {
return response.Error(http.StatusInternalServerError, "Failed to delete external dashboard", err)
}
}
// Dashboard can be empty (creation error or external snapshot). This means that the mustInt here returns a 0,
// which before RBAC would result in a dashboard which has no ACL. A dashboard without an ACL would fallback
// to the users org role, which for editors and admins would essentially always be allowed here. With RBAC,
@@ -239,6 +238,13 @@ func (hs *HTTPServer) DeleteDashboardSnapshot(c *contextmodel.ReqContext) respon
}
}
if queryResult.External {
err := dashboardsnapshots.DeleteExternalDashboardSnapshot(queryResult.ExternalDeleteURL)
if err != nil {
return response.Error(http.StatusInternalServerError, "Failed to delete external dashboard", err)
}
}
cmd := &dashboardsnapshots.DeleteDashboardSnapshotCommand{DeleteKey: queryResult.DeleteKey}
if err := hs.dashboardsnapshotsService.DeleteDashboardSnapshot(c.Req.Context(), cmd); err != nil {

View File

@@ -16,7 +16,6 @@ import (
_ "github.com/blugelabs/bluge"
_ "github.com/blugelabs/bluge_segment_api"
_ "github.com/crewjam/saml"
_ "github.com/docker/go-connections/nat"
_ "github.com/go-jose/go-jose/v4"
_ "github.com/gobwas/glob"
_ "github.com/googleapis/gax-go/v2"
@@ -32,7 +31,6 @@ import (
_ "github.com/spf13/cobra" // used by the standalone apiserver cli
_ "github.com/spyzhov/ajson"
_ "github.com/stretchr/testify/require"
_ "github.com/testcontainers/testcontainers-go"
_ "gocloud.dev/secrets/awskms"
_ "gocloud.dev/secrets/azurekeyvault"
_ "gocloud.dev/secrets/gcpkms"
@@ -57,7 +55,8 @@ import (
_ "github.com/grafana/e2e"
_ "github.com/grafana/gofpdf"
_ "github.com/grafana/gomemcache/memcache"
_ "github.com/grafana/tempo/pkg/traceql"
_ "github.com/grafana/grafana/apps/alerting/alertenrichment/pkg/apis/alertenrichment/v1beta1"
_ "github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1"
_ "github.com/grafana/tempo/pkg/traceql"
)

View File

@@ -14,6 +14,9 @@ type EncryptionManager interface {
// implementation present at manager.EncryptionService.
Encrypt(ctx context.Context, namespace xkube.Namespace, payload []byte) (EncryptedPayload, error)
Decrypt(ctx context.Context, namespace xkube.Namespace, payload EncryptedPayload) ([]byte, error)
// Since consolidation occurs at a level above the EncryptionManager, we need to allow that process to manually flush the cache
FlushCache(namespace xkube.Namespace)
}
type EncryptedPayload struct {

View File

@@ -7,11 +7,13 @@ import (
"fmt"
"strconv"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/usagestats"
@@ -19,6 +21,7 @@ import (
"github.com/grafana/grafana/pkg/registry/apis/secret/encryption"
"github.com/grafana/grafana/pkg/registry/apis/secret/encryption/cipher"
"github.com/grafana/grafana/pkg/registry/apis/secret/xkube"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/util"
)
@@ -26,6 +29,9 @@ type EncryptionManager struct {
tracer trace.Tracer
store contracts.DataKeyStorage
usageStats usagestats.Service
cfg *setting.Cfg
dataKeyCache encryption.DataKeyCache
mtx sync.Mutex
@@ -44,6 +50,8 @@ func ProvideEncryptionManager(
usageStats usagestats.Service,
enc cipher.Cipher,
providerConfig encryption.ProviderConfig,
dataKeyCache encryption.DataKeyCache,
cfg *setting.Cfg,
) (contracts.EncryptionManager, error) {
currentProviderID := providerConfig.CurrentProvider
if _, ok := providerConfig.AvailableProviders[currentProviderID]; !ok {
@@ -57,6 +65,8 @@ func ProvideEncryptionManager(
cipher: enc,
log: log.New("encryption"),
providerConfig: providerConfig,
dataKeyCache: dataKeyCache,
cfg: cfg,
}
s.registerUsageMetrics()
@@ -173,6 +183,11 @@ func (s *EncryptionManager) currentDataKey(ctx context.Context, namespace xkube.
// dataKeyByLabel looks up for data key in cache by label.
// Otherwise, it fetches it from database, decrypts it and caches it decrypted.
func (s *EncryptionManager) dataKeyByLabel(ctx context.Context, namespace, label string) (string, []byte, error) {
// 0. Get data key from in-memory cache.
if entry, exists := s.dataKeyCache.GetByLabel(namespace, label); exists && entry.Active {
return entry.Id, entry.DataKey, nil
}
// 1. Get data key from database.
dataKey, err := s.store.GetCurrentDataKey(ctx, namespace, label)
if err != nil {
@@ -194,6 +209,9 @@ func (s *EncryptionManager) dataKeyByLabel(ctx context.Context, namespace, label
return "", nil, err
}
// 3. Store the decrypted data key into the in-memory cache.
s.cacheDataKey(namespace, dataKey, decrypted)
return dataKey.UID, decrypted, nil
}
@@ -240,6 +258,9 @@ func (s *EncryptionManager) newDataKey(ctx context.Context, namespace string, la
return "", nil, err
}
// 4. Store the decrypted data key into the in-memory cache.
s.cacheDataKey(namespace, &dbDataKey, dataKey)
return id, dataKey, nil
}
@@ -303,6 +324,11 @@ func (s *EncryptionManager) dataKeyById(ctx context.Context, namespace, id strin
))
defer span.End()
// 0. Get data key from in-memory cache.
if entry, exists := s.dataKeyCache.GetById(namespace, id); exists && entry.Active {
return entry.DataKey, nil
}
// 1. Get encrypted data key from database.
dataKey, err := s.store.GetDataKey(ctx, namespace, id)
if err != nil {
@@ -321,9 +347,82 @@ func (s *EncryptionManager) dataKeyById(ctx context.Context, namespace, id strin
return nil, err
}
// 3. Store the decrypted data key into the in-memory cache.
s.cacheDataKey(namespace, dataKey, decrypted)
return decrypted, nil
}
// GetProviders returns the manager's configured encryption provider set.
func (s *EncryptionManager) GetProviders() encryption.ProviderConfig {
	return s.providerConfig
}
// FlushCache drops every cached data key for the given namespace. It exists
// so that the consolidation process, which runs above the EncryptionManager,
// can manually invalidate the cache (see the contract's FlushCache comment).
func (s *EncryptionManager) FlushCache(namespace xkube.Namespace) {
	s.dataKeyCache.Flush(namespace.String())
}
// Run periodically evicts expired data keys from the in-memory cache until
// ctx is cancelled. It is intended to run once as a background service and
// returns nil on graceful shutdown.
//
// The previous version created an errgroup but never registered a goroutine
// with it, so gCtx was equivalent to ctx and grp.Wait() always returned nil;
// that dead machinery is removed here without changing behavior.
func (s *EncryptionManager) Run(ctx context.Context) error {
	gc := time.NewTicker(s.cfg.SecretsManagement.DataKeysCacheCleanupInterval)
	defer gc.Stop()

	for {
		select {
		case <-gc.C:
			s.log.Debug("Removing expired data keys from cache...")
			s.dataKeyCache.RemoveExpired()
			s.log.Debug("Removing expired data keys from cache finished successfully")
		case <-ctx.Done():
			s.log.Debug("Grafana is shutting down; stopping...")
			return nil
		}
	}
}
// NB: Much of this was copied or derived from the original implementation in the legacy SecretsService.
//
// Caching a data key is tricky, because at SecretsService level we cannot guarantee
// that a newly created data key has actually been persisted, depending on the different
// use cases that rely on SecretsService encryption and different database engines that
// we have support for, because the data key creation may have happened within a DB TX,
// that may fail afterwards.
//
// Therefore, if we cache a data key that hasn't been persisted with success (and won't),
// and later that one is used for a encryption operation (aside from the DB TX that created
// it), we may end up with data encrypted by a non-persisted data key, which could end up
// in (unrecoverable) data corruption.
//
// So, we cache the data key by id and/or by label, depending on the data key's lifetime,
// assuming that a data key older than a "caution period" should have been persisted.
//
// Look at the comments inline for further details.
// You can also take a look at the issue below for more context:
// https://github.com/grafana/grafana-enterprise/issues/4252
func (s *EncryptionManager) cacheDataKey(namespace string, dataKey *contracts.SecretDataKey, decrypted []byte) {
	cacheEntry := &encryption.DataKeyCacheEntry{
		Namespace: namespace,
		Id:        dataKey.UID,
		Label:     dataKey.Label,
		DataKey:   decrypted,
		Active:    dataKey.Active,
	}

	// Caching by id is always safe: the id index is only consulted by
	// decrypt operations, so a not-yet-persisted key cannot corrupt data.
	s.dataKeyCache.AddById(namespace, cacheEntry)

	// Caching by label feeds encrypt operations, so it is only done once the
	// key is older than the caution period — i.e. old enough that the
	// transaction which created it must have been persisted.
	cautionCutoff := time.Now().Add(-s.cfg.SecretsManagement.DataKeysCacheCautionPeriod)
	if dataKey.Created.Before(cautionCutoff) {
		s.dataKeyCache.AddByLabel(namespace, cacheEntry)
	}
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"errors"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
@@ -201,6 +202,8 @@ func TestEncryptionService_UseCurrentProvider(t *testing.T) {
usageStats,
enc,
ossProviders,
&NoopDataKeyCache{},
cfg,
)
require.NoError(t, err)
@@ -226,6 +229,8 @@ func TestEncryptionService_UseCurrentProvider(t *testing.T) {
usageStats,
enc,
ossProviders,
&NoopDataKeyCache{},
cfg,
)
require.NoError(t, err)
@@ -275,6 +280,8 @@ func TestEncryptionService_SecretKeyVersionUpgrade(t *testing.T) {
usageStats,
enc,
ossProviders,
&NoopDataKeyCache{},
cfgV1,
)
require.NoError(t, err)
@@ -313,6 +320,8 @@ func TestEncryptionService_SecretKeyVersionUpgrade(t *testing.T) {
usageStats,
enc,
ossProvidersV2,
&NoopDataKeyCache{},
cfgV2,
)
require.NoError(t, err)
@@ -368,6 +377,8 @@ func TestEncryptionService_SecretKeyVersionUpgrade(t *testing.T) {
usageStats,
enc,
ossProviders,
&NoopDataKeyCache{},
cfgV1,
)
require.NoError(t, err)
@@ -392,6 +403,8 @@ func TestEncryptionService_SecretKeyVersionUpgrade(t *testing.T) {
usageStats,
enc,
ossProvidersV2,
&NoopDataKeyCache{},
cfgV2,
)
require.NoError(t, err)
@@ -573,6 +586,8 @@ func TestIntegration_SecretsService(t *testing.T) {
usageStats,
enc,
ossProviders,
&NoopDataKeyCache{},
cfg,
)
require.NoError(t, err)
@@ -610,6 +625,8 @@ func TestEncryptionService_ThirdPartyProviders(t *testing.T) {
enc, err := service.ProvideAESGCMCipherService(tracer, usageStats)
require.NoError(t, err)
cfg := &setting.Cfg{}
svc, err := ProvideEncryptionManager(
tracer,
nil,
@@ -621,6 +638,8 @@ func TestEncryptionService_ThirdPartyProviders(t *testing.T) {
encryption.ProviderID("fakeProvider.v1"): &fakeProvider{},
},
},
&NoopDataKeyCache{},
cfg,
)
require.NoError(t, err)
@@ -628,3 +647,88 @@ func TestEncryptionService_ThirdPartyProviders(t *testing.T) {
require.Len(t, encMgr.providerConfig.AvailableProviders, 1)
require.Contains(t, encMgr.providerConfig.AvailableProviders, encryption.ProviderID("fakeProvider.v1"))
}
// TestEncryptionService_FlushCache exercises the full encrypt -> cache ->
// flush -> decrypt round trip against a SQL-backed key store and the real
// OSS DEK cache: after FlushCache the key is gone from both indexes, and a
// subsequent decrypt repopulates the by-id index from the database.
func TestEncryptionService_FlushCache(t *testing.T) {
	ctx := context.Background()
	namespace := xkube.Namespace("test-namespace")
	plaintext := []byte("secret data to encrypt")

	// Set up the encryption manager with a real OSS DEK cache
	testDB := sqlstore.NewTestStore(t, sqlstore.WithMigrator(migrator.New()))
	tracer := noop.NewTracerProvider().Tracer("test")
	database := database.ProvideDatabase(testDB, tracer)
	cfg := &setting.Cfg{
		SecretsManagement: setting.SecretsManagerSettings{
			CurrentEncryptionProvider:  "secret_key.v1",
			ConfiguredKMSProviders:     map[string]map[string]string{"secret_key.v1": {"secret_key": "SW2YcwTIb9zpOOhoPsMm"}},
			DataKeysCacheTTL:           time.Hour,       // Long TTL to ensure keys don't expire during test
			DataKeysCacheCautionPeriod: 0 * time.Second, // Override the caution period for testing
		},
	}
	store, err := encryptionstorage.ProvideDataKeyStorage(database, tracer, nil)
	require.NoError(t, err)
	usageStats := &usagestats.UsageStatsMock{T: t}
	enc, err := service.ProvideAESGCMCipherService(tracer, usageStats)
	require.NoError(t, err)
	ossProviders, err := osskmsproviders.ProvideOSSKMSProviders(cfg, enc)
	require.NoError(t, err)

	// Create a real OSS DEK cache
	dekCache := ProvideOSSDataKeyCache(cfg)
	encMgr, err := ProvideEncryptionManager(
		tracer,
		store,
		usageStats,
		enc,
		ossProviders,
		dekCache,
		cfg,
	)
	require.NoError(t, err)
	svc := encMgr.(*EncryptionManager)

	// Encrypt some data - this will create a DEK and cache it
	// (caution period is zero above, so it is cached by label too)
	encrypted, err := svc.Encrypt(ctx, namespace, plaintext)
	require.NoError(t, err)

	// Verify we can decrypt - this should use the cached key
	decrypted, err := svc.Decrypt(ctx, namespace, encrypted)
	require.NoError(t, err)
	assert.Equal(t, plaintext, decrypted)

	// Get the data key ID from the encrypted payload
	dataKeyID := encrypted.DataKeyID

	// Verify the key is in the cache by checking both by ID and by label
	label := encryption.KeyLabel(svc.providerConfig.CurrentProvider)
	_, existsById := dekCache.GetById(namespace.String(), dataKeyID)
	assert.True(t, existsById, "DEK should be cached by ID before flush")
	_, existsByLabel := dekCache.GetByLabel(namespace.String(), label)
	assert.True(t, existsByLabel, "DEK should be cached by label before flush")

	// Flush the cache for this namespace
	svc.FlushCache(namespace)

	// Verify the cache is empty for this namespace
	_, existsById = dekCache.GetById(namespace.String(), dataKeyID)
	assert.False(t, existsById, "DEK should not be in cache by ID after flush")
	_, existsByLabel = dekCache.GetByLabel(namespace.String(), label)
	assert.False(t, existsByLabel, "DEK should not be in cache by label after flush")

	// Verify we can still decrypt - this should fetch from DB and re-cache
	decrypted, err = svc.Decrypt(ctx, namespace, encrypted)
	require.NoError(t, err)
	assert.Equal(t, plaintext, decrypted)

	// Verify the key is back in the cache after the decrypt operation
	_, existsById = dekCache.GetById(namespace.String(), dataKeyID)
	assert.True(t, existsById, "DEK should be re-cached by ID after decrypt")
}

View File

@@ -0,0 +1,130 @@
package manager
import (
"strconv"
"sync"
"time"
"github.com/grafana/grafana/pkg/registry/apis/secret/encryption"
"github.com/grafana/grafana/pkg/setting"
"github.com/prometheus/client_golang/prometheus"
)
// ossDataKeyCache is an in-memory, namespace-partitioned cache of decrypted
// data encryption keys (DEKs). Entries are indexed both by key id and by key
// label, each as namespace -> key -> entry; all access is guarded by a single
// read-write mutex.
type ossDataKeyCache struct {
	mtx sync.RWMutex
	// byId maps namespace -> data key id -> cache entry.
	byId map[string]map[string]*encryption.DataKeyCacheEntry
	// byLabel maps namespace -> data key label -> cache entry.
	byLabel map[string]map[string]*encryption.DataKeyCacheEntry

	// cacheTTL is stamped onto every entry's Expiration when it is added.
	cacheTTL time.Duration
}

// ProvideOSSDataKeyCache builds the OSS implementation of the DEK cache,
// taking its TTL from the secrets-management settings.
func ProvideOSSDataKeyCache(cfg *setting.Cfg) encryption.DataKeyCache {
	return &ossDataKeyCache{
		byId:     make(map[string]map[string]*encryption.DataKeyCacheEntry),
		byLabel:  make(map[string]map[string]*encryption.DataKeyCacheEntry),
		cacheTTL: cfg.SecretsManagement.DataKeysCacheTTL,
	}
}
// GetById returns the cached entry for the given data key id within
// namespace. Absent or expired entries report exists == false. Every lookup,
// hit or miss, is recorded in the cache read metrics.
func (c *ossDataKeyCache) GetById(namespace, id string) (entry *encryption.DataKeyCacheEntry, exists bool) {
	// Named return so the deferred metric observes the final outcome.
	defer func() { recordCacheRead("byId", exists) }()
	entry, exists = c.lookup(c.byId, namespace, id)
	return entry, exists
}

// GetByLabel returns the cached entry for the given data key label within
// namespace. Absent or expired entries report exists == false. Every lookup,
// hit or miss, is recorded in the cache read metrics.
func (c *ossDataKeyCache) GetByLabel(namespace, label string) (entry *encryption.DataKeyCacheEntry, exists bool) {
	defer func() { recordCacheRead("byLabel", exists) }()
	entry, exists = c.lookup(c.byLabel, namespace, label)
	return entry, exists
}

// lookup is the shared read path for both indexes: it takes the read lock,
// resolves the namespace bucket, and treats expired entries as misses.
// (Eviction of expired entries is left to RemoveExpired.)
func (c *ossDataKeyCache) lookup(index map[string]map[string]*encryption.DataKeyCacheEntry, namespace, key string) (*encryption.DataKeyCacheEntry, bool) {
	c.mtx.RLock()
	defer c.mtx.RUnlock()

	entries, ok := index[namespace]
	if !ok {
		return nil, false
	}
	entry, ok := entries[key]
	if !ok || entry.IsExpired() || entry.Namespace != namespace {
		return nil, false
	}
	return entry, true
}

// recordCacheRead increments the hit/miss counter for one lookup path.
func recordCacheRead(method string, hit bool) {
	cacheReadsCounter.With(prometheus.Labels{
		"hit":    strconv.FormatBool(hit),
		"method": method,
	}).Inc()
}
// AddById caches entry under its key id for the given namespace.
// NOTE: the caller's entry is mutated (Namespace and Expiration are stamped)
// and the pointer itself is stored, so callers must not reuse one entry
// across namespaces.
func (c *ossDataKeyCache) AddById(namespace string, entry *encryption.DataKeyCacheEntry) {
	c.add(c.byId, namespace, entry, entry.Id)
}

// AddByLabel caches entry under its key label for the given namespace.
// The same mutation caveat as AddById applies.
func (c *ossDataKeyCache) AddByLabel(namespace string, entry *encryption.DataKeyCacheEntry) {
	c.add(c.byLabel, namespace, entry, entry.Label)
}

// add is the shared write path for both indexes: under the write lock it
// stamps the entry with a fresh expiration and its namespace, then stores it
// in the namespace bucket of index, creating the bucket on first use.
func (c *ossDataKeyCache) add(index map[string]map[string]*encryption.DataKeyCacheEntry, namespace string, entry *encryption.DataKeyCacheEntry, key string) {
	c.mtx.Lock()
	defer c.mtx.Unlock()

	entry.Expiration = time.Now().Add(c.cacheTTL)
	entry.Namespace = namespace

	entries, ok := index[namespace]
	if !ok {
		entries = make(map[string]*encryption.DataKeyCacheEntry)
		index[namespace] = entries
	}
	entries[key] = entry
}
// RemoveExpired evicts every expired entry from both indexes. It is invoked
// periodically by the cache-cleanup ticker.
func (c *ossDataKeyCache) RemoveExpired() {
	c.mtx.Lock()
	defer c.mtx.Unlock()

	// Both indexes share the same shape, so sweep them with one loop.
	for _, index := range []map[string]map[string]*encryption.DataKeyCacheEntry{c.byId, c.byLabel} {
		for _, bucket := range index {
			for key, cached := range bucket {
				if cached.IsExpired() {
					delete(bucket, key)
				}
			}
		}
	}
}
// Flush drops every cached entry (both indexes) for the given namespace.
// The per-namespace buckets are deleted outright rather than replaced with
// fresh empty maps: the previous approach retained a key in byId/byLabel for
// every namespace ever flushed, while the Get/Add paths already handle a
// missing namespace bucket.
func (c *ossDataKeyCache) Flush(namespace string) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	delete(c.byId, namespace)
	delete(c.byLabel, namespace)
}

View File

@@ -0,0 +1,570 @@
package manager
import (
"testing"
"time"
"github.com/grafana/grafana/pkg/registry/apis/secret/encryption"
"github.com/grafana/grafana/pkg/setting"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestOSSDataKeyCache covers the happy-path add/get behavior of the OSS DEK
// cache for both the by-id and by-label indexes, and verifies that the two
// indexes are fully independent of each other.
func TestOSSDataKeyCache(t *testing.T) {
	t.Parallel()

	settings := setting.NewCfg()
	settings.SecretsManagement = setting.SecretsManagerSettings{
		DataKeysCacheTTL: 999 * time.Hour, // avoid expiration for testing
	}
	cache := ProvideOSSDataKeyCache(settings)

	namespace := "test-namespace"
	// Shared fixture entry. Note the cache mutates it on add (Namespace and
	// Expiration are stamped), which the subtests below assert on.
	entry := &encryption.DataKeyCacheEntry{
		Id:      "key-123",
		Label:   "2024-01-01@provider.key1",
		DataKey: []byte("test-data-key"),
		Active:  true,
	}

	t.Run("AddById and GetById", func(t *testing.T) {
		cache.AddById(namespace, entry)

		retrieved, exists := cache.GetById(namespace, entry.Id)
		require.True(t, exists, "entry should exist after adding")
		assert.Equal(t, entry.Id, retrieved.Id)
		assert.Equal(t, entry.Label, retrieved.Label)
		assert.Equal(t, entry.DataKey, retrieved.DataKey)
		assert.Equal(t, entry.Active, retrieved.Active)
		assert.Equal(t, namespace, retrieved.Namespace)
		assert.True(t, retrieved.Expiration.After(time.Now()), "expiration should be in the future")
	})

	t.Run("AddByLabel and GetByLabel", func(t *testing.T) {
		cache.AddByLabel(namespace, entry)

		retrieved, exists := cache.GetByLabel(namespace, entry.Label)
		require.True(t, exists, "entry should exist after adding")
		assert.Equal(t, entry.Id, retrieved.Id)
		assert.Equal(t, entry.Label, retrieved.Label)
		assert.Equal(t, entry.DataKey, retrieved.DataKey)
		assert.Equal(t, entry.Active, retrieved.Active)
		assert.Equal(t, namespace, retrieved.Namespace)
		assert.True(t, retrieved.Expiration.After(time.Now()), "expiration should be in the future")
	})

	t.Run("GetById and GetByLabel are independent", func(t *testing.T) {
		// Fresh cache so the entries added above don't interfere.
		cache2 := ProvideOSSDataKeyCache(settings)
		ns := "independent-test"
		entryById := &encryption.DataKeyCacheEntry{
			Id:      "id-only-key",
			Label:   "label1",
			DataKey: []byte("data1"),
		}
		entryByLabel := &encryption.DataKeyCacheEntry{
			Id:      "id2",
			Label:   "label-only-key",
			DataKey: []byte("data2"),
		}
		cache2.AddById(ns, entryById)
		cache2.AddByLabel(ns, entryByLabel)

		// Should find by ID
		retrieved, exists := cache2.GetById(ns, entryById.Id)
		require.True(t, exists)
		assert.Equal(t, entryById.Id, retrieved.Id)
		// Should not find by label that wasn't added via AddByLabel
		_, exists = cache2.GetByLabel(ns, entryById.Label)
		assert.False(t, exists)

		// Should find by label
		retrieved, exists = cache2.GetByLabel(ns, entryByLabel.Label)
		require.True(t, exists)
		assert.Equal(t, entryByLabel.Label, retrieved.Label)
		// Should not find by ID that wasn't added via AddById
		_, exists = cache2.GetById(ns, entryByLabel.Id)
		assert.False(t, exists)
	})
}
// TestOSSDataKeyCache_FalseConditions exercises every miss path of the OSS
// data key cache: unknown namespace, unknown id/label, expired entries, and
// the defensive check that rejects entries whose stored namespace does not
// match the lookup namespace.
func TestOSSDataKeyCache_FalseConditions(t *testing.T) {
	t.Parallel()
	settings := setting.NewCfg()
	settings.SecretsManagement = setting.SecretsManagerSettings{
		DataKeysCacheTTL: 999 * time.Hour, // long TTL so entries never expire unless a subtest wants them to
	}
	cache := ProvideOSSDataKeyCache(settings)
	namespace := "test-namespace"
	entry := &encryption.DataKeyCacheEntry{
		Id:      "key-123",
		Label:   "2024-01-01@provider.key1",
		DataKey: []byte("test-data-key"),
		Active:  true,
	}
	t.Run("GetById returns false for non-existent namespace", func(t *testing.T) {
		_, exists := cache.GetById("non-existent-namespace", "any-id")
		assert.False(t, exists)
	})
	t.Run("GetById returns false for non-existent id", func(t *testing.T) {
		cache.AddById(namespace, entry)
		_, exists := cache.GetById(namespace, "non-existent-id")
		assert.False(t, exists)
	})
	t.Run("GetByLabel returns false for non-existent namespace", func(t *testing.T) {
		_, exists := cache.GetByLabel("non-existent-namespace", "any-label")
		assert.False(t, exists)
	})
	t.Run("GetByLabel returns false for non-existent label", func(t *testing.T) {
		cache.AddByLabel(namespace, entry)
		_, exists := cache.GetByLabel(namespace, "non-existent-label")
		assert.False(t, exists)
	})
	t.Run("GetById returns false for expired entry", func(t *testing.T) {
		// Use a dedicated cache with a tiny TTL so the entry expires quickly.
		shortTTLSettings := setting.NewCfg()
		shortTTLSettings.SecretsManagement = setting.SecretsManagerSettings{
			DataKeysCacheTTL: 1 * time.Millisecond,
		}
		shortCache := ProvideOSSDataKeyCache(shortTTLSettings)
		namespace := "test-ns"
		expiredEntry := &encryption.DataKeyCacheEntry{
			Id:      "expired-key",
			Label:   "expired-label",
			DataKey: []byte("expired-data"),
		}
		shortCache.AddById(namespace, expiredEntry)
		time.Sleep(10 * time.Millisecond) // comfortably past the 1ms TTL
		_, exists := shortCache.GetById(namespace, expiredEntry.Id)
		assert.False(t, exists, "should return false for expired entry")
	})
	t.Run("GetByLabel returns false for expired entry", func(t *testing.T) {
		// Same as above, but via the label index.
		shortTTLSettings := setting.NewCfg()
		shortTTLSettings.SecretsManagement = setting.SecretsManagerSettings{
			DataKeysCacheTTL: 1 * time.Millisecond,
		}
		shortCache := ProvideOSSDataKeyCache(shortTTLSettings)
		namespace := "test-ns"
		expiredEntry := &encryption.DataKeyCacheEntry{
			Id:      "expired-key",
			Label:   "expired-label",
			DataKey: []byte("expired-data"),
		}
		shortCache.AddByLabel(namespace, expiredEntry)
		time.Sleep(10 * time.Millisecond)
		_, exists := shortCache.GetByLabel(namespace, expiredEntry.Label)
		assert.False(t, exists, "should return false for expired entry")
	})
	t.Run("GetById returns false when entry namespace doesn't match", func(t *testing.T) {
		// This tests the entry.Namespace != namespace check in GetById.
		// This is a defensive check that shouldn't normally happen if AddById works correctly.
		testCache := ProvideOSSDataKeyCache(settings).(*ossDataKeyCache)
		// Manually insert an entry with mismatched namespace to test the defensive check,
		// bypassing AddById (which would presumably set Namespace correctly — see AddById).
		mismatchedEntry := &encryption.DataKeyCacheEntry{
			Id:         "test-id",
			Label:      "test-label",
			DataKey:    []byte("test-data"),
			Namespace:  "wrong-namespace",
			Expiration: time.Now().Add(999 * time.Hour),
		}
		testCache.mtx.Lock()
		testCache.byId["correct-namespace"] = map[string]*encryption.DataKeyCacheEntry{
			mismatchedEntry.Id: mismatchedEntry,
		}
		testCache.mtx.Unlock()
		_, exists := testCache.GetById("correct-namespace", mismatchedEntry.Id)
		assert.False(t, exists, "should return false when entry namespace doesn't match lookup namespace")
	})
	t.Run("GetByLabel returns false when entry namespace doesn't match", func(t *testing.T) {
		// This tests the entry.Namespace != namespace check in GetByLabel.
		testCache := ProvideOSSDataKeyCache(settings).(*ossDataKeyCache)
		// Manually insert an entry with mismatched namespace to test the defensive check.
		mismatchedEntry := &encryption.DataKeyCacheEntry{
			Id:         "test-id",
			Label:      "test-label",
			DataKey:    []byte("test-data"),
			Namespace:  "wrong-namespace",
			Expiration: time.Now().Add(999 * time.Hour),
		}
		testCache.mtx.Lock()
		testCache.byLabel["correct-namespace"] = map[string]*encryption.DataKeyCacheEntry{
			"test-label": mismatchedEntry,
		}
		testCache.mtx.Unlock()
		_, exists := testCache.GetByLabel("correct-namespace", mismatchedEntry.Label)
		assert.False(t, exists, "should return false when entry namespace doesn't match lookup namespace")
	})
}
// TestOSSDataKeyCache_NamespaceIsolation verifies that entries sharing the
// same id or label do not collide across namespaces, and that an entry added
// under one namespace is invisible to lookups from another.
func TestOSSDataKeyCache_NamespaceIsolation(t *testing.T) {
	t.Parallel()
	settings := setting.NewCfg()
	settings.SecretsManagement = setting.SecretsManagerSettings{
		DataKeysCacheTTL: 999 * time.Hour, // avoid expiration during the test
	}
	cache := ProvideOSSDataKeyCache(settings)
	namespace1 := "namespace-1"
	namespace2 := "namespace-2"
	// Both entries deliberately share Id and Label; DataKey/Active differ so
	// each assertion can tell which namespace a lookup resolved to.
	entry1 := &encryption.DataKeyCacheEntry{
		Id:      "shared-id",
		Label:   "shared-label",
		DataKey: []byte("data-from-ns1"),
		Active:  true,
	}
	entry2 := &encryption.DataKeyCacheEntry{
		Id:      "shared-id",
		Label:   "shared-label",
		DataKey: []byte("data-from-ns2"),
		Active:  false,
	}
	t.Run("entries with same ID in different namespaces are isolated", func(t *testing.T) {
		cache.AddById(namespace1, entry1)
		cache.AddById(namespace2, entry2)
		retrieved1, exists := cache.GetById(namespace1, entry1.Id)
		require.True(t, exists)
		assert.Equal(t, entry1.DataKey, retrieved1.DataKey)
		assert.Equal(t, namespace1, retrieved1.Namespace)
		assert.True(t, retrieved1.Active)
		retrieved2, exists := cache.GetById(namespace2, entry2.Id)
		require.True(t, exists)
		assert.Equal(t, entry2.DataKey, retrieved2.DataKey)
		assert.Equal(t, namespace2, retrieved2.Namespace)
		assert.False(t, retrieved2.Active)
	})
	t.Run("entries with same label in different namespaces are isolated", func(t *testing.T) {
		cache.AddByLabel(namespace1, entry1)
		cache.AddByLabel(namespace2, entry2)
		retrieved1, exists := cache.GetByLabel(namespace1, entry1.Label)
		require.True(t, exists)
		assert.Equal(t, entry1.DataKey, retrieved1.DataKey)
		assert.Equal(t, namespace1, retrieved1.Namespace)
		assert.True(t, retrieved1.Active)
		retrieved2, exists := cache.GetByLabel(namespace2, entry2.Label)
		require.True(t, exists)
		assert.Equal(t, entry2.DataKey, retrieved2.DataKey)
		assert.Equal(t, namespace2, retrieved2.Namespace)
		assert.False(t, retrieved2.Active)
	})
	t.Run("cannot retrieve entry from wrong namespace", func(t *testing.T) {
		// flush both namespaces since the cache is full of stuff now
		cache.Flush(namespace1)
		cache.Flush(namespace2)
		cache.AddById(namespace1, entry1)
		_, exists := cache.GetById(namespace2, entry1.Id)
		assert.False(t, exists, "should not find entry from different namespace")
		cache.AddByLabel(namespace1, entry1)
		_, exists = cache.GetByLabel(namespace2, entry1.Label)
		assert.False(t, exists, "should not find entry from different namespace")
	})
}
// TestOSSDataKeyCache_Expiration verifies TTL behavior: entries become
// invisible to lookups once the TTL elapses, and RemoveExpired physically
// evicts only expired entries, across one or several namespaces.
func TestOSSDataKeyCache_Expiration(t *testing.T) {
	t.Parallel()
	t.Run("entries expire after TTL", func(t *testing.T) {
		settings := setting.NewCfg()
		settings.SecretsManagement = setting.SecretsManagerSettings{
			DataKeysCacheTTL: 50 * time.Millisecond,
		}
		cache := ProvideOSSDataKeyCache(settings)
		namespace := "test-ns"
		entry := &encryption.DataKeyCacheEntry{
			Id:      "expiring-key",
			Label:   "expiring-label",
			DataKey: []byte("expiring-data"),
		}
		cache.AddById(namespace, entry)
		cache.AddByLabel(namespace, entry)
		// Should exist immediately
		_, exists := cache.GetById(namespace, entry.Id)
		assert.True(t, exists, "entry should exist immediately after adding")
		_, exists = cache.GetByLabel(namespace, entry.Label)
		assert.True(t, exists, "entry should exist immediately after adding")
		// Wait for expiration (2x the TTL to reduce timing flakiness)
		time.Sleep(100 * time.Millisecond)
		// Should not exist after expiration
		_, exists = cache.GetById(namespace, entry.Id)
		assert.False(t, exists, "entry should not exist after TTL expires")
		_, exists = cache.GetByLabel(namespace, entry.Label)
		assert.False(t, exists, "entry should not exist after TTL expires")
	})
	t.Run("RemoveExpired removes only expired entries", func(t *testing.T) {
		settings := setting.NewCfg()
		settings.SecretsManagement = setting.SecretsManagerSettings{
			DataKeysCacheTTL: 50 * time.Millisecond,
		}
		cache := ProvideOSSDataKeyCache(settings)
		namespace := "test-ns"
		// Add entries that will expire
		expiredEntry1 := &encryption.DataKeyCacheEntry{
			Id:      "expired-1",
			Label:   "expired-label-1",
			DataKey: []byte("expired-data-1"),
		}
		expiredEntry2 := &encryption.DataKeyCacheEntry{
			Id:      "expired-2",
			Label:   "expired-label-2",
			DataKey: []byte("expired-data-2"),
		}
		cache.AddById(namespace, expiredEntry1)
		cache.AddByLabel(namespace, expiredEntry2)
		// Wait for expiration
		time.Sleep(100 * time.Millisecond)
		// Add fresh entries
		freshEntry1 := &encryption.DataKeyCacheEntry{
			Id:      "fresh-1",
			Label:   "fresh-label-1",
			DataKey: []byte("fresh-data-1"),
		}
		freshEntry2 := &encryption.DataKeyCacheEntry{
			Id:      "fresh-2",
			Label:   "fresh-label-2",
			DataKey: []byte("fresh-data-2"),
		}
		cache.AddById(namespace, freshEntry1)
		cache.AddByLabel(namespace, freshEntry2)
		// Before RemoveExpired, expired entries still exist in the map
		// but GetById/GetByLabel return false due to IsExpired() check
		// Call RemoveExpired
		cache.RemoveExpired()
		// Fresh entries should still exist
		_, exists := cache.GetById(namespace, freshEntry1.Id)
		assert.True(t, exists, "fresh entry should still exist after RemoveExpired")
		_, exists = cache.GetByLabel(namespace, freshEntry2.Label)
		assert.True(t, exists, "fresh entry should still exist after RemoveExpired")
		// Expired entries should not exist; reach into the concrete type to
		// confirm they were physically evicted, not merely hidden by Get*.
		ossCache := cache.(*ossDataKeyCache)
		_, exists = ossCache.byId[namespace][expiredEntry1.Id]
		assert.False(t, exists, "expired entry should not exist after RemoveExpired")
		_, exists = ossCache.byLabel[namespace][expiredEntry2.Label]
		assert.False(t, exists, "expired entry should not exist after RemoveExpired")
	})
	t.Run("RemoveExpired handles multiple namespaces", func(t *testing.T) {
		settings := setting.NewCfg()
		settings.SecretsManagement = setting.SecretsManagerSettings{
			DataKeysCacheTTL: 50 * time.Millisecond,
		}
		cache := ProvideOSSDataKeyCache(settings)
		ns1 := "namespace-1"
		ns2 := "namespace-2"
		ns1ExpiredEntry := &encryption.DataKeyCacheEntry{
			Id:      "expired-key-ns1",
			Label:   "expired-label-ns1",
			DataKey: []byte("expired-data"),
		}
		ns2ExpiredEntry := &encryption.DataKeyCacheEntry{
			Id:      "expired-key-ns2",
			Label:   "expired-label-ns2",
			DataKey: []byte("expired-data"),
		}
		cache.AddById(ns1, ns1ExpiredEntry)
		cache.AddByLabel(ns1, ns1ExpiredEntry)
		cache.AddById(ns2, ns2ExpiredEntry)
		cache.AddByLabel(ns2, ns2ExpiredEntry)
		// Let the first batch expire before adding fresh entries.
		time.Sleep(100 * time.Millisecond)
		ns1FreshEntry := &encryption.DataKeyCacheEntry{
			Id:      "fresh-key-ns1",
			Label:   "fresh-label-ns1",
			DataKey: []byte("fresh-data-ns1"),
		}
		ns2FreshEntry := &encryption.DataKeyCacheEntry{
			Id:      "fresh-key-ns2",
			Label:   "fresh-label-ns2",
			DataKey: []byte("fresh-data-ns2"),
		}
		cache.AddById(ns1, ns1FreshEntry)
		cache.AddByLabel(ns1, ns1FreshEntry)
		cache.AddById(ns2, ns2FreshEntry)
		cache.AddByLabel(ns2, ns2FreshEntry)
		cache.RemoveExpired()
		// Fresh entries in both namespaces should exist
		_, exists := cache.GetById(ns1, ns1FreshEntry.Id)
		assert.True(t, exists)
		_, exists = cache.GetByLabel(ns1, ns1FreshEntry.Label)
		assert.True(t, exists)
		_, exists = cache.GetById(ns2, ns2FreshEntry.Id)
		assert.True(t, exists)
		_, exists = cache.GetByLabel(ns2, ns2FreshEntry.Label)
		assert.True(t, exists)
		// Expired entries in both namespaces should not exist
		ossCache := cache.(*ossDataKeyCache)
		_, exists = ossCache.byId[ns1][ns1ExpiredEntry.Id]
		assert.False(t, exists)
		_, exists = ossCache.byId[ns2][ns2ExpiredEntry.Id]
		assert.False(t, exists)
		_, exists = ossCache.byLabel[ns1][ns1ExpiredEntry.Label]
		assert.False(t, exists)
		_, exists = ossCache.byLabel[ns2][ns2ExpiredEntry.Label]
		assert.False(t, exists)
	})
}
// Test Flush()
// TestOSSDataKeyCache_Flush verifies that Flush clears exactly one namespace,
// leaves other namespaces untouched, tolerates unknown namespaces, and that
// the cache remains usable after a flush.
func TestOSSDataKeyCache_Flush(t *testing.T) {
	t.Parallel()
	settings := setting.NewCfg()
	settings.SecretsManagement = setting.SecretsManagerSettings{
		DataKeysCacheTTL: 999 * time.Hour, // long TTL so only Flush can remove entries
	}
	cache := ProvideOSSDataKeyCache(settings)
	namespace1 := "namespace-1"
	namespace2 := "namespace-2"
	entry1 := &encryption.DataKeyCacheEntry{
		Id:      "key-1",
		Label:   "label-1",
		DataKey: []byte("data-1"),
	}
	entry2 := &encryption.DataKeyCacheEntry{
		Id:      "key-2",
		Label:   "label-2",
		DataKey: []byte("data-2"),
	}
	t.Run("Flush removes all entries from specified namespace", func(t *testing.T) {
		cache.AddById(namespace1, entry1)
		cache.AddByLabel(namespace1, entry1)
		// Verify entries exist
		_, exists := cache.GetById(namespace1, entry1.Id)
		require.True(t, exists)
		_, exists = cache.GetByLabel(namespace1, entry1.Label)
		require.True(t, exists)
		// Flush namespace1
		cache.Flush(namespace1)
		// Entries should no longer exist
		_, exists = cache.GetById(namespace1, entry1.Id)
		assert.False(t, exists, "entry should not exist after flush")
		_, exists = cache.GetByLabel(namespace1, entry1.Label)
		assert.False(t, exists, "entry should not exist after flush")
	})
	t.Run("Flush only affects specified namespace", func(t *testing.T) {
		cache.AddById(namespace1, entry1)
		cache.AddByLabel(namespace1, entry1)
		cache.AddById(namespace2, entry2)
		cache.AddByLabel(namespace2, entry2)
		// Flush only namespace1
		cache.Flush(namespace1)
		// namespace1 entries should not exist
		_, exists := cache.GetById(namespace1, entry1.Id)
		assert.False(t, exists)
		_, exists = cache.GetByLabel(namespace1, entry1.Label)
		assert.False(t, exists)
		// namespace2 entries should still exist
		_, exists = cache.GetById(namespace2, entry2.Id)
		assert.True(t, exists, "entries in other namespace should not be affected")
		_, exists = cache.GetByLabel(namespace2, entry2.Label)
		assert.True(t, exists, "entries in other namespace should not be affected")
	})
	t.Run("Flush on non-existent namespace does not panic", func(t *testing.T) {
		assert.NotPanics(t, func() {
			cache.Flush("non-existent-namespace")
		})
	})
	t.Run("can add entries after flush", func(t *testing.T) {
		cache.AddById(namespace1, entry1)
		cache.Flush(namespace1)
		// Add new entry after flush
		newEntry := &encryption.DataKeyCacheEntry{
			Id:      "new-key",
			Label:   "new-label",
			DataKey: []byte("new-data"),
		}
		cache.AddById(namespace1, newEntry)
		// New entry should exist
		_, exists := cache.GetById(namespace1, "new-key")
		assert.True(t, exists, "should be able to add entries after flush")
	})
}

View File

@@ -0,0 +1,27 @@
package manager
import "github.com/grafana/grafana/pkg/registry/apis/secret/encryption"
// This is being used as the data key cache in both OSS and Enterprise while we discuss security requirements for DEK caching.
// noopDataKeyCache implements encryption.DataKeyCache without storing
// anything: every lookup misses and every mutation is discarded, which
// effectively disables DEK caching.
type noopDataKeyCache struct {
}

// ProvideNoopDataKeyCache returns the no-op cache for dependency-injection consumers.
func ProvideNoopDataKeyCache() encryption.DataKeyCache {
	return &noopDataKeyCache{}
}

// GetById always reports a cache miss.
func (c *noopDataKeyCache) GetById(_ string, _ string) (*encryption.DataKeyCacheEntry, bool) {
	return nil, false
}

// GetByLabel always reports a cache miss.
func (c *noopDataKeyCache) GetByLabel(_ string, _ string) (*encryption.DataKeyCacheEntry, bool) {
	return nil, false
}

// AddById discards the entry.
func (c *noopDataKeyCache) AddById(_ string, _ *encryption.DataKeyCacheEntry) {}

// AddByLabel discards the entry.
func (c *noopDataKeyCache) AddByLabel(_ string, _ *encryption.DataKeyCacheEntry) {}

// RemoveExpired has nothing to clean up.
func (c *noopDataKeyCache) RemoveExpired() {}

// Flush has nothing to flush.
func (c *noopDataKeyCache) Flush(_ string) {}

View File

@@ -7,6 +7,7 @@ import (
"go.opentelemetry.io/otel/trace/noop"
"github.com/grafana/grafana/pkg/infra/usagestats"
"github.com/grafana/grafana/pkg/registry/apis/secret/encryption"
"github.com/grafana/grafana/pkg/registry/apis/secret/encryption/cipher/service"
osskmsproviders "github.com/grafana/grafana/pkg/registry/apis/secret/encryption/kmsproviders"
"github.com/grafana/grafana/pkg/services/sqlstore"
@@ -47,8 +48,32 @@ func setupTestService(tb testing.TB) *EncryptionManager {
usageStats,
enc,
ossProviders,
&NoopDataKeyCache{},
cfg,
)
require.NoError(tb, err)
return encMgr.(*EncryptionManager)
}
// NoopDataKeyCache is a test stub satisfying encryption.DataKeyCache: it
// keeps no state, so every lookup misses and every write is ignored.
type NoopDataKeyCache struct{}

// GetById never finds an entry.
func (c *NoopDataKeyCache) GetById(_, _ string) (*encryption.DataKeyCacheEntry, bool) {
	return nil, false
}

// GetByLabel never finds an entry.
func (c *NoopDataKeyCache) GetByLabel(_, _ string) (*encryption.DataKeyCacheEntry, bool) {
	return nil, false
}

// AddById drops the entry on the floor.
func (c *NoopDataKeyCache) AddById(_ string, _ *encryption.DataKeyCacheEntry) {}

// AddByLabel drops the entry on the floor.
func (c *NoopDataKeyCache) AddByLabel(_ string, _ *encryption.DataKeyCacheEntry) {}

// RemoveExpired is a no-op.
func (c *NoopDataKeyCache) RemoveExpired() {}

// Flush is a no-op.
func (c *NoopDataKeyCache) Flush(_ string) {}

View File

@@ -40,3 +40,25 @@ func (id ProviderID) Kind() (string, error) {
func KeyLabel(providerID ProviderID) string {
return fmt.Sprintf("%s@%s", time.Now().Format("2006-01-02"), providerID)
}
// DataKeyCache is a per-namespace cache of data encryption key entries,
// addressable both by key id and by key label.
type DataKeyCache interface {
	// GetById returns the cached entry for the namespace/id pair, if present.
	GetById(namespace, id string) (*DataKeyCacheEntry, bool)
	// GetByLabel returns the cached entry for the namespace/label pair, if present.
	GetByLabel(namespace, label string) (*DataKeyCacheEntry, bool)
	// AddById stores the entry under its id within the namespace.
	AddById(namespace string, entry *DataKeyCacheEntry)
	// AddByLabel stores the entry under its label within the namespace.
	AddByLabel(namespace string, entry *DataKeyCacheEntry)
	// RemoveExpired evicts entries whose expiration time has passed.
	RemoveExpired()
	// Flush removes all entries belonging to the given namespace.
	Flush(namespace string)
}

// DataKeyCacheEntry is a single cached data key plus its cache metadata.
type DataKeyCacheEntry struct {
	Namespace  string    // namespace the entry belongs to
	Id         string    // unique key id
	Label      string    // key label; see KeyLabel for the "date@provider" format
	DataKey    []byte    // the data encryption key material
	Active     bool      // whether this is flagged as the active key
	Expiration time.Time // instant after which the entry counts as expired
}
// IsExpired reports whether the entry's expiration instant lies strictly in
// the past relative to the current wall clock.
func (e DataKeyCacheEntry) IsExpired() bool {
	return time.Now().After(e.Expiration)
}

View File

@@ -62,7 +62,7 @@ func setupTestService(t *testing.T, cfg *setting.Cfg) (*OSSKeeperService, error)
ossProviders, err := osskmsproviders.ProvideOSSKMSProviders(cfg, enc)
require.NoError(t, err)
encryptionManager, err := manager.ProvideEncryptionManager(tracer, dataKeyStore, usageStats, enc, ossProviders)
encryptionManager, err := manager.ProvideEncryptionManager(tracer, dataKeyStore, usageStats, enc, ossProviders, &manager.NoopDataKeyCache{}, cfg)
require.NoError(t, err)
// Initialize the keeper service

View File

@@ -53,6 +53,9 @@ func (s *ConsolidationService) Consolidate(ctx context.Context) (err error) {
return fmt.Errorf("disabling all data keys: %w", err)
}
// Keep track of which namespaces we have already flushed so we get to take advantage of caching the new values
flushedNamespaces := make(map[string]bool)
// List all encrypted values.
encryptedValues, err := s.globalEncryptedValueStore.ListAll(ctx, contracts.ListOpts{}, nil)
if err != nil {
@@ -60,6 +63,12 @@ func (s *ConsolidationService) Consolidate(ctx context.Context) (err error) {
}
for _, ev := range encryptedValues {
// Flush the cache for this namespace if we haven't already
if !flushedNamespaces[ev.Namespace] {
s.encryptionManager.FlushCache(xkube.Namespace(ev.Namespace))
flushedNamespaces[ev.Namespace] = true
}
// Decrypt the value using its old data key.
decryptedValue, err := s.encryptionManager.Decrypt(ctx, xkube.Namespace(ev.Namespace), ev.EncryptedPayload)
if err != nil {

View File

@@ -121,6 +121,8 @@ func Setup(t *testing.T, opts ...func(*SetupConfig)) Sut {
usageStats,
enc,
ossProviders,
&manager.NoopDataKeyCache{},
cfg,
)
require.NoError(t, err)

View File

@@ -488,7 +488,8 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api
if err != nil {
return nil, err
}
encryptionManager, err := manager2.ProvideEncryptionManager(tracer, dataKeyStorage, usageStats, cipher, providerConfig)
dataKeyCache := manager2.ProvideNoopDataKeyCache()
encryptionManager, err := manager2.ProvideEncryptionManager(tracer, dataKeyStorage, usageStats, cipher, providerConfig, dataKeyCache, cfg)
if err != nil {
return nil, err
}
@@ -1154,7 +1155,8 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac
if err != nil {
return nil, err
}
encryptionManager, err := manager2.ProvideEncryptionManager(tracer, dataKeyStorage, usageStats, cipher, providerConfig)
dataKeyCache := manager2.ProvideNoopDataKeyCache()
encryptionManager, err := manager2.ProvideEncryptionManager(tracer, dataKeyStorage, usageStats, cipher, providerConfig, dataKeyCache, cfg)
if err != nil {
return nil, err
}
@@ -1716,7 +1718,8 @@ func InitializeForCLI(ctx context.Context, cfg *setting.Cfg) (Runner, error) {
if err != nil {
return Runner{}, err
}
encryptionManager, err := manager2.ProvideEncryptionManager(tracer, dataKeyStorage, usageStats, cipher, providerConfig)
dataKeyCache := manager2.ProvideNoopDataKeyCache()
encryptionManager, err := manager2.ProvideEncryptionManager(tracer, dataKeyStorage, usageStats, cipher, providerConfig, dataKeyCache, cfg)
if err != nil {
return Runner{}, err
}

View File

@@ -18,6 +18,7 @@ import (
"github.com/grafana/grafana/pkg/registry/apis/secret"
"github.com/grafana/grafana/pkg/registry/apis/secret/contracts"
gsmKMSProviders "github.com/grafana/grafana/pkg/registry/apis/secret/encryption/kmsproviders"
gsmEncryptionManager "github.com/grafana/grafana/pkg/registry/apis/secret/encryption/manager"
"github.com/grafana/grafana/pkg/registry/apis/secret/secretkeeper"
secretService "github.com/grafana/grafana/pkg/registry/apis/secret/service"
"github.com/grafana/grafana/pkg/registry/apps/advisor"
@@ -152,6 +153,8 @@ var wireExtsBasicSet = wire.NewSet(
aggregatorrunner.ProvideNoopAggregatorConfigurator,
apisregistry.WireSetExts,
gsmKMSProviders.ProvideOSSKMSProviders,
//gsmEncryptionManager.ProvideOSSDataKeyCache, // Temporarily use noop cache
gsmEncryptionManager.ProvideNoopDataKeyCache,
secret.ProvideSecureValueClient,
provisioningExtras,
configProviderExtras,

View File

@@ -36,6 +36,9 @@ var client = &http.Client{
Transport: &http.Transport{Proxy: http.ProxyFromEnvironment},
}
// CreateDashboardSnapshot creates a snapshot when running Grafana in regular mode.
// It validates the user and dashboard exist before creating the snapshot.
// This mode supports both local and external snapshots.
func CreateDashboardSnapshot(c *contextmodel.ReqContext, cfg snapshot.SnapshotSharingOptions, cmd CreateDashboardSnapshotCommand, svc Service) {
if !cfg.SnapshotsEnabled {
c.JsonApiErr(http.StatusForbidden, "Dashboard Snapshots are disabled", nil)
@@ -43,6 +46,7 @@ func CreateDashboardSnapshot(c *contextmodel.ReqContext, cfg snapshot.SnapshotSh
}
uid := cmd.Dashboard.GetNestedString("uid")
user, err := identity.GetRequester(c.Req.Context())
if err != nil {
c.JsonApiErr(http.StatusBadRequest, "missing user in context", nil)
@@ -59,21 +63,18 @@ func CreateDashboardSnapshot(c *contextmodel.ReqContext, cfg snapshot.SnapshotSh
return
}
cmd.ExternalURL = ""
cmd.OrgID = user.GetOrgID()
cmd.UserID, _ = identity.UserIdentifier(user.GetID())
if cmd.Name == "" {
cmd.Name = "Unnamed snapshot"
}
var snapshotUrl string
cmd.ExternalURL = ""
cmd.OrgID = user.GetOrgID()
cmd.UserID, _ = identity.UserIdentifier(user.GetID())
originalDashboardURL, err := createOriginalDashboardURL(&cmd)
if err != nil {
c.JsonApiErr(http.StatusInternalServerError, "Invalid app URL", err)
return
}
var snapshotURL string
if cmd.External {
// Handle external snapshot creation
if !cfg.ExternalEnabled {
c.JsonApiErr(http.StatusForbidden, "External dashboard creation is disabled", nil)
return
@@ -85,40 +86,83 @@ func CreateDashboardSnapshot(c *contextmodel.ReqContext, cfg snapshot.SnapshotSh
return
}
snapshotUrl = resp.Url
cmd.Key = resp.Key
cmd.DeleteKey = resp.DeleteKey
cmd.ExternalURL = resp.Url
cmd.ExternalDeleteURL = resp.DeleteUrl
cmd.Dashboard = &common.Unstructured{}
snapshotURL = resp.Url
metrics.MApiDashboardSnapshotExternal.Inc()
} else {
cmd.Dashboard.SetNestedField(originalDashboardURL, "snapshot", "originalUrl")
if cmd.Key == "" {
var err error
cmd.Key, err = util.GetRandomString(32)
if err != nil {
c.JsonApiErr(http.StatusInternalServerError, "Could not generate random string", err)
return
}
// Handle local snapshot creation
originalDashboardURL, err := createOriginalDashboardURL(&cmd)
if err != nil {
c.JsonApiErr(http.StatusInternalServerError, "Invalid app URL", err)
return
}
if cmd.DeleteKey == "" {
var err error
cmd.DeleteKey, err = util.GetRandomString(32)
if err != nil {
c.JsonApiErr(http.StatusInternalServerError, "Could not generate random string", err)
return
}
snapshotURL, err = prepareLocalSnapshot(&cmd, originalDashboardURL)
if err != nil {
c.JsonApiErr(http.StatusInternalServerError, "Could not generate random string", err)
return
}
snapshotUrl = setting.ToAbsUrl("dashboard/snapshot/" + cmd.Key)
metrics.MApiDashboardSnapshotCreate.Inc()
}
saveAndRespond(c, svc, cmd, snapshotURL)
}
// CreateDashboardSnapshotPublic creates a snapshot when running Grafana in public mode.
// In public mode, there is no user or dashboard information to validate.
// Only local snapshots are supported (external snapshots are not available).
func CreateDashboardSnapshotPublic(c *contextmodel.ReqContext, cfg snapshot.SnapshotSharingOptions, cmd CreateDashboardSnapshotCommand, svc Service) {
	// Snapshots can be disabled instance-wide; reject early with 403.
	if !cfg.SnapshotsEnabled {
		c.JsonApiErr(http.StatusForbidden, "Dashboard Snapshots are disabled", nil)
		return
	}
	if cmd.Name == "" {
		cmd.Name = "Unnamed snapshot"
	}
	// No original dashboard URL is available in public mode, hence "".
	snapshotURL, err := prepareLocalSnapshot(&cmd, "")
	if err != nil {
		// prepareLocalSnapshot only fails when random key generation fails.
		c.JsonApiErr(http.StatusInternalServerError, "Could not generate random string", err)
		return
	}
	metrics.MApiDashboardSnapshotCreate.Inc()
	saveAndRespond(c, svc, cmd, snapshotURL)
}
// prepareLocalSnapshot prepares the command for a local snapshot and returns the snapshot URL.
// It records the original dashboard URL on the snapshot payload and generates
// random access/delete keys when the caller did not supply them. The only
// error source is random key generation.
func prepareLocalSnapshot(cmd *CreateDashboardSnapshotCommand, originalDashboardURL string) (string, error) {
	cmd.Dashboard.SetNestedField(originalDashboardURL, "snapshot", "originalUrl")
	if cmd.Key == "" {
		// Random 32-char key; it identifies the snapshot in its public URL.
		key, err := util.GetRandomString(32)
		if err != nil {
			return "", err
		}
		cmd.Key = key
	}
	if cmd.DeleteKey == "" {
		// Separate random key used by the snapshots-delete endpoint.
		deleteKey, err := util.GetRandomString(32)
		if err != nil {
			return "", err
		}
		cmd.DeleteKey = deleteKey
	}
	return setting.ToAbsUrl("dashboard/snapshot/" + cmd.Key), nil
}
// saveAndRespond saves the snapshot and sends the response.
func saveAndRespond(c *contextmodel.ReqContext, svc Service, cmd CreateDashboardSnapshotCommand, snapshotURL string) {
result, err := svc.CreateDashboardSnapshot(c.Req.Context(), &cmd)
if err != nil {
c.JsonApiErr(http.StatusInternalServerError, "Failed to create snapshot", err)
@@ -128,7 +172,7 @@ func CreateDashboardSnapshot(c *contextmodel.ReqContext, cfg snapshot.SnapshotSh
c.JSON(http.StatusOK, snapshot.DashboardCreateResponse{
Key: result.Key,
DeleteKey: result.DeleteKey,
URL: snapshotUrl,
URL: snapshotURL,
DeleteURL: setting.ToAbsUrl("api/snapshots-delete/" + result.DeleteKey),
})
}

View File

@@ -20,40 +20,30 @@ import (
"github.com/grafana/grafana/pkg/web"
)
func TestCreateDashboardSnapshot_DashboardNotFound(t *testing.T) {
mockService := &MockService{}
cfg := snapshot.SnapshotSharingOptions{
SnapshotsEnabled: true,
ExternalEnabled: false,
func createTestDashboard(t *testing.T) *common.Unstructured {
t.Helper()
dashboard := &common.Unstructured{}
dashboardData := map[string]any{
"uid": "test-dashboard-uid",
"id": 123,
}
testUser := &user.SignedInUser{
dashboardBytes, _ := json.Marshal(dashboardData)
_ = json.Unmarshal(dashboardBytes, dashboard)
return dashboard
}
func createTestUser() *user.SignedInUser {
return &user.SignedInUser{
UserID: 1,
OrgID: 1,
Login: "testuser",
Name: "Test User",
Email: "test@example.com",
}
dashboard := &common.Unstructured{}
dashboardData := map[string]interface{}{
"uid": "test-dashboard-uid",
"id": 123,
}
dashboardBytes, _ := json.Marshal(dashboardData)
_ = json.Unmarshal(dashboardBytes, dashboard)
cmd := CreateDashboardSnapshotCommand{
DashboardCreateCommand: snapshot.DashboardCreateCommand{
Dashboard: dashboard,
Name: "Test Snapshot",
},
}
mockService.On("ValidateDashboardExists", mock.Anything, int64(1), "test-dashboard-uid").
Return(dashboards.ErrDashboardNotFound)
req, _ := http.NewRequest("POST", "/api/snapshots", nil)
req = req.WithContext(identity.WithRequester(req.Context(), testUser))
}
func createReqContext(t *testing.T, req *http.Request, testUser *user.SignedInUser) (*contextmodel.ReqContext, *httptest.ResponseRecorder) {
t.Helper()
recorder := httptest.NewRecorder()
ctx := &contextmodel.ReqContext{
Context: &web.Context{
@@ -63,13 +53,319 @@ func TestCreateDashboardSnapshot_DashboardNotFound(t *testing.T) {
SignedInUser: testUser,
Logger: log.NewNopLogger(),
}
CreateDashboardSnapshot(ctx, cfg, cmd, mockService)
mockService.AssertExpectations(t)
assert.Equal(t, http.StatusBadRequest, recorder.Code)
var response map[string]interface{}
err := json.Unmarshal(recorder.Body.Bytes(), &response)
require.NoError(t, err)
assert.Equal(t, "Dashboard not found", response["message"])
return ctx, recorder
}
// TestCreateDashboardSnapshot tests snapshot creation in regular mode (non-public instance).
// These tests cover scenarios when Grafana is running as a regular server with user authentication.
func TestCreateDashboardSnapshot(t *testing.T) {
	t.Run("should return error when dashboard not found", func(t *testing.T) {
		mockService := &MockService{}
		cfg := snapshot.SnapshotSharingOptions{
			SnapshotsEnabled: true,
			ExternalEnabled:  false,
		}
		testUser := createTestUser()
		dashboard := createTestDashboard(t)
		cmd := CreateDashboardSnapshotCommand{
			DashboardCreateCommand: snapshot.DashboardCreateCommand{
				Dashboard: dashboard,
				Name:      "Test Snapshot",
			},
		}
		// Dashboard validation fails, so the handler must answer 400.
		mockService.On("ValidateDashboardExists", mock.Anything, int64(1), "test-dashboard-uid").
			Return(dashboards.ErrDashboardNotFound)
		req, _ := http.NewRequest("POST", "/api/snapshots", nil)
		req = req.WithContext(identity.WithRequester(req.Context(), testUser))
		ctx, recorder := createReqContext(t, req, testUser)
		CreateDashboardSnapshot(ctx, cfg, cmd, mockService)
		mockService.AssertExpectations(t)
		assert.Equal(t, http.StatusBadRequest, recorder.Code)
		var response map[string]any
		err := json.Unmarshal(recorder.Body.Bytes(), &response)
		require.NoError(t, err)
		assert.Equal(t, "Dashboard not found", response["message"])
	})
	t.Run("should create external snapshot when external is enabled", func(t *testing.T) {
		// Fake external snapshot server: asserts the handler POSTs to
		// /api/snapshots and returns the key/URLs the handler should echo back.
		externalServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			assert.Equal(t, "/api/snapshots", r.URL.Path)
			assert.Equal(t, "POST", r.Method)
			response := map[string]any{
				"key":       "external-key",
				"deleteKey": "external-delete-key",
				"url":       "https://external.example.com/dashboard/snapshot/external-key",
				"deleteUrl": "https://external.example.com/api/snapshots-delete/external-delete-key",
			}
			w.Header().Set("Content-Type", "application/json")
			_ = json.NewEncoder(w).Encode(response)
		}))
		defer externalServer.Close()
		mockService := NewMockService(t)
		cfg := snapshot.SnapshotSharingOptions{
			SnapshotsEnabled:    true,
			ExternalEnabled:     true,
			ExternalSnapshotURL: externalServer.URL,
		}
		testUser := createTestUser()
		dashboard := createTestDashboard(t)
		cmd := CreateDashboardSnapshotCommand{
			DashboardCreateCommand: snapshot.DashboardCreateCommand{
				Dashboard: dashboard,
				Name:      "Test External Snapshot",
				External:  true,
			},
		}
		mockService.On("ValidateDashboardExists", mock.Anything, int64(1), "test-dashboard-uid").
			Return(nil)
		mockService.On("CreateDashboardSnapshot", mock.Anything, mock.Anything).
			Return(&DashboardSnapshot{
				Key:       "external-key",
				DeleteKey: "external-delete-key",
			}, nil)
		req, _ := http.NewRequest("POST", "/api/snapshots", nil)
		req = req.WithContext(identity.WithRequester(req.Context(), testUser))
		ctx, recorder := createReqContext(t, req, testUser)
		CreateDashboardSnapshot(ctx, cfg, cmd, mockService)
		mockService.AssertExpectations(t)
		assert.Equal(t, http.StatusOK, recorder.Code)
		var response map[string]any
		err := json.Unmarshal(recorder.Body.Bytes(), &response)
		require.NoError(t, err)
		// The response must carry the external server's key and URL.
		assert.Equal(t, "external-key", response["key"])
		assert.Equal(t, "external-delete-key", response["deleteKey"])
		assert.Equal(t, "https://external.example.com/dashboard/snapshot/external-key", response["url"])
	})
	t.Run("should return forbidden when external is disabled", func(t *testing.T) {
		mockService := NewMockService(t)
		cfg := snapshot.SnapshotSharingOptions{
			SnapshotsEnabled: true,
			ExternalEnabled:  false,
		}
		testUser := createTestUser()
		dashboard := createTestDashboard(t)
		cmd := CreateDashboardSnapshotCommand{
			DashboardCreateCommand: snapshot.DashboardCreateCommand{
				Dashboard: dashboard,
				Name:      "Test External Snapshot",
				External:  true, // request external while the instance forbids it
			},
		}
		mockService.On("ValidateDashboardExists", mock.Anything, int64(1), "test-dashboard-uid").
			Return(nil)
		req, _ := http.NewRequest("POST", "/api/snapshots", nil)
		req = req.WithContext(identity.WithRequester(req.Context(), testUser))
		ctx, recorder := createReqContext(t, req, testUser)
		CreateDashboardSnapshot(ctx, cfg, cmd, mockService)
		mockService.AssertExpectations(t)
		assert.Equal(t, http.StatusForbidden, recorder.Code)
		var response map[string]any
		err := json.Unmarshal(recorder.Body.Bytes(), &response)
		require.NoError(t, err)
		assert.Equal(t, "External dashboard creation is disabled", response["message"])
	})
	t.Run("should create local snapshot", func(t *testing.T) {
		mockService := NewMockService(t)
		cfg := snapshot.SnapshotSharingOptions{
			SnapshotsEnabled: true,
		}
		testUser := createTestUser()
		dashboard := createTestDashboard(t)
		cmd := CreateDashboardSnapshotCommand{
			DashboardCreateCommand: snapshot.DashboardCreateCommand{
				Dashboard: dashboard,
				Name:      "Test Local Snapshot",
			},
			// Pre-set keys so the handler does not generate random ones.
			Key:       "local-key",
			DeleteKey: "local-delete-key",
		}
		mockService.On("ValidateDashboardExists", mock.Anything, int64(1), "test-dashboard-uid").
			Return(nil)
		mockService.On("CreateDashboardSnapshot", mock.Anything, mock.Anything).
			Return(&DashboardSnapshot{
				Key:       "local-key",
				DeleteKey: "local-delete-key",
			}, nil)
		req, _ := http.NewRequest("POST", "/api/snapshots", nil)
		req = req.WithContext(identity.WithRequester(req.Context(), testUser))
		ctx, recorder := createReqContext(t, req, testUser)
		CreateDashboardSnapshot(ctx, cfg, cmd, mockService)
		mockService.AssertExpectations(t)
		assert.Equal(t, http.StatusOK, recorder.Code)
		var response map[string]any
		err := json.Unmarshal(recorder.Body.Bytes(), &response)
		require.NoError(t, err)
		assert.Equal(t, "local-key", response["key"])
		assert.Equal(t, "local-delete-key", response["deleteKey"])
		assert.Contains(t, response["url"], "dashboard/snapshot/local-key")
		assert.Contains(t, response["deleteUrl"], "api/snapshots-delete/local-delete-key")
	})
}
// TestCreateDashboardSnapshotPublic tests snapshot creation in public mode.
// These tests cover scenarios when Grafana is running as a public snapshot server
// where no user authentication or dashboard validation is required.
func TestCreateDashboardSnapshotPublic(t *testing.T) {
t.Run("should create local snapshot without user context", func(t *testing.T) {
mockService := NewMockService(t)
cfg := snapshot.SnapshotSharingOptions{
SnapshotsEnabled: true,
}
dashboard := createTestDashboard(t)
cmd := CreateDashboardSnapshotCommand{
DashboardCreateCommand: snapshot.DashboardCreateCommand{
Dashboard: dashboard,
Name: "Test Snapshot",
},
Key: "test-key",
DeleteKey: "test-delete-key",
}
mockService.On("CreateDashboardSnapshot", mock.Anything, mock.Anything).
Return(&DashboardSnapshot{
Key: "test-key",
DeleteKey: "test-delete-key",
}, nil)
req, _ := http.NewRequest("POST", "/api/snapshots", nil)
recorder := httptest.NewRecorder()
ctx := &contextmodel.ReqContext{
Context: &web.Context{
Req: req,
Resp: web.NewResponseWriter("POST", recorder),
},
Logger: log.NewNopLogger(),
}
CreateDashboardSnapshotPublic(ctx, cfg, cmd, mockService)
mockService.AssertExpectations(t)
assert.Equal(t, http.StatusOK, recorder.Code)
var response map[string]any
err := json.Unmarshal(recorder.Body.Bytes(), &response)
require.NoError(t, err)
assert.Equal(t, "test-key", response["key"])
assert.Equal(t, "test-delete-key", response["deleteKey"])
assert.Contains(t, response["url"], "dashboard/snapshot/test-key")
assert.Contains(t, response["deleteUrl"], "api/snapshots-delete/test-delete-key")
})
t.Run("should return forbidden when snapshots are disabled", func(t *testing.T) {
mockService := NewMockService(t)
cfg := snapshot.SnapshotSharingOptions{
SnapshotsEnabled: false,
}
dashboard := createTestDashboard(t)
cmd := CreateDashboardSnapshotCommand{
DashboardCreateCommand: snapshot.DashboardCreateCommand{
Dashboard: dashboard,
Name: "Test Snapshot",
},
}
req, _ := http.NewRequest("POST", "/api/snapshots", nil)
recorder := httptest.NewRecorder()
ctx := &contextmodel.ReqContext{
Context: &web.Context{
Req: req,
Resp: web.NewResponseWriter("POST", recorder),
},
Logger: log.NewNopLogger(),
}
CreateDashboardSnapshotPublic(ctx, cfg, cmd, mockService)
assert.Equal(t, http.StatusForbidden, recorder.Code)
var response map[string]any
err := json.Unmarshal(recorder.Body.Bytes(), &response)
require.NoError(t, err)
assert.Equal(t, "Dashboard Snapshots are disabled", response["message"])
})
}
// TestDeleteExternalDashboardSnapshot tests deletion of external snapshots.
// This function is called in public mode and doesn't require user context.
func TestDeleteExternalDashboardSnapshot(t *testing.T) {
	// newStubServer starts an HTTP server replying with the given status code
	// and, when message is non-empty, a JSON body carrying a "message" field.
	// checkMethod additionally asserts the delete call arrives as a GET.
	newStubServer := func(t *testing.T, status int, message string, checkMethod bool) *httptest.Server {
		srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if checkMethod {
				assert.Equal(t, "GET", r.Method)
			}
			w.WriteHeader(status)
			if message != "" {
				_ = json.NewEncoder(w).Encode(map[string]any{"message": message})
			}
		}))
		t.Cleanup(srv.Close)
		return srv
	}

	t.Run("should return nil on successful deletion", func(t *testing.T) {
		srv := newStubServer(t, http.StatusOK, "", true)
		assert.NoError(t, DeleteExternalDashboardSnapshot(srv.URL))
	})

	t.Run("should gracefully handle already deleted snapshot", func(t *testing.T) {
		// A 500 carrying this exact message means the remote snapshot is
		// already gone, which is treated as a successful deletion.
		srv := newStubServer(t, http.StatusInternalServerError, "Failed to get dashboard snapshot", false)
		assert.NoError(t, DeleteExternalDashboardSnapshot(srv.URL))
	})

	t.Run("should return error on unexpected status code", func(t *testing.T) {
		srv := newStubServer(t, http.StatusNotFound, "", false)
		err := DeleteExternalDashboardSnapshot(srv.URL)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "unexpected response when deleting external snapshot")
		assert.Contains(t, err.Error(), "404")
	})

	t.Run("should return error on 500 with different message", func(t *testing.T) {
		srv := newStubServer(t, http.StatusInternalServerError, "Some other error", false)
		err := DeleteExternalDashboardSnapshot(srv.URL)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "500")
	})
}

View File

@@ -11,8 +11,18 @@ const (
)
type SecretsManagerSettings struct {
// Which encryption provider to use to encrypt any new secrets
CurrentEncryptionProvider string
// The time to live for decrypted data keys in memory
DataKeysCacheTTL time.Duration
// The interval to remove expired data keys from the cache
DataKeysCacheCleanupInterval time.Duration
// The caution period is the time after which a data key is assumed to be persisted in the worst case scenario.
DataKeysCacheCautionPeriod time.Duration
// Whether to use a Redis cache for data keys instead of the in-memory cache
DataKeysCacheUseRedis bool
// ConfiguredKMSProviders is a map of KMS providers found in the config file. The keys are in the format of <provider>.<keyName>, and the values are a map of the properties in that section
// In OSS, the provider type can only be "secret_key". In Enterprise, it can additionally be one of: "aws_kms", "azure_keyvault", "google_kms", "hashicorp_vault"
ConfiguredKMSProviders map[string]map[string]string
@@ -73,6 +83,12 @@ func (cfg *Cfg) readSecretsManagerSettings() {
cfg.SecretsManagement.AWSKeeperAccessKeyID = secretsMgmt.Key("aws_access_key_id").MustString("")
cfg.SecretsManagement.AWSKeeperSecretAccessKey = secretsMgmt.Key("aws_secret_access_key").MustString("")
cfg.SecretsManagement.DataKeysCacheUseRedis = secretsMgmt.Key("data_keys_cache_use_redis").MustBool(false)
cfg.SecretsManagement.DataKeysCacheTTL = secretsMgmt.Key("data_keys_cache_ttl").MustDuration(15 * time.Minute)
cfg.SecretsManagement.DataKeysCacheCleanupInterval = secretsMgmt.Key("data_keys_cache_cleanup_interval").MustDuration(1 * time.Minute)
// We consider a "caution period" of 10m to be long enough for any database transaction that implied a data key creation to have finished successfully.
cfg.SecretsManagement.DataKeysCacheCautionPeriod = secretsMgmt.Key("data_keys_cache_caution_period").MustDuration(10 * time.Minute)
// Extract available KMS providers from configuration sections
providers := make(map[string]map[string]string)
for _, section := range cfg.Raw.Sections() {

View File

@@ -19,13 +19,18 @@ const (
defaultBufferSize = 10000
)
type notifier struct {
type notifier interface {
Watch(context.Context, watchOptions) <-chan Event
}
type pollingNotifier struct {
eventStore *eventStore
log logging.Logger
}
type notifierOptions struct {
log logging.Logger
log logging.Logger
useChannelNotifier bool
}
type watchOptions struct {
@@ -44,15 +49,26 @@ func defaultWatchOptions() watchOptions {
}
}
func newNotifier(eventStore *eventStore, opts notifierOptions) *notifier {
func newNotifier(eventStore *eventStore, opts notifierOptions) notifier {
if opts.log == nil {
opts.log = &logging.NoOpLogger{}
}
return &notifier{eventStore: eventStore, log: opts.log}
if opts.useChannelNotifier {
return &channelNotifier{}
}
return &pollingNotifier{eventStore: eventStore, log: opts.log}
}
type channelNotifier struct{}
func (cn *channelNotifier) Watch(ctx context.Context, opts watchOptions) <-chan Event {
return nil
}
// Return the last resource version from the event store
func (n *notifier) lastEventResourceVersion(ctx context.Context) (int64, error) {
func (n *pollingNotifier) lastEventResourceVersion(ctx context.Context) (int64, error) {
e, err := n.eventStore.LastEventKey(ctx)
if err != nil {
return 0, err
@@ -60,11 +76,11 @@ func (n *notifier) lastEventResourceVersion(ctx context.Context) (int64, error)
return e.ResourceVersion, nil
}
func (n *notifier) cacheKey(evt Event) string {
func (n *pollingNotifier) cacheKey(evt Event) string {
return fmt.Sprintf("%s~%s~%s~%s~%d", evt.Namespace, evt.Group, evt.Resource, evt.Name, evt.ResourceVersion)
}
func (n *notifier) Watch(ctx context.Context, opts watchOptions) <-chan Event {
func (n *pollingNotifier) Watch(ctx context.Context, opts watchOptions) <-chan Event {
if opts.MinBackoff <= 0 {
opts.MinBackoff = defaultMinBackoff
}

View File

@@ -13,7 +13,7 @@ import (
"github.com/stretchr/testify/require"
)
func setupTestNotifier(t *testing.T) (*notifier, *eventStore) {
func setupTestNotifier(t *testing.T) (*pollingNotifier, *eventStore) {
db := setupTestBadgerDB(t)
t.Cleanup(func() {
err := db.Close()
@@ -22,10 +22,10 @@ func setupTestNotifier(t *testing.T) (*notifier, *eventStore) {
kv := NewBadgerKV(db)
eventStore := newEventStore(kv)
notifier := newNotifier(eventStore, notifierOptions{log: &logging.NoOpLogger{}})
return notifier, eventStore
return notifier.(*pollingNotifier), eventStore
}
func setupTestNotifierSqlKv(t *testing.T) (*notifier, *eventStore) {
func setupTestNotifierSqlKv(t *testing.T) (*pollingNotifier, *eventStore) {
dbstore := db.InitTestDB(t)
eDB, err := dbimpl.ProvideResourceDB(dbstore, setting.NewCfg(), nil)
require.NoError(t, err)
@@ -33,7 +33,7 @@ func setupTestNotifierSqlKv(t *testing.T) (*notifier, *eventStore) {
require.NoError(t, err)
eventStore := newEventStore(kv)
notifier := newNotifier(eventStore, notifierOptions{log: &logging.NoOpLogger{}})
return notifier, eventStore
return notifier.(*pollingNotifier), eventStore
}
func TestNewNotifier(t *testing.T) {
@@ -49,7 +49,7 @@ func TestDefaultWatchOptions(t *testing.T) {
assert.Equal(t, defaultBufferSize, opts.BufferSize)
}
func runNotifierTestWith(t *testing.T, storeName string, newStoreFn func(*testing.T) (*notifier, *eventStore), testFn func(*testing.T, context.Context, *notifier, *eventStore)) {
func runNotifierTestWith(t *testing.T, storeName string, newStoreFn func(*testing.T) (*pollingNotifier, *eventStore), testFn func(*testing.T, context.Context, *pollingNotifier, *eventStore)) {
t.Run(storeName, func(t *testing.T) {
ctx := context.Background()
notifier, eventStore := newStoreFn(t)
@@ -62,7 +62,7 @@ func TestNotifier_lastEventResourceVersion(t *testing.T) {
runNotifierTestWith(t, "sqlkv", setupTestNotifierSqlKv, testNotifierLastEventResourceVersion)
}
func testNotifierLastEventResourceVersion(t *testing.T, ctx context.Context, notifier *notifier, eventStore *eventStore) {
func testNotifierLastEventResourceVersion(t *testing.T, ctx context.Context, notifier *pollingNotifier, eventStore *eventStore) {
// Test with no events
rv, err := notifier.lastEventResourceVersion(ctx)
assert.Error(t, err)
@@ -113,7 +113,7 @@ func TestNotifier_cachekey(t *testing.T) {
runNotifierTestWith(t, "sqlkv", setupTestNotifierSqlKv, testNotifierCachekey)
}
func testNotifierCachekey(t *testing.T, ctx context.Context, notifier *notifier, eventStore *eventStore) {
func testNotifierCachekey(t *testing.T, ctx context.Context, notifier *pollingNotifier, eventStore *eventStore) {
tests := []struct {
name string
event Event
@@ -167,7 +167,7 @@ func TestNotifier_Watch_NoEvents(t *testing.T) {
runNotifierTestWith(t, "sqlkv", setupTestNotifierSqlKv, testNotifierWatchNoEvents)
}
func testNotifierWatchNoEvents(t *testing.T, ctx context.Context, notifier *notifier, eventStore *eventStore) {
func testNotifierWatchNoEvents(t *testing.T, ctx context.Context, notifier *pollingNotifier, eventStore *eventStore) {
ctx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
defer cancel()
@@ -208,7 +208,7 @@ func TestNotifier_Watch_WithExistingEvents(t *testing.T) {
runNotifierTestWith(t, "sqlkv", setupTestNotifierSqlKv, testNotifierWatchWithExistingEvents)
}
func testNotifierWatchWithExistingEvents(t *testing.T, ctx context.Context, notifier *notifier, eventStore *eventStore) {
func testNotifierWatchWithExistingEvents(t *testing.T, ctx context.Context, notifier *pollingNotifier, eventStore *eventStore) {
ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
defer cancel()
@@ -282,7 +282,7 @@ func TestNotifier_Watch_EventDeduplication(t *testing.T) {
runNotifierTestWith(t, "sqlkv", setupTestNotifierSqlKv, testNotifierWatchEventDeduplication)
}
func testNotifierWatchEventDeduplication(t *testing.T, ctx context.Context, notifier *notifier, eventStore *eventStore) {
func testNotifierWatchEventDeduplication(t *testing.T, ctx context.Context, notifier *pollingNotifier, eventStore *eventStore) {
ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
defer cancel()
@@ -348,7 +348,7 @@ func TestNotifier_Watch_ContextCancellation(t *testing.T) {
runNotifierTestWith(t, "sqlkv", setupTestNotifierSqlKv, testNotifierWatchContextCancellation)
}
func testNotifierWatchContextCancellation(t *testing.T, ctx context.Context, notifier *notifier, eventStore *eventStore) {
func testNotifierWatchContextCancellation(t *testing.T, ctx context.Context, notifier *pollingNotifier, eventStore *eventStore) {
ctx, cancel := context.WithCancel(ctx)
// Add an initial event so that lastEventResourceVersion doesn't return ErrNotFound
@@ -394,7 +394,7 @@ func TestNotifier_Watch_MultipleEvents(t *testing.T) {
runNotifierTestWith(t, "sqlkv", setupTestNotifierSqlKv, testNotifierWatchMultipleEvents)
}
func testNotifierWatchMultipleEvents(t *testing.T, ctx context.Context, notifier *notifier, eventStore *eventStore) {
func testNotifierWatchMultipleEvents(t *testing.T, ctx context.Context, notifier *pollingNotifier, eventStore *eventStore) {
ctx, cancel := context.WithTimeout(ctx, 3*time.Second)
defer cancel()
rv := time.Now().UnixNano()

View File

@@ -61,7 +61,7 @@ type kvStorageBackend struct {
bulkLock *BulkLock
dataStore *dataStore
eventStore *eventStore
notifier *notifier
notifier notifier
builder DocumentBuilder
log logging.Logger
withPruner bool
@@ -91,6 +91,7 @@ type KVBackendOptions struct {
Tracer trace.Tracer // TODO add tracing
Reg prometheus.Registerer // TODO add metrics
UseChannelNotifier bool
// Adding RvManager overrides the RV generated with snowflake in order to keep backwards compatibility with
// unified/sql
RvManager *rvmanager.ResourceVersionManager
@@ -121,7 +122,7 @@ func NewKVStorageBackend(opts KVBackendOptions) (KVBackend, error) {
bulkLock: NewBulkLock(),
dataStore: newDataStore(kv),
eventStore: eventStore,
notifier: newNotifier(eventStore, notifierOptions{}),
notifier: newNotifier(eventStore, notifierOptions{useChannelNotifier: opts.UseChannelNotifier}),
snowflake: s,
builder: StandardDocumentBuilder(), // For now we use the standard document builder.
log: &logging.NoOpLogger{}, // Make this configurable

View File

@@ -99,6 +99,9 @@ func NewResourceServer(opts ServerOptions) (resource.ResourceServer, error) {
return nil, err
}
isHA := isHighAvailabilityEnabled(opts.Cfg.SectionWithEnvOverrides("database"),
opts.Cfg.SectionWithEnvOverrides("resource_api"))
if opts.Cfg.EnableSQLKVBackend {
sqlkv, err := resource.NewSQLKV(eDB)
if err != nil {
@@ -106,9 +109,10 @@ func NewResourceServer(opts ServerOptions) (resource.ResourceServer, error) {
}
kvBackendOpts := resource.KVBackendOptions{
KvStore: sqlkv,
Tracer: opts.Tracer,
Reg: opts.Reg,
KvStore: sqlkv,
Tracer: opts.Tracer,
Reg: opts.Reg,
UseChannelNotifier: !isHA,
}
ctx := context.Background()
@@ -140,9 +144,6 @@ func NewResourceServer(opts ServerOptions) (resource.ResourceServer, error) {
serverOptions.Backend = kvBackend
serverOptions.Diagnostics = kvBackend
} else {
isHA := isHighAvailabilityEnabled(opts.Cfg.SectionWithEnvOverrides("database"),
opts.Cfg.SectionWithEnvOverrides("resource_api"))
backend, err := NewBackend(BackendOptions{
DBProvider: eDB,
Reg: opts.Reg,

View File

@@ -8,6 +8,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/grafana/grafana/pkg/util/testutil"
)
func TestBadgerKVStorageBackend(t *testing.T) {
@@ -36,7 +37,9 @@ func TestBadgerKVStorageBackend(t *testing.T) {
})
}
func TestSQLKVStorageBackend(t *testing.T) {
func TestIntegrationSQLKVStorageBackend(t *testing.T) {
testutil.SkipIntegrationTestInShortMode(t)
skipTests := map[string]bool{
TestWatchWriteEvents: true,
TestList: true,

View File

@@ -25,6 +25,10 @@ export class ExportAsCode extends ShareExportTab {
public getTabLabel(): string {
return t('export.json.title', 'Export dashboard');
}
public getSubtitle(): string | undefined {
return t('export.json.info-text', 'Copy or download a file containing the definition of your dashboard');
}
}
function ExportAsCodeRenderer({ model }: SceneComponentProps<ExportAsCode>) {
@@ -53,12 +57,6 @@ function ExportAsCodeRenderer({ model }: SceneComponentProps<ExportAsCode>) {
return (
<div data-testid={selector.container} className={styles.container}>
<p>
<Trans i18nKey="export.json.info-text">
Copy or download a file containing the definition of your dashboard
</Trans>
</p>
{config.featureToggles.kubernetesDashboards ? (
<ResourceExport
dashboardJson={dashboardJson}

View File

@@ -0,0 +1,189 @@
import { render, screen, within } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { AsyncState } from 'react-use/lib/useAsync';
import { selectors as e2eSelectors } from '@grafana/e2e-selectors';
import { Dashboard } from '@grafana/schema';
import { Spec as DashboardV2Spec } from '@grafana/schema/dist/esm/schema/dashboard/v2';
import { ExportMode, ResourceExport } from './ResourceExport';
type DashboardJsonState = AsyncState<{
json: Dashboard | DashboardV2Spec | { error: unknown };
hasLibraryPanels?: boolean;
initialSaveModelVersion: 'v1' | 'v2';
}>;
const selector = e2eSelectors.pages.ExportDashboardDrawer.ExportAsJson;
const createDefaultProps = (overrides?: Partial<Parameters<typeof ResourceExport>[0]>) => {
const defaultProps: Parameters<typeof ResourceExport>[0] = {
dashboardJson: {
loading: false,
value: {
json: { title: 'Test Dashboard' } as Dashboard,
hasLibraryPanels: false,
initialSaveModelVersion: 'v1',
},
} as DashboardJsonState,
isSharingExternally: false,
exportMode: ExportMode.Classic,
isViewingYAML: false,
onExportModeChange: jest.fn(),
onShareExternallyChange: jest.fn(),
onViewYAML: jest.fn(),
};
return { ...defaultProps, ...overrides };
};
const createV2DashboardJson = (hasLibraryPanels = false): DashboardJsonState => ({
loading: false,
value: {
json: {
title: 'Test V2 Dashboard',
spec: {
elements: {},
},
} as unknown as DashboardV2Spec,
hasLibraryPanels,
initialSaveModelVersion: 'v2',
},
});
const expandOptions = async () => {
const button = screen.getByRole('button', { expanded: false });
await userEvent.click(button);
};
describe('ResourceExport', () => {
describe('export mode options for v1 dashboard', () => {
it('should show three export mode options in correct order: Classic, V1 Resource, V2 Resource', async () => {
render(<ResourceExport {...createDefaultProps()} />);
await expandOptions();
const radioGroup = screen.getByRole('radiogroup', { name: /model/i });
const labels = within(radioGroup)
.getAllByRole('radio')
.map((radio) => radio.parentElement?.textContent?.trim());
expect(labels).toHaveLength(3);
expect(labels).toEqual(['Classic', 'V1 Resource', 'V2 Resource']);
});
it('should have first option selected by default when exportMode is Classic', async () => {
render(<ResourceExport {...createDefaultProps({ exportMode: ExportMode.Classic })} />);
await expandOptions();
const radioGroup = screen.getByRole('radiogroup', { name: /model/i });
const radios = within(radioGroup).getAllByRole('radio');
expect(radios[0]).toBeChecked();
});
it('should call onExportModeChange when export mode is changed', async () => {
const onExportModeChange = jest.fn();
render(<ResourceExport {...createDefaultProps({ onExportModeChange })} />);
await expandOptions();
const radioGroup = screen.getByRole('radiogroup', { name: /model/i });
const radios = within(radioGroup).getAllByRole('radio');
await userEvent.click(radios[1]); // V1 Resource
expect(onExportModeChange).toHaveBeenCalledWith(ExportMode.V1Resource);
});
});
describe('export mode options for v2 dashboard', () => {
it('should not show export mode options', async () => {
render(<ResourceExport {...createDefaultProps({ dashboardJson: createV2DashboardJson() })} />);
await expandOptions();
expect(screen.queryByRole('radiogroup', { name: /model/i })).not.toBeInTheDocument();
});
});
describe('format options', () => {
it('should not show format options when export mode is Classic', async () => {
render(<ResourceExport {...createDefaultProps({ exportMode: ExportMode.Classic })} />);
await expandOptions();
expect(screen.getByRole('radiogroup', { name: /model/i })).toBeInTheDocument();
expect(screen.queryByRole('radiogroup', { name: /format/i })).not.toBeInTheDocument();
});
it.each([ExportMode.V1Resource, ExportMode.V2Resource])(
'should show format options when export mode is %s',
async (exportMode) => {
render(<ResourceExport {...createDefaultProps({ exportMode })} />);
await expandOptions();
expect(screen.getByRole('radiogroup', { name: /model/i })).toBeInTheDocument();
expect(screen.getByRole('radiogroup', { name: /format/i })).toBeInTheDocument();
}
);
it('should have first format option selected when isViewingYAML is false', async () => {
render(<ResourceExport {...createDefaultProps({ exportMode: ExportMode.V1Resource, isViewingYAML: false })} />);
await expandOptions();
const formatGroup = screen.getByRole('radiogroup', { name: /format/i });
const formatRadios = within(formatGroup).getAllByRole('radio');
expect(formatRadios[0]).toBeChecked(); // JSON
});
it('should have second format option selected when isViewingYAML is true', async () => {
render(<ResourceExport {...createDefaultProps({ exportMode: ExportMode.V1Resource, isViewingYAML: true })} />);
await expandOptions();
const formatGroup = screen.getByRole('radiogroup', { name: /format/i });
const formatRadios = within(formatGroup).getAllByRole('radio');
expect(formatRadios[1]).toBeChecked(); // YAML
});
it('should call onViewYAML when format is changed', async () => {
const onViewYAML = jest.fn();
render(<ResourceExport {...createDefaultProps({ exportMode: ExportMode.V1Resource, onViewYAML })} />);
await expandOptions();
const formatGroup = screen.getByRole('radiogroup', { name: /format/i });
const formatRadios = within(formatGroup).getAllByRole('radio');
await userEvent.click(formatRadios[1]); // YAML
expect(onViewYAML).toHaveBeenCalled();
});
});
describe('share externally switch', () => {
it('should show share externally switch for Classic mode', () => {
render(<ResourceExport {...createDefaultProps({ exportMode: ExportMode.Classic })} />);
expect(screen.getByTestId(selector.exportExternallyToggle)).toBeInTheDocument();
});
it('should show share externally switch for V2Resource mode with V2 dashboard', () => {
render(
<ResourceExport
{...createDefaultProps({
dashboardJson: createV2DashboardJson(),
exportMode: ExportMode.V2Resource,
})}
/>
);
expect(screen.getByTestId(selector.exportExternallyToggle)).toBeInTheDocument();
});
it('should call onShareExternallyChange when switch is toggled', async () => {
const onShareExternallyChange = jest.fn();
render(<ResourceExport {...createDefaultProps({ exportMode: ExportMode.Classic, onShareExternallyChange })} />);
const switchElement = screen.getByTestId(selector.exportExternallyToggle);
await userEvent.click(switchElement);
expect(onShareExternallyChange).toHaveBeenCalled();
});
it('should reflect isSharingExternally value in switch', () => {
render(<ResourceExport {...createDefaultProps({ exportMode: ExportMode.Classic, isSharingExternally: true })} />);
expect(screen.getByTestId(selector.exportExternallyToggle)).toBeChecked();
});
});
});

View File

@@ -4,7 +4,8 @@ import { selectors as e2eSelectors } from '@grafana/e2e-selectors';
import { Trans, t } from '@grafana/i18n';
import { Dashboard } from '@grafana/schema';
import { Spec as DashboardV2Spec } from '@grafana/schema/dist/esm/schema/dashboard/v2';
import { Alert, Label, RadioButtonGroup, Stack, Switch } from '@grafana/ui';
import { Alert, Icon, Label, RadioButtonGroup, Stack, Switch, Box, Tooltip } from '@grafana/ui';
import { QueryOperationRow } from 'app/core/components/QueryOperationRow/QueryOperationRow';
import { DashboardJson } from 'app/features/manage-dashboards/types';
import { ExportableResource } from '../ShareExportTab';
@@ -48,80 +49,90 @@ export function ResourceExport({
const switchExportLabel =
exportMode === ExportMode.V2Resource
? t('export.json.export-remove-ds-refs', 'Remove deployment details')
: t('share-modal.export.share-externally-label', `Export for sharing externally`);
? t('dashboard-scene.resource-export.share-externally', 'Share dashboard with another instance')
: t('share-modal.export.share-externally-label', 'Export for sharing externally');
const switchExportTooltip = t(
'dashboard-scene.resource-export.share-externally-tooltip',
'Removes all instance-specific metadata and data source references from the resource before export.'
);
const switchExportModeLabel = t('export.json.export-mode', 'Model');
const switchExportFormatLabel = t('export.json.export-format', 'Format');
const exportResourceOptions = [
{
label: t('dashboard-scene.resource-export.label.classic', 'Classic'),
value: ExportMode.Classic,
},
{
label: t('dashboard-scene.resource-export.label.v1-resource', 'V1 Resource'),
value: ExportMode.V1Resource,
},
{
label: t('dashboard-scene.resource-export.label.v2-resource', 'V2 Resource'),
value: ExportMode.V2Resource,
},
];
return (
<Stack gap={2} direction="column">
<Stack gap={1} direction="column">
{initialSaveModelVersion === 'v1' && (
<Stack alignItems="center">
<Label>{switchExportModeLabel}</Label>
<RadioButtonGroup
options={[
{ label: t('dashboard-scene.resource-export.label.classic', 'Classic'), value: ExportMode.Classic },
{
label: t('dashboard-scene.resource-export.label.v1-resource', 'V1 Resource'),
value: ExportMode.V1Resource,
},
{
label: t('dashboard-scene.resource-export.label.v2-resource', 'V2 Resource'),
value: ExportMode.V2Resource,
},
]}
value={exportMode}
onChange={(value) => onExportModeChange(value)}
/>
<>
<QueryOperationRow
id="Advanced options"
index={0}
title={t('dashboard-scene.resource-export.label.advanced-options', 'Advanced options')}
isOpen={false}
>
<Box marginTop={2}>
<Stack gap={1} direction="column">
{initialSaveModelVersion === 'v1' && (
<Stack gap={1} alignItems="center">
<Label>{switchExportModeLabel}</Label>
<RadioButtonGroup
options={exportResourceOptions}
value={exportMode}
onChange={(value) => onExportModeChange(value)}
aria-label={switchExportModeLabel}
/>
</Stack>
)}
{exportMode !== ExportMode.Classic && (
<Stack gap={1} alignItems="center">
<Label>{switchExportFormatLabel}</Label>
<RadioButtonGroup
options={[
{ label: t('dashboard-scene.resource-export.label.json', 'JSON'), value: 'json' },
{ label: t('dashboard-scene.resource-export.label.yaml', 'YAML'), value: 'yaml' },
]}
value={isViewingYAML ? 'yaml' : 'json'}
onChange={onViewYAML}
aria-label={switchExportFormatLabel}
/>
</Stack>
)}
</Stack>
)}
{initialSaveModelVersion === 'v2' && (
<Stack alignItems="center">
<Label>{switchExportModeLabel}</Label>
<RadioButtonGroup
options={[
{
label: t('dashboard-scene.resource-export.label.v2-resource', 'V2 Resource'),
value: ExportMode.V2Resource,
},
{
label: t('dashboard-scene.resource-export.label.v1-resource', 'V1 Resource'),
value: ExportMode.V1Resource,
},
]}
value={exportMode}
onChange={(value) => onExportModeChange(value)}
/>
</Stack>
)}
{exportMode !== ExportMode.Classic && (
<Stack gap={1} alignItems="center">
<Label>{switchExportFormatLabel}</Label>
<RadioButtonGroup
options={[
{ label: t('dashboard-scene.resource-export.label.json', 'JSON'), value: 'json' },
{ label: t('dashboard-scene.resource-export.label.yaml', 'YAML'), value: 'yaml' },
]}
value={isViewingYAML ? 'yaml' : 'json'}
onChange={onViewYAML}
/>
</Stack>
)}
{(isV2Dashboard ||
exportMode === ExportMode.Classic ||
(initialSaveModelVersion === 'v2' && exportMode === ExportMode.V1Resource)) && (
<Stack gap={1} alignItems="start">
<Label>{switchExportLabel}</Label>
<Switch
label={switchExportLabel}
value={isSharingExternally}
onChange={onShareExternallyChange}
data-testid={selector.exportExternallyToggle}
/>
</Stack>
)}
</Stack>
</Box>
</QueryOperationRow>
{(isV2Dashboard ||
exportMode === ExportMode.Classic ||
(initialSaveModelVersion === 'v2' && exportMode === ExportMode.V1Resource)) && (
<Stack gap={1} alignItems="start">
<Label>
<Stack gap={0.5} alignItems="center">
<Tooltip content={switchExportTooltip} placement="bottom">
<Icon name="info-circle" size="sm" />
</Tooltip>
{switchExportLabel}
</Stack>
</Label>
<Switch
label={switchExportLabel}
value={isSharingExternally}
onChange={onShareExternallyChange}
data-testid={selector.exportExternallyToggle}
/>
</Stack>
)}
{showV2LibPanelAlert && (
<Alert
@@ -130,6 +141,7 @@ export function ResourceExport({
'Library panels will be converted to regular panels'
)}
severity="warning"
topSpacing={2}
>
<Trans i18nKey="dashboard-scene.save-dashboard-form.schema-v2-library-panels-export">
Due to limitations in the new dashboard schema (V2), library panels will be converted to regular panels with
@@ -137,6 +149,6 @@ export function ResourceExport({
</Trans>
</Alert>
)}
</Stack>
</>
);
}

View File

@@ -66,7 +66,12 @@ function ShareDrawerRenderer({ model }: SceneComponentProps<ShareDrawer>) {
const dashboard = getDashboardSceneFor(model);
return (
<Drawer title={activeShare?.getTabLabel()} onClose={model.onDismiss} size="md">
<Drawer
title={activeShare?.getTabLabel()}
subtitle={activeShare?.getSubtitle?.()}
onClose={model.onDismiss}
size="md"
>
<ShareDrawerContext.Provider value={{ dashboard, onDismiss: model.onDismiss }}>
{activeShare && <activeShare.Component model={activeShare} />}
</ShareDrawerContext.Provider>

View File

@@ -66,6 +66,10 @@ export class ShareExportTab extends SceneObjectBase<ShareExportTabState> impleme
return t('share-modal.tab-title.export', 'Export');
}
public getSubtitle(): string | undefined {
return undefined;
}
public onShareExternallyChange = () => {
this.setState({
isSharingExternally: !this.state.isSharingExternally,

View File

@@ -15,5 +15,6 @@ export interface SceneShareTab<T extends SceneShareTabState = SceneShareTabState
export interface ShareView extends SceneObject {
getTabLabel(): string;
getSubtitle?(): string | undefined;
onDismiss?: () => void;
}

View File

@@ -2,8 +2,9 @@ import { render, screen } from '@testing-library/react';
import { defaultsDeep } from 'lodash';
import { Provider } from 'react-redux';
import { FieldType, getDefaultTimeRange, LoadingState } from '@grafana/data';
import { PanelDataErrorViewProps } from '@grafana/runtime';
import { CoreApp, EventBusSrv, FieldType, getDefaultTimeRange, LoadingState } from '@grafana/data';
import { config, PanelDataErrorViewProps } from '@grafana/runtime';
import { usePanelContext } from '@grafana/ui';
import { configureStore } from 'app/store/configureStore';
import { PanelDataErrorView } from './PanelDataErrorView';
@@ -16,7 +17,24 @@ jest.mock('app/features/dashboard/services/DashboardSrv', () => ({
},
}));
jest.mock('@grafana/ui', () => ({
...jest.requireActual('@grafana/ui'),
usePanelContext: jest.fn(),
}));
const mockUsePanelContext = jest.mocked(usePanelContext);
// Expected empty-state copy shown when the newVizSuggestions toggle is on.
const RUN_QUERY_MESSAGE = 'Run a query to visualize it here or go to all visualizations to add other panel types';
// Minimal panel context returned by the mocked usePanelContext in every test.
// NOTE(review): assumes these are the only fields PanelDataErrorView reads — confirm.
const panelContextRoot = {
app: CoreApp.Dashboard,
eventsScope: 'global',
eventBus: new EventBusSrv(),
};
describe('PanelDataErrorView', () => {
beforeEach(() => {
mockUsePanelContext.mockReturnValue(panelContextRoot);
});
it('show No data when there is no data', () => {
renderWithProps();
@@ -70,6 +88,45 @@ describe('PanelDataErrorView', () => {
expect(screen.getByText('Query returned nothing')).toBeInTheDocument();
});
it('should show "Run a query..." message when no query is configured and feature toggle is enabled', () => {
  // Mutating global config is restored in a finally block so a failing
  // assertion cannot leak the toggle value into later tests.
  const originalFeatureToggle = config.featureToggles.newVizSuggestions;
  config.featureToggles.newVizSuggestions = true;
  try {
    renderWithProps({
      data: {
        state: LoadingState.Done,
        series: [],
        timeRange: getDefaultTimeRange(),
      },
    });
    expect(screen.getByText(RUN_QUERY_MESSAGE)).toBeInTheDocument();
  } finally {
    config.featureToggles.newVizSuggestions = originalFeatureToggle;
  }
});
it('should show "No data" message when feature toggle is disabled even without queries', () => {
  // Restore the global toggle in a finally block so a failing assertion
  // cannot leak the value into later tests.
  const originalFeatureToggle = config.featureToggles.newVizSuggestions;
  config.featureToggles.newVizSuggestions = false;
  try {
    renderWithProps({
      data: {
        state: LoadingState.Done,
        series: [],
        timeRange: getDefaultTimeRange(),
      },
    });
    expect(screen.getByText('No data')).toBeInTheDocument();
    expect(screen.queryByText(RUN_QUERY_MESSAGE)).not.toBeInTheDocument();
  } finally {
    config.featureToggles.newVizSuggestions = originalFeatureToggle;
  }
});
});
function renderWithProps(overrides?: Partial<PanelDataErrorViewProps>) {

View File

@@ -5,14 +5,15 @@ import {
FieldType,
getPanelDataSummary,
GrafanaTheme2,
PanelData,
PanelDataSummary,
PanelPluginVisualizationSuggestion,
} from '@grafana/data';
import { selectors } from '@grafana/e2e-selectors';
import { t, Trans } from '@grafana/i18n';
import { PanelDataErrorViewProps, locationService } from '@grafana/runtime';
import { PanelDataErrorViewProps, locationService, config } from '@grafana/runtime';
import { VizPanel } from '@grafana/scenes';
import { usePanelContext, useStyles2 } from '@grafana/ui';
import { Icon, usePanelContext, useStyles2 } from '@grafana/ui';
import { CardButton } from 'app/core/components/CardButton';
import { LS_VISUALIZATION_SELECT_TAB_KEY } from 'app/core/constants';
import store from 'app/core/store';
@@ -24,6 +25,11 @@ import { findVizPanelByKey, getVizPanelKeyForPanelId } from 'app/features/dashbo
import { useDispatch } from 'app/types/store';
import { changePanelPlugin } from '../state/actions';
import { hasData } from '../suggestions/utils';
/**
 * True when the panel has no query targets configured.
 * A missing request/targets list counts the same as an empty one.
 */
function hasNoQueryConfigured(data: PanelData): boolean {
  const targets = data.request?.targets;
  return targets == null || targets.length === 0;
}
export function PanelDataErrorView(props: PanelDataErrorViewProps) {
const styles = useStyles2(getStyles);
@@ -93,8 +99,14 @@ export function PanelDataErrorView(props: PanelDataErrorViewProps) {
}
};
const noData = !hasData(props.data);
const noQueryConfigured = hasNoQueryConfigured(props.data);
const showEmptyState =
config.featureToggles.newVizSuggestions && context.app === CoreApp.PanelEditor && noQueryConfigured && noData;
return (
<div className={styles.wrapper}>
{showEmptyState && <Icon name="chart-line" size="xxxl" className={styles.emptyStateIcon} />}
<div className={styles.message} data-testid={selectors.components.Panels.Panel.PanelDataErrorMessage}>
{message}
</div>
@@ -131,7 +143,17 @@ function getMessageFor(
return message;
}
if (!data.series || data.series.length === 0 || data.series.every((frame) => frame.length === 0)) {
const noData = !hasData(data);
const noQueryConfigured = hasNoQueryConfigured(data);
if (config.featureToggles.newVizSuggestions && noQueryConfigured && noData) {
return t(
'dashboard.new-panel.empty-state-message',
'Run a query to visualize it here or go to all visualizations to add other panel types'
);
}
if (noData) {
return fieldConfig?.defaults.noValue ?? t('panel.panel-data-error-view.no-value.default', 'No data');
}
@@ -176,5 +198,9 @@ const getStyles = (theme: GrafanaTheme2) => {
width: '100%',
maxWidth: '600px',
}),
emptyStateIcon: css({
color: theme.colors.text.secondary,
marginBottom: theme.spacing(2),
}),
};
};

View File

@@ -1,29 +1,26 @@
import { SelectableValue } from '@grafana/data';
import { RadioButtonGroup } from '@grafana/ui';
import { useDispatch } from '../../hooks/useStatelessReducer';
import { EditorType } from '../../types';
import { useQuery } from './ElasticsearchQueryContext';
import { changeEditorTypeAndResetQuery } from './state';
// Options for the builder/code radio toggle; module-level because the list
// never changes between renders.
const BASE_OPTIONS: Array<SelectableValue<EditorType>> = [
{ value: 'builder', label: 'Builder' },
{ value: 'code', label: 'Code' },
];
export const EditorTypeSelector = () => {
const query = useQuery();
const dispatch = useDispatch();
// Default to 'builder' if editorType is empty
const editorType: EditorType = query.editorType === 'code' ? 'code' : 'builder';
const onChange = (newEditorType: EditorType) => {
dispatch(changeEditorTypeAndResetQuery(newEditorType));
};
// Controlled-component props: the parent owns the current editor type and is
// notified when the user selects the other one.
interface Props {
value: EditorType;
onChange: (editorType: EditorType) => void;
}
/**
 * Radio toggle between the visual query builder and the raw code editor.
 * Controlled component: renders the `value` passed in and delegates selection
 * changes to `onChange`.
 */
export const EditorTypeSelector = ({ value, onChange }: Props) => {
  // NOTE: the pre-refactor single-line <RadioButtonGroup value={editorType}/>
  // was left behind by the diff; `editorType` no longer exists in this scope,
  // so only the controlled version is rendered.
  return (
    <RadioButtonGroup<EditorType>
      data-testid="elasticsearch-editor-type-toggle"
      size="sm"
      options={BASE_OPTIONS}
      value={value}
      onChange={onChange}
    />
  );
};

View File

@@ -10,9 +10,13 @@ interface Props {
onRunQuery: () => void;
}
// This offset was chosen by testing to match Prometheus behavior
const EDITOR_HEIGHT_OFFSET = 2;
export function RawQueryEditor({ value, onChange, onRunQuery }: Props) {
const styles = useStyles2(getStyles);
const editorRef = useRef<monacoTypes.editor.IStandaloneCodeEditor | null>(null);
const containerRef = useRef<HTMLDivElement | null>(null);
const handleEditorDidMount = useCallback(
(editor: monacoTypes.editor.IStandaloneCodeEditor, monaco: Monaco) => {
@@ -22,6 +26,22 @@ export function RawQueryEditor({ value, onChange, onRunQuery }: Props) {
editor.addCommand(monaco.KeyMod.CtrlCmd | monaco.KeyCode.Enter, () => {
onRunQuery();
});
// Make the editor resize itself so that the content fits (grows taller when necessary)
// this code comes from the Prometheus query editor.
// We may wish to consider abstracting it into the grafana/ui repo in the future
const updateElementHeight = () => {
const containerDiv = containerRef.current;
if (containerDiv !== null) {
// Size the wrapper div to the editor's content height plus a small offset
// (EDITOR_HEIGHT_OFFSET, chosen to match Prometheus behavior), then
// re-layout the editor to fill the wrapper's current width.
const pixelHeight = editor.getContentHeight();
containerDiv.style.height = `${pixelHeight + EDITOR_HEIGHT_OFFSET}px`;
const pixelWidth = containerDiv.clientWidth;
editor.layout({ width: pixelWidth, height: pixelHeight });
}
};
// Re-run on every content size change, and once immediately so the initial
// render is already sized to fit.
editor.onDidContentSizeChange(updateElementHeight);
updateElementHeight();
},
[onRunQuery]
);
@@ -65,7 +85,17 @@ export function RawQueryEditor({ value, onChange, onRunQuery }: Props) {
return (
<Box>
<div className={styles.header}>
<div ref={containerRef} className={styles.editorContainer}>
<CodeEditor
value={value ?? ''}
language="json"
width="100%"
onBlur={handleQueryChange}
monacoOptions={monacoOptions}
onEditorDidMount={handleEditorDidMount}
/>
</div>
<div className={styles.footer}>
<Stack gap={1}>
<Button
size="sm"
@@ -76,20 +106,8 @@ export function RawQueryEditor({ value, onChange, onRunQuery }: Props) {
>
Format
</Button>
<Button size="sm" variant="primary" icon="play" onClick={onRunQuery} tooltip="Run query (Ctrl/Cmd+Enter)">
Run
</Button>
</Stack>
</div>
<CodeEditor
value={value ?? ''}
language="json"
height={200}
width="100%"
onBlur={handleQueryChange}
monacoOptions={monacoOptions}
onEditorDidMount={handleEditorDidMount}
/>
</Box>
);
}
@@ -100,7 +118,11 @@ const getStyles = (theme: GrafanaTheme2) => ({
flexDirection: 'column',
gap: theme.spacing(1),
}),
header: css({
editorContainer: css({
width: '100%',
overflow: 'hidden',
}),
footer: css({
display: 'flex',
justifyContent: 'flex-end',
padding: theme.spacing(0.5, 0),

View File

@@ -1,16 +1,16 @@
import { css } from '@emotion/css';
import { useEffect, useId, useState } from 'react';
import { useCallback, useEffect, useId, useState } from 'react';
import { SemVer } from 'semver';
import { getDefaultTimeRange, GrafanaTheme2, QueryEditorProps } from '@grafana/data';
import { config } from '@grafana/runtime';
import { Alert, InlineField, InlineLabel, Input, QueryField, useStyles2 } from '@grafana/ui';
import { Alert, ConfirmModal, InlineField, InlineLabel, Input, QueryField, useStyles2 } from '@grafana/ui';
import { ElasticsearchDataQuery } from '../../dataquery.gen';
import { ElasticDatasource } from '../../datasource';
import { useNextId } from '../../hooks/useNextId';
import { useDispatch } from '../../hooks/useStatelessReducer';
import { ElasticsearchOptions } from '../../types';
import { EditorType, ElasticsearchOptions } from '../../types';
import { isSupportedVersion, isTimeSeriesQuery, unsupportedVersionMessage } from '../../utils';
import { BucketAggregationsEditor } from './BucketAggregationsEditor';
@@ -20,7 +20,7 @@ import { MetricAggregationsEditor } from './MetricAggregationsEditor';
import { metricAggregationConfig } from './MetricAggregationsEditor/utils';
import { QueryTypeSelector } from './QueryTypeSelector';
import { RawQueryEditor } from './RawQueryEditor';
import { changeAliasPattern, changeQuery, changeRawDSLQuery } from './state';
import { changeAliasPattern, changeEditorTypeAndResetQuery, changeQuery, changeRawDSLQuery } from './state';
export type ElasticQueryEditorProps = QueryEditorProps<ElasticDatasource, ElasticsearchDataQuery, ElasticsearchOptions>;
@@ -97,31 +97,61 @@ const QueryEditorForm = ({ value, onRunQuery }: Props & { onRunQuery: () => void
const inputId = useId();
const styles = useStyles2(getStyles);
const [switchModalOpen, setSwitchModalOpen] = useState(false);
const [pendingEditorType, setPendingEditorType] = useState<EditorType | null>(null);
const isTimeSeries = isTimeSeriesQuery(value);
const isCodeEditor = value.editorType === 'code';
const rawDSLFeatureEnabled = config.featureToggles.elasticsearchRawDSLQuery;
// Default to 'builder' if editorType is empty
const currentEditorType: EditorType = value.editorType === 'code' ? 'code' : 'builder';
const showBucketAggregationsEditor = value.metrics?.every(
(metric) => metricAggregationConfig[metric.type].impliedQueryType === 'metrics'
);
// Stage the requested editor type and ask for confirmation first: switching
// editors resets the query (per the ConfirmModal copy), so it must not be
// applied directly.
const onEditorTypeChange = useCallback((newEditorType: EditorType) => {
// Show warning modal when switching modes
setPendingEditorType(newEditorType);
setSwitchModalOpen(true);
}, []);
// User confirmed: apply the staged editor type (which resets the query) and
// clear the modal state.
const confirmEditorTypeChange = useCallback(() => {
if (pendingEditorType) {
dispatch(changeEditorTypeAndResetQuery(pendingEditorType));
}
setSwitchModalOpen(false);
setPendingEditorType(null);
}, [dispatch, pendingEditorType]);
// User dismissed: drop the staged switch and keep the current editor.
const cancelEditorTypeChange = useCallback(() => {
setSwitchModalOpen(false);
setPendingEditorType(null);
}, []);
return (
<>
<ConfirmModal
isOpen={switchModalOpen}
title="Switch editor"
body="Switching between editors will reset your query. Are you sure you want to continue?"
confirmText="Continue"
onConfirm={confirmEditorTypeChange}
onDismiss={cancelEditorTypeChange}
/>
<div className={styles.root}>
<InlineLabel width={17}>Query type</InlineLabel>
<div className={styles.queryItem}>
<QueryTypeSelector />
</div>
</div>
{rawDSLFeatureEnabled && (
<div className={styles.root}>
<InlineLabel width={17}>Editor type</InlineLabel>
<div className={styles.queryItem}>
<EditorTypeSelector />
{rawDSLFeatureEnabled && (
<div style={{ marginLeft: 'auto' }}>
<EditorTypeSelector value={currentEditorType} onChange={onEditorTypeChange} />
</div>
</div>
)}
)}
</div>
{isCodeEditor && rawDSLFeatureEnabled && (
<RawQueryEditor

View File

@@ -6383,12 +6383,15 @@
},
"resource-export": {
"label": {
"advanced-options": "Advanced options",
"classic": "Classic",
"json": "JSON",
"v1-resource": "V1 Resource",
"v2-resource": "V2 Resource",
"yaml": "YAML"
}
},
"share-externally": "Share dashboard with another instance",
"share-externally-tooltip": "Removes all instance-specific metadata and data source references from the resource before export."
},
"revert-dashboard-modal": {
"body-restore-version": "Are you sure you want to restore the dashboard to version {{version}}? All unsaved changes will be lost.",
@@ -7842,7 +7845,6 @@
"export-externally-label": "Export the dashboard to use in another instance",
"export-format": "Format",
"export-mode": "Model",
"export-remove-ds-refs": "Remove deployment details",
"info-text": "Copy or download a file containing the definition of your dashboard",
"title": "Export dashboard"
},