Merge branch 'main' into svennergr/drawer-sidebar-aware

This commit is contained in:
Sven Grossmann
2025-11-24 10:53:05 +01:00
256 changed files with 6319 additions and 7862 deletions
+3 -7
View File
@@ -29,13 +29,6 @@ SecureValueSpec: {
// +optional
ref?: string & strings.MinRunes(1) & strings.MaxRunes(1024)
// Name of the keeper, being the actual storage of the secure value.
// If not specified, the default keeper for the namespace will be used.
// +k8s:validation:minLength=1
// +k8s:validation:maxLength=253
// +optional
keeper?: string & strings.MinRunes(1) & strings.MaxRunes(253)
// The Decrypters that are allowed to decrypt this secret.
// An empty list means no service can decrypt it.
// +k8s:validation:maxItems=64
@@ -53,4 +46,7 @@ SecureValueStatus: {
// External ID where the secret is stored. Cannot be set.
// +optional
externalID: string
// The name of the keeper used to create the secure value. Cannot be set.
keeper: string
}
@@ -28,7 +28,7 @@ var SecureValuesResourceInfo = utils.NewResourceInfo(
},
Reader: func(obj any) ([]any, error) {
if r, ok := obj.(*SecureValue); ok {
return []any{r.Name, r.Spec.Description, r.Spec.Keeper, r.Spec.Ref}, nil
return []any{r.Name, r.Spec.Description, r.Status.Keeper, r.Spec.Ref}, nil
}
return nil, fmt.Errorf("expected SecureValue but got %T", obj)
@@ -25,12 +25,6 @@ type SecureValueSpec struct {
// +k8s:validation:maxLength=1024
// +optional
Ref *string `json:"ref,omitempty"`
// Name of the keeper, being the actual storage of the secure value.
// If not specified, the default keeper for the namespace will be used.
// +k8s:validation:minLength=1
// +k8s:validation:maxLength=253
// +optional
Keeper *string `json:"keeper,omitempty"`
// The Decrypters that are allowed to decrypt this secret.
// An empty list means no service can decrypt it.
// +k8s:validation:maxItems=64
@@ -25,12 +25,14 @@ type SecureValueStatus struct {
// Version of the secure value. Cannot be set.
// +optional
Version int64 `json:"version"`
// operatorStates is a map of operator ID to operator state evaluations.
// Any operator which consumes this kind SHOULD add its state evaluation information to this field.
OperatorStates map[string]SecureValuestatusOperatorState `json:"operatorStates,omitempty"`
// External ID where the secret is stored. Cannot be set.
// +optional
ExternalID string `json:"externalID"`
// operatorStates is a map of operator ID to operator state evaluations.
// Any operator which consumes this kind SHOULD add its state evaluation information to this field.
OperatorStates map[string]SecureValuestatusOperatorState `json:"operatorStates,omitempty"`
// The name of the keeper used to create the secure value. Cannot be set.
Keeper string `json:"keeper"`
// additionalFields is reserved for future use
AdditionalFields map[string]interface{} `json:"additionalFields,omitempty"`
}
+11 -11
View File
@@ -587,15 +587,6 @@ func schema_pkg_apis_secret_v1beta1_SecureValueSpec(ref common.ReferenceCallback
Format: "",
},
},
"keeper": {
SchemaProps: spec.SchemaProps{
Description: "Name of the keeper, being the actual storage of the secure value. If not specified, the default keeper for the namespace will be used.",
MinLength: ptr.To[int64](1),
MaxLength: ptr.To[int64](253),
Type: []string{"string"},
Format: "",
},
},
"decrypters": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
@@ -639,6 +630,14 @@ func schema_pkg_apis_secret_v1beta1_SecureValueStatus(ref common.ReferenceCallba
Format: "int64",
},
},
"externalID": {
SchemaProps: spec.SchemaProps{
Description: "External ID where the secret is stored. Cannot be set.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"operatorStates": {
SchemaProps: spec.SchemaProps{
Description: "operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field.",
@@ -654,9 +653,9 @@ func schema_pkg_apis_secret_v1beta1_SecureValueStatus(ref common.ReferenceCallba
},
},
},
"externalID": {
"keeper": {
SchemaProps: spec.SchemaProps{
Description: "External ID where the secret is stored. Cannot be set.",
Description: "The name of the keeper used to create the secure value. Cannot be set.",
Default: "",
Type: []string{"string"},
Format: "",
@@ -678,6 +677,7 @@ func schema_pkg_apis_secret_v1beta1_SecureValueStatus(ref common.ReferenceCallba
},
},
},
Required: []string{"keeper"},
},
},
Dependencies: []string{
@@ -29,7 +29,7 @@ refs:
- pattern: /docs/grafana/
destination: docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/access-control/custom-role-actions-scopes/#cloud-access-policies-action-definitions
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/access-control/custom-role-actions-scopes/#cloud-access-policies-action-definitions
destination: /docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/access-control/custom-role-actions-scopes/#create-access-policies
rbac-role-definitions:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/roles-and-permissions/access-control/rbac-fixed-basic-role-definitions/
@@ -66,16 +66,17 @@ Please refer to plugin documentation to see what RBAC permissions the plugin has
The following list contains app plugins that have fine-grained RBAC support.
| App plugin | App plugin ID | App plugin permission documentation |
| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| [Access policies](https://grafana.com/docs/grafana-cloud/account-management/authentication-and-permissions/access-policies/) | `grafana-auth-app` | [RBAC actions for Access Policies](ref:cloud-access-policies-action-definitions) |
| [Adaptive metrics](https://grafana.com/docs/grafana-cloud/cost-management-and-billing/reduce-costs/metrics-costs/control-metrics-usage-via-adaptive-metrics/adaptive-metrics-plugin/) | `grafana-adaptive-metrics-app` | [RBAC actions for Adaptive Metrics](ref:adaptive-metrics-permissions) |
| [Incident](https://grafana.com/docs/grafana-cloud/alerting-and-irm/irm/incident/) | `grafana-incident-app` | n/a |
| [OnCall](https://grafana.com/docs/grafana-cloud/alerting-and-irm/irm/oncall/) | `grafana-oncall-app` | [Configure RBAC for OnCall](https://grafana.com/docs/grafana-cloud/alerting-and-irm/irm/oncall/manage/user-and-team-management/#manage-users-and-teams-for-grafana-oncall) |
| [Performance Testing (K6)](https://grafana.com/docs/grafana-cloud/testing/k6/) | `k6-app` | [Configure RBAC for K6](https://grafana.com/docs/grafana-cloud/testing/k6/projects-and-users/configure-rbac/) |
| [Private data source connect (PDC)](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) | `grafana-pdc-app` | n/a |
| [Service Level Objective (SLO)](https://grafana.com/docs/grafana-cloud/alerting-and-irm/slo/) | `grafana-slo-app` | [Configure RBAC for SLO](https://grafana.com/docs/grafana-cloud/alerting-and-irm/slo/set-up/rbac/) |
| [Cloud Provider](https://grafana.com/docs/grafana-cloud/monitor-infrastructure/monitor-cloud-provider/) | `grafana-csp-app` | [Cloud Provider Observability role-based access control](https://grafana.com/docs/grafana-cloud/monitor-infrastructure/monitor-cloud-provider/rbac/) |
| App plugin | App plugin ID | App plugin permission documentation |
| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| [Access policies](https://grafana.com/docs/grafana-cloud/account-management/authentication-and-permissions/access-policies/) | `grafana-auth-app` | [RBAC actions for Access Policies](ref:cloud-access-policies-action-definitions) |
| [Adaptive Metrics](https://grafana.com/docs/grafana-cloud/cost-management-and-billing/reduce-costs/metrics-costs/control-metrics-usage-via-adaptive-metrics/adaptive-metrics-plugin/) | `grafana-adaptive-metrics-app` | [RBAC actions for Adaptive Metrics](ref:adaptive-metrics-permissions) |
| [Cloud Provider](https://grafana.com/docs/grafana-cloud/monitor-infrastructure/monitor-cloud-provider/) | `grafana-csp-app` | [Cloud Provider Observability role-based access control](https://grafana.com/docs/grafana-cloud/monitor-infrastructure/monitor-cloud-provider/rbac/) |
| [Incident](https://grafana.com/docs/grafana-cloud/alerting-and-irm/irm/incident/) | `grafana-incident-app` | n/a |
| [Kubernetes Monitoring](/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/) | `grafana-k8s-app` | [Kubernetes Monitoring role-based access control](/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/configuration/control-access/#precision-access-with-rbac-custom-plugin-roles) |
| [OnCall](https://grafana.com/docs/grafana-cloud/alerting-and-irm/irm/oncall/) | `grafana-oncall-app` | [Configure RBAC for OnCall](https://grafana.com/docs/grafana-cloud/alerting-and-irm/irm/oncall/manage/user-and-team-management/#manage-users-and-teams-for-grafana-oncall) |
| [Performance Testing (K6)](https://grafana.com/docs/grafana-cloud/testing/k6/) | `k6-app` | [Configure RBAC for K6](https://grafana.com/docs/grafana-cloud/testing/k6/projects-and-users/configure-rbac/) |
| [Private data source connect (PDC)](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) | `grafana-pdc-app` | n/a |
| [Service Level Objective (SLO)](https://grafana.com/docs/grafana-cloud/alerting-and-irm/slo/) | `grafana-slo-app` | [Configure RBAC for SLO](https://grafana.com/docs/grafana-cloud/alerting-and-irm/slo/set-up/rbac/) |
### Revoke fine-grained access from app plugins
@@ -62,9 +62,9 @@ refs:
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-CloudWatch/aws-authentication/
private-data-source-connect:
- pattern: /docs/grafana/
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
- pattern: /docs/grafana-cloud/
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
configure-pdc:
- pattern: /docs/grafana/
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
+20 -10
View File
@@ -7,13 +7,13 @@ test.describe(
tag: ['@acceptance'],
},
() => {
test.skip('Tests each panel type in the panel edit view to ensure no crash', async ({
test('Tests each panel type in the panel edit view to ensure no crash', async ({
gotoDashboardPage,
selectors,
page,
}) => {
// this test can absolutely take longer than the default 30s timeout
test.setTimeout(60000);
test.setTimeout(120000);
// Create new dashboard
const dashboardPage = await gotoDashboardPage({});
@@ -29,22 +29,32 @@ test.describe(
return win.grafanaBootData?.settings?.panels ?? {};
});
const vizPicker = dashboardPage.getByGrafanaSelector(selectors.components.PanelEditor.toggleVizPicker);
// Loop through every panel type and ensure no crash
for (const [_, panel] of Object.entries(panelTypes)) {
if (panel.hideFromList || panel.state === 'deprecated') {
continue; // Skip hidden and deprecated panels
}
// Select the panel type in the viz picker
const vizPicker = dashboardPage.getByGrafanaSelector(selectors.components.PanelEditor.toggleVizPicker);
await vizPicker.click();
await dashboardPage.getByGrafanaSelector(selectors.components.PluginVisualization.item(panel.name)).click();
try {
// Select the panel type in the viz picker
await expect(vizPicker).toBeVisible();
await vizPicker.click({ force: true });
// Verify panel type is selected
await expect(vizPicker).toHaveText(panel.name);
await dashboardPage.getByGrafanaSelector(selectors.components.PluginVisualization.item(panel.name)).click();
// Ensure no unexpected error occurred
await expect(page.getByText('An unexpected error happened')).toBeHidden();
// Verify panel type is selected
await expect(vizPicker).toHaveText(panel.name, { timeout: 10000 });
// Wait for panel to finish rendering
await expect(page.getByLabel('Panel loading bar')).toHaveCount(0, { timeout: 10000 });
// Ensure no unexpected error occurred
await expect(page.getByText('An unexpected error happened')).toBeHidden();
} catch (error) {
throw new Error(`Panel '${panel.name}' failed: ${error}`);
}
}
});
}
-10
View File
@@ -3706,16 +3706,6 @@
"count": 7
}
},
"public/app/plugins/datasource/azuremonitor/types/query.ts": {
"no-barrel-files/no-barrel-files": {
"count": 3
}
},
"public/app/plugins/datasource/azuremonitor/types/templateVariables.ts": {
"no-barrel-files/no-barrel-files": {
"count": 1
}
},
"public/app/plugins/datasource/azuremonitor/utils/common.ts": {
"@typescript-eslint/consistent-type-assertions": {
"count": 1
+2 -2
View File
@@ -52,6 +52,7 @@ require (
github.com/crewjam/saml v0.4.14 // @grafana/identity-access-team
github.com/dgraph-io/badger/v4 v4.7.0 // @grafana/grafana-search-and-storage
github.com/dlmiddlecote/sqlstats v1.0.2 // @grafana/grafana-backend-group
github.com/docker/go-connections v0.6.0 // @grafana/grafana-app-platform-squad
github.com/dolthub/go-mysql-server v0.19.1-0.20250410182021-5632d67cd46e // @grafana/grafana-datasources-core-services
github.com/dolthub/vitess v0.0.0-20250930230441-70c2c6a98e33 // @grafana/grafana-datasources-core-services
github.com/dustin/go-humanize v1.0.1 // @grafana/observability-traces-and-profiling
@@ -138,7 +139,6 @@ require (
github.com/matttproud/golang_protobuf_extensions v1.0.4 // @grafana/alerting-backend
github.com/microsoft/go-mssqldb v1.9.2 // @grafana/partner-datasources
github.com/migueleliasweb/go-github-mock v1.1.0 // @grafana/grafana-git-ui-sync-team
github.com/mitchellh/copystructure v1.2.0 // @grafana/grafana-operator-experience-squad
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c //@grafana/identity-access-team
github.com/mocktools/go-smtp-mock/v2 v2.5.1 // @grafana/grafana-backend-group
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // @grafana/alerting-backend
@@ -405,7 +405,6 @@ require (
github.com/diegoholiveira/jsonlogic/v3 v3.7.4 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/docker v28.4.0+incompatible // indirect
github.com/docker/go-connections v0.6.0 // indirect; @grafana/grafana-app-platform-squad
github.com/docker/go-units v0.5.0 // indirect
github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 // indirect
github.com/dolthub/jsonpath v0.0.2-0.20240227200619-19675ab05c71 // indirect
@@ -515,6 +514,7 @@ require (
github.com/miekg/dns v1.1.63 // indirect
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
+2 -2
View File
@@ -296,8 +296,8 @@
"@grafana/plugin-ui": "^0.11.1",
"@grafana/prometheus": "workspace:*",
"@grafana/runtime": "workspace:*",
"@grafana/scenes": "^6.46.0",
"@grafana/scenes-react": "^6.46.0",
"@grafana/scenes": "6.47.1",
"@grafana/scenes-react": "6.47.1",
"@grafana/schema": "workspace:*",
"@grafana/sql": "workspace:*",
"@grafana/ui": "workspace:*",
@@ -646,7 +646,7 @@ export interface MetricFindValue {
}
export interface DataSourceGetDrilldownsApplicabilityOptions<TQuery extends DataQuery = DataQuery> {
filters: AdHocVariableFilter[];
filters?: AdHocVariableFilter[];
groupByKeys?: string[];
timeRange?: TimeRange;
queries?: TQuery[];
+9 -5
View File
@@ -365,6 +365,10 @@ export interface FeatureToggles {
*/
unlimitedLayoutsNesting?: boolean;
/**
* Enables viewing non-applicable drilldowns on a panel level
*/
perPanelNonApplicableDrilldowns?: boolean;
/**
* Enables use of the `systemPanelFilterVar` variable to filter panels in a dashboard
*/
panelFilterVariable?: boolean;
@@ -596,7 +600,7 @@ export interface FeatureToggles {
*/
alertingPrometheusRulesPrimary?: boolean;
/**
* Used in Logs Drilldown to split queries into multiple queries based on the number of shards
* Deprecated. Replace with lokiShardSplitting. Used in Logs Drilldown to split queries into multiple queries based on the number of shards
*/
exploreLogsShardSplitting?: boolean;
/**
@@ -918,10 +922,6 @@ export interface FeatureToggles {
*/
grafanaAssistantInProfilesDrilldown?: boolean;
/**
* Enables using PGX instead of libpq for PostgreSQL datasource
*/
postgresDSUsePGX?: boolean;
/**
* Enables creating alerts from Tempo data source
*/
tempoAlerting?: boolean;
@@ -1178,6 +1178,10 @@ export interface FeatureToggles {
*/
ttlPluginInstanceManager?: boolean;
/**
* Send X-Loki-Query-Limits-Context header to Loki on first split request
*/
lokiQueryLimitsContext?: boolean;
/**
* Enables the new version of rudderstack
* @default false
*/
@@ -426,6 +426,9 @@ export const versionedComponents = {
loadingBar: {
'10.0.0': () => `Panel loading bar`,
},
PanelNonApplicableDrilldownsSubHeader: {
'12.4.0': 'Panel non-applicable drilldowns subheader',
},
HoverWidget: {
container: {
'10.1.0': 'data-testid hover-header-container',
@@ -50,6 +50,14 @@ export interface LokiDataQuery extends common.DataQuery {
* Used to override the name of the series.
*/
legendFormat?: string;
/**
* The full query plan for split/shard queries. Encoded and sent to Loki via `X-Loki-Query-Limits-Context` header. Requires "lokiQueryLimitsContext" feature flag
*/
limitsContext?: {
expr: string;
from: number;
to: number;
};
/**
* Used to limit the number of log rows returned.
*/
@@ -421,6 +421,70 @@ Component used for rendering content wrapped in the same style as grafana panels
</PanelChrome>
</ExampleFrame>
### Sub-header content
The panel supports displaying additional content below the main header using the `subHeaderContent` prop. This can be used to show context-specific information.
```tsx
<PanelChrome
title="My awesome panel title"
subHeaderContent={
<div style={{ display: 'flex', gap: '8px', padding: '4px 8px' }}>
<span style={{ fontSize: '12px', color: '#999' }}>Additional info</span>
</div>
}
width={400}
height={200}
>
{(innerwidth, innerheight) => {
return (
<div
style={{
width: innerwidth,
height: innerheight,
background: 'rgba(230,0,0,0.05)',
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
}}
>
Content
</div>
);
}}
</PanelChrome>
```
<ExampleFrame>
<PanelChrome
title="My awesome panel title"
subHeaderContent={
<div style={{ display: 'flex', gap: '8px', padding: '4px 8px' }}>
<span style={{ fontSize: '12px', color: '#999' }}>Additional info</span>
</div>
}
width={400}
height={200}
>
{(innerwidth, innerheight) => {
return (
<div
style={{
width: innerwidth,
height: innerheight,
background: 'rgba(230,0,0,0.05)',
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
}}
>
Content
</div>
);
}}
</PanelChrome>
</ExampleFrame>
### Collapsible
The panel can be collapsed/expanded by clicking on the chevron or the title.
@@ -196,3 +196,30 @@ it('collapses the uncontrolled panel when user clicks on the chevron or the titl
expect(button).not.toHaveAttribute('aria-controlls');
expect(screen.queryByTestId(selectors.components.Panels.Panel.content)?.id).toBe(undefined);
});
it('renders panel with a header if prop subHeaderContent', () => {
setup({
subHeaderContent: <div key="sub-header-test">This should be a sub-header node</div>,
});
expect(screen.getByTestId(selectors.components.Panels.Panel.headerContainer)).toBeInTheDocument();
});
it('renders panel with sub-header content in place if prop subHeaderContent', () => {
setup({
subHeaderContent: <div key="sub-header-test">This should be a sub-header node</div>,
});
expect(screen.getByText('This should be a sub-header node')).toBeInTheDocument();
});
it('does not render sub-header content when panel is collapsed', () => {
setup({
title: 'Test Panel',
collapsible: true,
collapsed: true,
subHeaderContent: <div key="sub-header-test">This should be a sub-header node</div>,
});
expect(screen.queryByText('This should be a sub-header node')).not.toBeInTheDocument();
});
@@ -76,6 +76,11 @@ interface BaseProps {
* If true, the VizPanelMenu will always be visible in the panel header. Defaults to false.
*/
showMenuAlways?: boolean;
/**
* Content to display in the sub-header area below the main header.
* Can contain text, pills, links, buttons, or any other React elements.
*/
subHeaderContent?: ReactNode;
}
interface FixedDimensions extends BaseProps {
@@ -157,6 +162,7 @@ export function PanelChrome({
onMouseEnter,
onDragStart,
showMenuAlways = false,
subHeaderContent,
}: PanelChromeProps) {
const theme = useTheme2();
const styles = useStyles2(getStyles);
@@ -164,6 +170,7 @@ export function PanelChrome({
const panelTitleId = useId().replace(/:/g, '_');
const { isSelected, onSelect, isSelectable } = useElementSelection(selectionId);
const pointerDistance = usePointerDistance();
const [subHeaderRef, { height: measuredSubHeaderHeight }] = useMeasure<HTMLDivElement>();
const hasHeader = !hoverHeader;
@@ -184,11 +191,13 @@ export function PanelChrome({
const isPanelTransparent = displayMode === 'transparent';
const headerHeight = getHeaderHeight(theme, hasHeader);
const subHeaderHeight = Math.min(measuredSubHeaderHeight, headerHeight);
const { contentStyle, innerWidth, innerHeight } = getContentStyle(
padding,
theme,
headerHeight,
collapsed,
subHeaderHeight,
height,
width
);
@@ -395,37 +404,44 @@ export function PanelChrome({
)}
{hasHeader && (
<div
className={cx(styles.headerContainer, dragClass)}
style={headerStyles}
data-testid={selectors.components.Panels.Panel.headerContainer}
onPointerDown={onPointerDown}
onMouseEnter={isSelectable ? onHeaderEnter : undefined}
onMouseLeave={isSelectable ? onHeaderLeave : undefined}
onPointerUp={onPointerUp}
>
{statusMessage && (
<div className={dragClassCancel}>
<PanelStatus
message={statusMessage}
onClick={statusMessageOnClick}
ariaLabel={t('grafana-ui.panel-chrome.ariaLabel-panel-status', 'Panel status')}
<>
<div
className={cx(styles.headerContainer, dragClass)}
style={headerStyles}
data-testid={selectors.components.Panels.Panel.headerContainer}
onPointerDown={onPointerDown}
onMouseEnter={isSelectable ? onHeaderEnter : undefined}
onMouseLeave={isSelectable ? onHeaderLeave : undefined}
onPointerUp={onPointerUp}
>
{statusMessage && (
<div className={dragClassCancel}>
<PanelStatus
message={statusMessage}
onClick={statusMessageOnClick}
ariaLabel={t('grafana-ui.panel-chrome.ariaLabel-panel-status', 'Panel status')}
/>
</div>
)}
{headerContent}
{menu && (
<PanelMenu
menu={menu}
title={typeof title === 'string' ? title : undefined}
placement="bottom-end"
menuButtonClass={cx(styles.menuItem, dragClassCancel, showOnHoverClass)}
onOpenMenu={onOpenMenu}
/>
)}
</div>
{!collapsed && subHeaderContent && (
<div className={styles.subHeader} ref={subHeaderRef}>
{subHeaderContent}
</div>
)}
{headerContent}
{menu && (
<PanelMenu
menu={menu}
title={typeof title === 'string' ? title : undefined}
placement="bottom-end"
menuButtonClass={cx(styles.menuItem, dragClassCancel, showOnHoverClass)}
onOpenMenu={onOpenMenu}
/>
)}
</div>
</>
)}
{!collapsed && (
@@ -466,6 +482,7 @@ const getContentStyle = (
theme: GrafanaTheme2,
headerHeight: number,
collapsed: boolean,
subHeaderHeight: number,
height?: number,
width?: number
) => {
@@ -481,7 +498,7 @@ const getContentStyle = (
let innerHeight = 0;
if (height) {
innerHeight = height - headerHeight - panelPadding - panelBorder;
innerHeight = height - headerHeight - panelPadding - panelBorder - subHeaderHeight;
}
if (collapsed) {
@@ -579,6 +596,15 @@ const getStyles = (theme: GrafanaTheme2) => {
padding: newPanelPadding ? theme.spacing(0, 1, 0, 1.5) : theme.spacing(0, 0.5, 0, 1),
gap: theme.spacing(1),
}),
subHeader: css({
label: 'panel-sub-header',
display: 'flex',
alignItems: 'center',
maxHeight: theme.spacing.gridSize * theme.components.panel.headerHeight,
padding: newPanelPadding ? theme.spacing(0, 1, 0, 1.5) : theme.spacing(0, 0.5, 0, 1),
overflow: 'hidden',
gap: theme.spacing(1),
}),
pointer: css({
cursor: 'pointer',
}),
+2
View File
@@ -313,6 +313,7 @@ func (hs *HTTPServer) declareFixedRoles() error {
Grants: []string{string(org.RoleEditor)},
}
//nolint:staticcheck // not yet migrated to OpenFeature
if hs.Features.IsEnabled(context.Background(), featuremgmt.FlagAnnotationPermissionUpdate) {
// Keeping the name to avoid breaking changes (for users who have assigned this role to grant permissions on organization annotations)
annotationsReaderRole = ac.RoleRegistration{
@@ -619,6 +620,7 @@ func (hs *HTTPServer) declareFixedRoles() error {
libraryPanelsReaderRole, libraryPanelsWriterRole, libraryPanelsGeneralReaderRole, libraryPanelsGeneralWriterRole,
snapshotsCreatorRole, snapshotsDeleterRole, snapshotsReaderRole}
//nolint:staticcheck // not yet migrated to OpenFeature
if hs.Features.IsEnabled(context.Background(), featuremgmt.FlagAnnotationPermissionUpdate) {
allAnnotationsReaderRole := ac.RoleRegistration{
Role: ac.RoleDTO{
+9
View File
@@ -126,6 +126,7 @@ func (hs *HTTPServer) PostAnnotation(c *contextmodel.ReqContext) response.Respon
}
if canSave, err := hs.canCreateAnnotation(c, cmd.DashboardUID); err != nil || !canSave {
//nolint:staticcheck // not yet migrated to OpenFeature
if !hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagAnnotationPermissionUpdate) {
return dashboardGuardianResponse(err)
} else if err != nil {
@@ -271,6 +272,7 @@ func (hs *HTTPServer) UpdateAnnotation(c *contextmodel.ReqContext) response.Resp
return resp
}
//nolint:staticcheck // not yet migrated to OpenFeature
if !hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagAnnotationPermissionUpdate) {
if canSave, err := hs.canSaveAnnotation(c, hs.AccessControl, annotation); err != nil || !canSave {
return dashboardGuardianResponse(err)
@@ -329,6 +331,7 @@ func (hs *HTTPServer) PatchAnnotation(c *contextmodel.ReqContext) response.Respo
return resp
}
//nolint:staticcheck // not yet migrated to OpenFeature
if !hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagAnnotationPermissionUpdate) {
if canSave, err := hs.canSaveAnnotation(c, hs.AccessControl, annotation); err != nil || !canSave {
return dashboardGuardianResponse(err)
@@ -439,6 +442,7 @@ func (hs *HTTPServer) MassDeleteAnnotations(c *contextmodel.ReqContext) response
canSave, err := hs.canMassDeleteAnnotations(c, dashboardUID)
if err != nil || !canSave {
//nolint:staticcheck // not yet migrated to OpenFeature
if !hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagAnnotationPermissionUpdate) {
return dashboardGuardianResponse(err)
} else if err != nil {
@@ -500,6 +504,7 @@ func (hs *HTTPServer) DeleteAnnotationByID(c *contextmodel.ReqContext) response.
return response.Error(http.StatusBadRequest, "annotationId is invalid", err)
}
//nolint:staticcheck // not yet migrated to OpenFeature
if !hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagAnnotationPermissionUpdate) {
annotation, resp := findAnnotationByID(c.Req.Context(), hs.annotationsRepo, annotationID, c.SignedInUser)
if resp != nil {
@@ -610,6 +615,7 @@ func AnnotationTypeScopeResolver(annotationsRepo annotations.Repository, feature
},
}
//nolint:staticcheck // not yet migrated to OpenFeature
if features.IsEnabled(ctx, featuremgmt.FlagAnnotationPermissionUpdate) {
tempUser = &user.SignedInUser{
OrgID: orgID,
@@ -626,6 +632,7 @@ func AnnotationTypeScopeResolver(annotationsRepo annotations.Repository, feature
return nil, errors.New("could not resolve annotation type")
}
//nolint:staticcheck // not yet migrated to OpenFeature
if !features.IsEnabled(ctx, featuremgmt.FlagAnnotationPermissionUpdate) {
switch annotation.GetType() {
case annotations.Organization:
@@ -662,6 +669,7 @@ func AnnotationTypeScopeResolver(annotationsRepo annotations.Repository, feature
}
func (hs *HTTPServer) canCreateAnnotation(c *contextmodel.ReqContext, dashboardUID string) (bool, error) {
//nolint:staticcheck // not yet migrated to OpenFeature
if hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagAnnotationPermissionUpdate) {
if dashboardUID != "" {
evaluator := accesscontrol.EvalPermission(accesscontrol.ActionAnnotationsCreate, dashboards.ScopeDashboardsProvider.GetResourceScopeUID(dashboardUID))
@@ -686,6 +694,7 @@ func (hs *HTTPServer) canCreateAnnotation(c *contextmodel.ReqContext, dashboardU
}
func (hs *HTTPServer) canMassDeleteAnnotations(c *contextmodel.ReqContext, dashboardUID string) (bool, error) {
//nolint:staticcheck // not yet migrated to OpenFeature
if hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagAnnotationPermissionUpdate) {
if dashboardUID == "" {
evaluator := accesscontrol.EvalPermission(accesscontrol.ActionAnnotationsDelete, accesscontrol.ScopeAnnotationsTypeOrganization)
+1
View File
@@ -166,6 +166,7 @@ func (hs *HTTPServer) GetDashboard(c *contextmodel.ReqContext) response.Response
}
annotationPermissions := &dashboardsV1.AnnotationPermission{}
//nolint:staticcheck // not yet migrated to OpenFeature
if hs.Features.IsEnabled(ctx, featuremgmt.FlagAnnotationPermissionUpdate) {
hs.getAnnotationPermissionsByScope(c, &annotationPermissions.Dashboard, dashboards.ScopeDashboardsProvider.GetResourceScopeUID(dash.UID))
} else {
+4
View File
@@ -150,6 +150,7 @@ func (hs *HTTPServer) getFrontendSettings(c *contextmodel.ReqContext) (*dtos.Fro
continue
}
//nolint:staticcheck // not yet migrated to OpenFeature
if panel.ID == "datagrid" && !hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagEnableDatagridEditing) {
continue
}
@@ -190,6 +191,7 @@ func (hs *HTTPServer) getFrontendSettings(c *contextmodel.ReqContext) (*dtos.Fro
hasAccess := accesscontrol.HasAccess(hs.AccessControl, c)
trustedTypesDefaultPolicyEnabled := (hs.Cfg.CSPEnabled && strings.Contains(hs.Cfg.CSPTemplate, "require-trusted-types-for")) || (hs.Cfg.CSPReportOnlyEnabled && strings.Contains(hs.Cfg.CSPReportOnlyTemplate, "require-trusted-types-for"))
//nolint:staticcheck // not yet migrated to OpenFeature
isCloudMigrationTarget := hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagOnPremToCloudMigrations) && hs.Cfg.CloudMigration.IsTarget
featureToggles := hs.Features.GetEnabled(c.Req.Context())
// this is needed for backwards compatibility with external plugins
@@ -406,6 +408,7 @@ func (hs *HTTPServer) getFrontendSettings(c *contextmodel.ReqContext) (*dtos.Fro
DisableSignoutMenu: hs.Cfg.DisableSignoutMenu,
}
//nolint:staticcheck // not yet migrated to OpenFeature
if hs.Cfg.PasswordlessMagicLinkAuth.Enabled && hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagPasswordlessMagicLinkAuthentication) {
hasEnabledProviders := hs.samlEnabled() || hs.authnService.IsClientEnabled(authn.ClientLDAP)
@@ -444,6 +447,7 @@ func (hs *HTTPServer) getFrontendSettings(c *contextmodel.ReqContext) (*dtos.Fro
frontendSettings.Namespace = hs.namespacer(c.OrgID)
// experimental scope features
//nolint:staticcheck // not yet migrated to OpenFeature
if hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagScopeFilters) {
frontendSettings.ListScopesEndpoint = hs.Cfg.ScopesListScopesURL
frontendSettings.ListDashboardScopesEndpoint = hs.Cfg.ScopesListDashboardsURL
+2
View File
@@ -62,6 +62,7 @@ func (hs *HTTPServer) setIndexViewData(c *contextmodel.ReqContext) (*dtos.IndexV
return nil, err
}
//nolint:staticcheck // not yet migrated to OpenFeature
if hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagIndividualCookiePreferences) {
if !prefs.Cookies("analytics") {
settings.GoogleAnalytics4Id = ""
@@ -94,6 +95,7 @@ func (hs *HTTPServer) setIndexViewData(c *contextmodel.ReqContext) (*dtos.IndexV
}
var regionalFormat string
//nolint:staticcheck // not yet migrated to OpenFeature
if hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagLocaleFormatPreference) {
regionalFormat = "en"
+1
View File
@@ -361,6 +361,7 @@ func (hs *HTTPServer) RedirectResponseWithError(c *contextmodel.ReqContext, err
func (hs *HTTPServer) redirectURLWithErrorCookie(c *contextmodel.ReqContext, err error) string {
setCookie := true
//nolint:staticcheck // not yet migrated to OpenFeature
if hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagIndividualCookiePreferences) {
var userID int64
if c.SignedInUser != nil && !c.IsNil() {
+1
View File
@@ -81,6 +81,7 @@ func (proxy *PluginProxy) HandleRequest() {
hasSlash := strings.HasSuffix(proxy.proxyPath, "/")
proxy.proxyPath = path
//nolint:staticcheck // not yet migrated to OpenFeature
if hasSlash && !strings.HasSuffix(path, "/") && proxy.features.IsEnabled(proxy.ctx.Req.Context(), featuremgmt.FlagPluginProxyPreserveTrailingSlash) {
proxy.proxyPath += "/"
}
+2
View File
@@ -144,6 +144,7 @@ func (hs *HTTPServer) GetPluginList(c *contextmodel.ReqContext) response.Respons
AngularDetected: pluginDef.Angular.Detected,
}
//nolint:staticcheck // not yet migrated to OpenFeature
if hs.Cfg.ManagedServiceAccountsEnabled && hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagExternalServiceAccounts) {
listItem.IAM = pluginDef.IAM
}
@@ -490,6 +491,7 @@ func (hs *HTTPServer) InstallPlugin(c *contextmodel.ReqContext) response.Respons
return response.ErrOrFallback(http.StatusInternalServerError, "Failed to install plugin", err)
}
//nolint:staticcheck // not yet migrated to OpenFeature
if hs.Cfg.ManagedServiceAccountsEnabled && hs.Features.IsEnabled(c.Req.Context(), featuremgmt.FlagExternalServiceAccounts) {
// This is a non-blocking function that verifies that the installer has
// the permissions that the plugin requests to have on Grafana.
+1
View File
@@ -29,6 +29,7 @@ func (c *ResultConverter) Convert(ctx context.Context,
}
var dt data.FrameType
//nolint:staticcheck // not yet migrated to OpenFeature
dt, useDataplane, _ := shouldUseDataplane(frames, logger, c.Features.IsEnabled(ctx, featuremgmt.FlagDisableSSEDataplane))
if useDataplane {
logger.Debug("Handling SSE data source query through dataplane", "datatype", dt)
+1
View File
@@ -70,6 +70,7 @@ func handleDataplaneFrames(ctx context.Context, tracer tracing.Tracer, features
case data.KindTimeSeries:
return handleDataplaneTimeseries(frames)
case data.KindNumeric:
//nolint:staticcheck // not yet migrated to OpenFeature
sortMetrics := !features.IsEnabled(ctx, featuremgmt.FlagDisableNumericMetricsSortingInExpressions)
return handleDataplaneNumeric(frames, sortMetrics)
default:
+1 -1
View File
@@ -68,7 +68,7 @@ type DataPipeline []Node
// map of the refId of the of each command
func (dp *DataPipeline) execute(c context.Context, now time.Time, s *Service) (mathexp.Vars, error) {
vars := make(mathexp.Vars)
//nolint:staticcheck // not yet migrated to OpenFeature
groupByDSFlag := s.features.IsEnabled(c, featuremgmt.FlagSseGroupByDatasource)
// Execute datasource nodes first, and grouped by datasource.
if groupByDSFlag {
+3 -3
View File
@@ -15,6 +15,7 @@ import (
_ "github.com/blugelabs/bluge"
_ "github.com/blugelabs/bluge_segment_api"
_ "github.com/crewjam/saml"
_ "github.com/docker/go-connections/nat"
_ "github.com/go-jose/go-jose/v4"
_ "github.com/gobwas/glob"
_ "github.com/googleapis/gax-go/v2"
@@ -30,6 +31,7 @@ import (
_ "github.com/spf13/cobra" // used by the standalone apiserver cli
_ "github.com/spyzhov/ajson"
_ "github.com/stretchr/testify/require"
_ "github.com/testcontainers/testcontainers-go"
_ "gocloud.dev/secrets/awskms"
_ "gocloud.dev/secrets/azurekeyvault"
_ "gocloud.dev/secrets/gcpkms"
@@ -54,9 +56,7 @@ import (
_ "github.com/grafana/e2e"
_ "github.com/grafana/gofpdf"
_ "github.com/grafana/gomemcache/memcache"
_ "github.com/grafana/tempo/pkg/traceql"
_ "github.com/grafana/grafana/apps/alerting/alertenrichment/pkg/apis/alertenrichment/v1beta1"
_ "github.com/grafana/grafana/apps/scope/pkg/apis/scope/v0alpha1"
_ "github.com/testcontainers/testcontainers-go"
_ "github.com/grafana/tempo/pkg/traceql"
)
+1
View File
@@ -64,6 +64,7 @@ func (l *loggerImpl) Middleware() web.Middleware {
// put the start time on context so we can measure it later.
r = r.WithContext(log.InitstartTime(r.Context(), time.Now()))
//nolint:staticcheck // not yet migrated to OpenFeature
if l.flags.IsEnabled(r.Context(), featuremgmt.FlagUnifiedRequestLog) {
r = r.WithContext(errutil.SetUnifiedLogging(r.Context()))
}
+1
View File
@@ -114,6 +114,7 @@ func RequestMetrics(features featuremgmt.FeatureToggles, cfg *setting.Cfg, promR
handler = "notfound"
} else {
// log requests where we could not identify handler so we can register them.
//nolint:staticcheck // not yet migrated to OpenFeature
if features.IsEnabled(r.Context(), featuremgmt.FlagLogRequestsInstrumentedAsUnknown) {
log.Warn("request instrumented as unknown", "path", r.URL.Path, "status_code", status)
}
@@ -237,7 +237,7 @@ func NewPlugin(pluginID string, cfg *setting.Cfg, httpClientProvider *httpclient
case Tempo:
svc = tempo.ProvideService(httpClientProvider, tracer)
case PostgreSQL:
svc = postgres.ProvideService(cfg)
svc = postgres.ProvideService()
case MySQL:
svc = mysql.ProvideService()
case MSSQL:
@@ -90,3 +90,8 @@ func (d *directResourceClient) Watch(ctx context.Context, in *resourcepb.WatchRe
func (d *directResourceClient) BulkProcess(ctx context.Context, opts ...grpc.CallOption) (resourcepb.BulkStore_BulkProcessClient, error) {
return nil, fmt.Errorf("BulkProcess not supported with direct resource client")
}
// RebuildIndexes implements resource.ResourceClient.
func (b *directResourceClient) RebuildIndexes(ctx context.Context, req *resourcepb.RebuildIndexesRequest, opts ...grpc.CallOption) (*resourcepb.RebuildIndexesResponse, error) {
return nil, fmt.Errorf("not implemented")
}
@@ -109,13 +109,14 @@ func ProvideMigratorDashboardAccessor(
features featuremgmt.FeatureToggles,
) MigrationDashboardAccessor {
return &dashboardSqlAccess{
sql: sql,
namespacer: claims.OrgNamespaceFormatter,
dashStore: nil, // not needed for migration
provisioning: provisioning,
dashboardPermissionSvc: nil, // not needed for migration
libraryPanelSvc: nil, // not needed for migration
accessControl: accessControl,
sql: sql,
namespacer: claims.OrgNamespaceFormatter,
dashStore: nil, // not needed for migration
provisioning: provisioning,
dashboardPermissionSvc: nil, // not needed for migration
libraryPanelSvc: nil, // not needed for migration
accessControl: accessControl,
//nolint:staticcheck // not yet migrated to OpenFeature
invalidDashboardParseFallbackEnabled: features.IsEnabled(context.Background(), featuremgmt.FlagScanRowInvalidDashboardParseFallbackEnabled),
}
}
@@ -132,14 +133,15 @@ func NewDashboardSQLAccess(sql legacysql.LegacyDatabaseProvider,
) *dashboardSqlAccess {
dashboardSearchClient := legacysearcher.NewDashboardSearchClient(dashStore, sorter)
return &dashboardSqlAccess{
sql: sql,
namespacer: namespacer,
dashStore: dashStore,
provisioning: provisioning,
dashboardSearchClient: *dashboardSearchClient,
dashboardPermissionSvc: dashboardPermissionSvc,
libraryPanelSvc: libraryPanelSvc,
accessControl: accessControl,
sql: sql,
namespacer: namespacer,
dashStore: dashStore,
provisioning: provisioning,
dashboardSearchClient: *dashboardSearchClient,
dashboardPermissionSvc: dashboardPermissionSvc,
libraryPanelSvc: libraryPanelSvc,
accessControl: accessControl,
//nolint:staticcheck // not yet migrated to OpenFeature
invalidDashboardParseFallbackEnabled: features.IsEnabled(context.Background(), featuremgmt.FlagScanRowInvalidDashboardParseFallbackEnabled),
}
}
@@ -1046,3 +1048,7 @@ func parseLibraryPanelRow(p panel) (dashboardV0.LibraryPanel, error) {
return item, nil
}
func (b *dashboardSqlAccess) RebuildIndexes(ctx context.Context, req *resourcepb.RebuildIndexesRequest) (*resourcepb.RebuildIndexesResponse, error) {
return nil, fmt.Errorf("not implemented")
}
@@ -33,9 +33,11 @@ func (b *DashboardsAPIBuilder) ValidateDashboardSpec(ctx context.Context, obj ru
case *v0.Dashboard:
errorOnSchemaMismatches = false // Never error for v0
case *v1.Dashboard:
//nolint:staticcheck // not yet migrated to OpenFeature
errorOnSchemaMismatches = !b.features.IsEnabled(ctx, featuremgmt.FlagDashboardDisableSchemaValidationV1)
case *v2alpha1.Dashboard:
case *v2beta1.Dashboard:
//nolint:staticcheck // not yet migrated to OpenFeature
errorOnSchemaMismatches = !b.features.IsEnabled(ctx, featuremgmt.FlagDashboardDisableSchemaValidationV2)
default:
return nil, fmt.Errorf("invalid dashboard type: %T", obj)
@@ -45,6 +47,7 @@ func (b *DashboardsAPIBuilder) ValidateDashboardSpec(ctx context.Context, obj ru
return nil, apierrors.NewBadRequest("Not supported: FieldValidationMode: Warn")
}
//nolint:staticcheck // not yet migrated to OpenFeature
alwaysLogSchemaValidationErrors := b.features.IsEnabled(ctx, featuremgmt.FlagDashboardSchemaValidationLogging)
var errors field.ErrorList
@@ -581,3 +581,8 @@ func (m *mockSearchClient) GetStats(ctx context.Context, in *resourcepb.Resource
func (m *mockSearchClient) Search(ctx context.Context, in *resourcepb.ResourceSearchRequest, opts ...grpc.CallOption) (*resourcepb.ResourceSearchResponse, error) {
return m.search, m.searchErr
}
// RebuildIndexes implements resourcepb.ResourceIndexClient.
func (m *mockSearchClient) RebuildIndexes(ctx context.Context, in *resourcepb.RebuildIndexesRequest, opts ...grpc.CallOption) (*resourcepb.RebuildIndexesResponse, error) {
return nil, fmt.Errorf("not implemented")
}
+6 -1
View File
@@ -10,6 +10,9 @@ import (
)
var (
// The name used to refer to the system keeper
SystemKeeperName = "system"
ErrKeeperNotFound = errors.New("keeper not found")
ErrKeeperAlreadyExists = errors.New("keeper already exists")
)
@@ -21,7 +24,9 @@ type KeeperMetadataStorage interface {
Update(ctx context.Context, keeper *secretv1beta1.Keeper, actorUID string) (*secretv1beta1.Keeper, error)
Delete(ctx context.Context, namespace xkube.Namespace, name string) error
List(ctx context.Context, namespace xkube.Namespace) ([]secretv1beta1.Keeper, error)
GetKeeperConfig(ctx context.Context, namespace string, name *string, opts ReadOpts) (secretv1beta1.KeeperConfig, error)
GetKeeperConfig(ctx context.Context, namespace string, name string, opts ReadOpts) (secretv1beta1.KeeperConfig, error)
SetAsActive(ctx context.Context, namespace xkube.Namespace, name string) error
GetActiveKeeperConfig(ctx context.Context, namespace string) (string, secretv1beta1.KeeperConfig, error)
}
// ErrKeeperInvalidSecureValues is returned when a Keeper references SecureValues that do not exist.
@@ -31,7 +31,7 @@ type ReadOpts struct {
// SecureValueMetadataStorage is the interface for wiring and dependency injection.
type SecureValueMetadataStorage interface {
Create(ctx context.Context, sv *secretv1beta1.SecureValue, actorUID string) (*secretv1beta1.SecureValue, error)
Create(ctx context.Context, keeper string, sv *secretv1beta1.SecureValue, actorUID string) (*secretv1beta1.SecureValue, error)
Read(ctx context.Context, namespace xkube.Namespace, name string, opts ReadOpts) (*secretv1beta1.SecureValue, error)
List(ctx context.Context, namespace xkube.Namespace) ([]secretv1beta1.SecureValue, error)
SetVersionToActive(ctx context.Context, namespace xkube.Namespace, name string, version int64) error
@@ -47,6 +47,7 @@ type SecureValueService interface {
List(ctx context.Context, namespace xkube.Namespace) (*secretv1beta1.SecureValueList, error)
Update(ctx context.Context, newSecureValue *secretv1beta1.SecureValue, actorUID string) (*secretv1beta1.SecureValue, bool, error)
Delete(ctx context.Context, namespace xkube.Namespace, name string) (*secretv1beta1.SecureValue, error)
SetKeeperAsActive(ctx context.Context, namespace xkube.Namespace, keeperName string) error
}
type SecureValueClient interface {
@@ -93,14 +93,14 @@ func (w *Worker) CleanupInactiveSecureValues(ctx context.Context) ([]secretv1bet
}
func (w *Worker) Cleanup(ctx context.Context, sv *secretv1beta1.SecureValue) error {
keeperCfg, err := w.keeperMetadataStorage.GetKeeperConfig(ctx, sv.Namespace, sv.Spec.Keeper, contracts.ReadOpts{ForUpdate: false})
keeperCfg, err := w.keeperMetadataStorage.GetKeeperConfig(ctx, sv.Namespace, sv.Status.Keeper, contracts.ReadOpts{ForUpdate: false})
if err != nil {
return fmt.Errorf("fetching keeper config: namespace=%+v keeperName=%+v %w", sv.Namespace, sv.Spec.Keeper, err)
return fmt.Errorf("fetching keeper config: namespace=%+v keeperName=%+v %w", sv.Namespace, sv.Status.Keeper, err)
}
keeper, err := w.keeperService.KeeperForConfig(keeperCfg)
if err != nil {
return fmt.Errorf("getting keeper for config: namespace=%+v keeperName=%+v %w", sv.Namespace, sv.Spec.Keeper, err)
return fmt.Errorf("getting keeper for config: namespace=%+v keeperName=%+v %w", sv.Namespace, sv.Status.Keeper, err)
}
// Keeper deletion is idempotent
@@ -1,7 +1,6 @@
package garbagecollectionworker_test
import (
"fmt"
"slices"
"testing"
"time"
@@ -11,7 +10,6 @@ import (
"github.com/grafana/grafana/pkg/registry/apis/secret/testutils"
"github.com/grafana/grafana/pkg/registry/apis/secret/xkube"
"github.com/grafana/grafana/pkg/storage/secret/encryption"
"github.com/mitchellh/copystructure"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -52,7 +50,7 @@ func TestBasic(t *testing.T) {
sv, err := sut.CreateSv(t.Context())
require.NoError(t, err)
keeperCfg, err := sut.KeeperMetadataStorage.GetKeeperConfig(t.Context(), sv.Namespace, sv.Spec.Keeper, contracts.ReadOpts{ForUpdate: false})
keeperCfg, err := sut.KeeperMetadataStorage.GetKeeperConfig(t.Context(), sv.Namespace, sv.Status.Keeper, contracts.ReadOpts{ForUpdate: false})
require.NoError(t, err)
keeper, err := sut.KeeperService.KeeperForConfig(keeperCfg)
@@ -133,7 +131,7 @@ func TestProperty(t *testing.T) {
t.Repeat(map[string]func(*rapid.T){
"create": func(t *rapid.T) {
sv := anySecureValueGen.Draw(t, "sv")
svCopy := deepCopy(sv)
svCopy := sv.DeepCopy()
createdSv, err := sut.CreateSv(t.Context(), testutils.CreateSvWithSv(sv))
svCopy.UID = createdSv.UID
@@ -194,13 +192,15 @@ func newModel() *model {
}
func (m *model) create(now time.Time, sv *secretv1beta1.SecureValue) error {
created := now
for _, item := range m.items {
if item.active && item.Namespace == sv.Namespace && item.Name == sv.Name {
item.active = false
created = item.created
break
}
}
m.items = append(m.items, &modelSecureValue{SecureValue: sv, active: true, created: now})
m.items = append(m.items, &modelSecureValue{SecureValue: sv, active: true, created: created})
return nil
}
@@ -219,6 +219,16 @@ func (m *model) cleanupInactiveSecureValues(now time.Time, minAge time.Duration,
// Using a slice to allow duplicates
toDelete := make([]*modelSecureValue, 0)
// The implementation query sorts by created time ascending
slices.SortFunc(m.items, func(a, b *modelSecureValue) int {
if a.created.Before(b.created) {
return -1
} else if a.created.After(b.created) {
return 1
}
return 0
})
for _, sv := range m.items {
if len(toDelete) >= int(maxBatchSize) {
break
@@ -238,11 +248,3 @@ func (m *model) cleanupInactiveSecureValues(now time.Time, minAge time.Duration,
return toDelete, nil
}
func deepCopy[T any](sv T) T {
copied, err := copystructure.Copy(sv)
if err != nil {
panic(fmt.Sprintf("failed to copy secure value: %v", err))
}
return copied.(T)
}
@@ -93,7 +93,13 @@ func (s *SecureValueService) Create(ctx context.Context, sv *secretv1beta1.Secur
s.metrics.SecureValueCreateDuration.WithLabelValues(strconv.FormatBool(success)).Observe(time.Since(start).Seconds())
}()
return s.createNewVersion(ctx, sv, actorUID)
// Secure value creation uses the active keeper
keeperName, keeperCfg, err := s.keeperMetadataStorage.GetActiveKeeperConfig(ctx, sv.Namespace)
if err != nil {
return nil, fmt.Errorf("fetching active keeper config: namespace=%+v %w", sv.Namespace, err)
}
return s.createNewVersion(ctx, keeperName, keeperCfg, sv, actorUID)
}
func (s *SecureValueService) Update(ctx context.Context, newSecureValue *secretv1beta1.SecureValue, actorUID string) (_ *secretv1beta1.SecureValue, sync bool, updateErr error) {
@@ -128,23 +134,22 @@ func (s *SecureValueService) Update(ctx context.Context, newSecureValue *secretv
s.metrics.SecureValueUpdateDuration.WithLabelValues(strconv.FormatBool(success)).Observe(time.Since(start).Seconds())
}()
currentVersion, err := s.secureValueMetadataStorage.Read(ctx, xkube.Namespace(newSecureValue.Namespace), newSecureValue.Name, contracts.ReadOpts{})
if err != nil {
return nil, false, fmt.Errorf("reading secure value secret: %+w", err)
}
keeperCfg, err := s.keeperMetadataStorage.GetKeeperConfig(ctx, currentVersion.Namespace, currentVersion.Status.Keeper, contracts.ReadOpts{})
if err != nil {
return nil, false, fmt.Errorf("fetching keeper config: namespace=%+v keeper: %q %w", newSecureValue.Namespace, currentVersion.Status.Keeper, err)
}
if newSecureValue.Spec.Value == nil {
currentVersion, err := s.secureValueMetadataStorage.Read(ctx, xkube.Namespace(newSecureValue.Namespace), newSecureValue.Name, contracts.ReadOpts{})
if err != nil {
return nil, false, fmt.Errorf("reading secure value secret: %+w", err)
}
// TODO: does this need to be for update?
keeperCfg, err := s.keeperMetadataStorage.GetKeeperConfig(ctx, newSecureValue.Namespace, newSecureValue.Spec.Keeper, contracts.ReadOpts{ForUpdate: true})
if err != nil {
return nil, false, fmt.Errorf("fetching keeper config: namespace=%+v keeperName=%+v %w", newSecureValue.Namespace, newSecureValue.Spec.Keeper, err)
}
keeper, err := s.keeperService.KeeperForConfig(keeperCfg)
if err != nil {
return nil, false, fmt.Errorf("getting keeper for config: namespace=%+v keeperName=%+v %w", newSecureValue.Namespace, newSecureValue.Spec.Keeper, err)
return nil, false, fmt.Errorf("getting keeper for config: namespace=%+v keeperName=%+v %w", newSecureValue.Namespace, newSecureValue.Status.Keeper, err)
}
logging.FromContext(ctx).Debug("retrieved keeper", "namespace", newSecureValue.Namespace, "keeperName", newSecureValue.Spec.Keeper, "type", keeperCfg.Type())
logging.FromContext(ctx).Debug("retrieved keeper", "namespace", newSecureValue.Namespace, "type", keeperCfg.Type())
secret, err := keeper.Expose(ctx, keeperCfg, xkube.Namespace(newSecureValue.Namespace), newSecureValue.Name, currentVersion.Status.Version)
if err != nil {
@@ -154,12 +159,16 @@ func (s *SecureValueService) Update(ctx context.Context, newSecureValue *secretv
newSecureValue.Spec.Value = &secret
}
// Secure value updates use the keeper used to create the secure value
const updateIsSync = true
createdSv, err := s.createNewVersion(ctx, newSecureValue, actorUID)
createdSv, err := s.createNewVersion(ctx, currentVersion.Status.Keeper, keeperCfg, newSecureValue, actorUID)
return createdSv, updateIsSync, err
}
func (s *SecureValueService) createNewVersion(ctx context.Context, sv *secretv1beta1.SecureValue, actorUID string) (*secretv1beta1.SecureValue, error) {
func (s *SecureValueService) createNewVersion(ctx context.Context, keeperName string, keeperCfg secretv1beta1.KeeperConfig, sv *secretv1beta1.SecureValue, actorUID string) (*secretv1beta1.SecureValue, error) {
if keeperName == "" {
return nil, fmt.Errorf("keeper name is required, got empty string")
}
if err := s.secureValueMutator.Mutate(sv, admission.Create); err != nil {
return nil, err
}
@@ -168,25 +177,21 @@ func (s *SecureValueService) createNewVersion(ctx context.Context, sv *secretv1b
return nil, contracts.NewErrValidateSecureValue(errorList)
}
createdSv, err := s.secureValueMetadataStorage.Create(ctx, sv, actorUID)
createdSv, err := s.secureValueMetadataStorage.Create(ctx, keeperName, sv, actorUID)
if err != nil {
return nil, fmt.Errorf("creating secure value: %w", err)
}
createdSv.Status = secretv1beta1.SecureValueStatus{
Version: createdSv.Status.Version,
}
// TODO: does this need to be for update?
keeperCfg, err := s.keeperMetadataStorage.GetKeeperConfig(ctx, createdSv.Namespace, createdSv.Spec.Keeper, contracts.ReadOpts{ForUpdate: true})
if err != nil {
return nil, fmt.Errorf("fetching keeper config: namespace=%+v keeperName=%+v %w", createdSv.Namespace, createdSv.Spec.Keeper, err)
Keeper: keeperName,
}
keeper, err := s.keeperService.KeeperForConfig(keeperCfg)
if err != nil {
return nil, fmt.Errorf("getting keeper for config: namespace=%+v keeperName=%+v %w", createdSv.Namespace, createdSv.Spec.Keeper, err)
return nil, fmt.Errorf("getting keeper for config: namespace=%+v keeperName=%+v %w", createdSv.Namespace, keeperName, err)
}
logging.FromContext(ctx).Debug("retrieved keeper", "namespace", createdSv.Namespace, "keeperName", createdSv.Spec.Keeper, "type", keeperCfg.Type())
logging.FromContext(ctx).Debug("retrieved keeper", "namespace", createdSv.Namespace, "type", keeperCfg.Type())
// TODO: can we stop using external id?
// TODO: store uses only the namespace and returns and id. It could be a kv instead.
@@ -364,3 +369,10 @@ func (s *SecureValueService) Delete(ctx context.Context, namespace xkube.Namespa
return sv, nil
}
func (s *SecureValueService) SetKeeperAsActive(ctx context.Context, namespace xkube.Namespace, name string) error {
if err := s.keeperMetadataStorage.SetAsActive(ctx, namespace, name); err != nil {
return fmt.Errorf("calling keeper metadata storage to set keeper as active: %w", err)
}
return nil
}
@@ -41,6 +41,10 @@ func (v *keeperValidator) Validate(keeper *secretv1beta1.Keeper, oldKeeper *secr
return errs
}
if keeper.Name == contracts.SystemKeeperName {
errs = append(errs, field.Forbidden(field.NewPath("name"), "the keeper name `system` is reserved"))
}
if keeper.Spec.Description == "" {
errs = append(errs, field.Required(field.NewPath("spec", "description"), "a `description` is required"))
}
@@ -35,36 +35,6 @@ func TestValidateKeeper(t *testing.T) {
})
})
t.Run("only one `keeper` must be present", func(t *testing.T) {
keeper := &secretv1beta1.Keeper{
ObjectMeta: objectMeta,
Spec: secretv1beta1.KeeperSpec{
Description: "short description",
Aws: &secretv1beta1.KeeperAWSConfig{},
Azure: &secretv1beta1.KeeperAzureConfig{},
Gcp: &secretv1beta1.KeeperGCPConfig{},
HashiCorpVault: &secretv1beta1.KeeperHashiCorpConfig{},
},
}
errs := validator.Validate(keeper, nil, admission.Create)
require.Len(t, errs, 1)
require.Equal(t, "spec", errs[0].Field)
})
t.Run("at least one `keeper` must be present", func(t *testing.T) {
keeper := &secretv1beta1.Keeper{
ObjectMeta: objectMeta,
Spec: secretv1beta1.KeeperSpec{
Description: "description",
},
}
errs := validator.Validate(keeper, nil, admission.Create)
require.Len(t, errs, 1)
require.Equal(t, "spec", errs[0].Field)
})
t.Run("aws keeper validation", func(t *testing.T) {
validKeeperAWS := &secretv1beta1.Keeper{
ObjectMeta: objectMeta,
@@ -341,4 +311,27 @@ func TestValidateKeeper(t *testing.T) {
require.Len(t, errs, 1)
require.Equal(t, "metadata.namespace", errs[0].Field)
})
t.Run("keeper name `system` is reserved", func(t *testing.T) {
keeper := &secretv1beta1.Keeper{
ObjectMeta: metav1.ObjectMeta{
Name: "system",
Namespace: "ns1",
},
Spec: secretv1beta1.KeeperSpec{
Description: "description",
HashiCorpVault: &secretv1beta1.KeeperHashiCorpConfig{
Address: "http://address",
Token: secretv1beta1.KeeperCredentialValue{
ValueFromConfig: "config.path.value",
},
},
},
}
errs := validator.Validate(keeper, nil, admission.Create)
require.Len(t, errs, 1)
require.Equal(t, "name", errs[0].Field)
require.Equal(t, "the keeper name `system` is reserved", errs[0].Detail)
})
}
@@ -110,11 +110,6 @@ func validateSecureValueUpdate(sv, oldSv *secretv1beta1.SecureValue) field.Error
}
}
// Keeper cannot be changed.
if sv.Spec.Keeper != oldSv.Spec.Keeper {
errs = append(errs, field.Forbidden(field.NewPath("spec"), "the `keeper` cannot be changed"))
}
return errs
}
@@ -25,9 +25,9 @@ func TestValidateSecureValue(t *testing.T) {
Spec: secretv1beta1.SecureValueSpec{
Description: "description",
Value: ptr.To(secretv1beta1.NewExposedSecureValue("value")),
Keeper: &keeper,
Decrypters: []string{"app1", "app2"},
},
Status: secretv1beta1.SecureValueStatus{Keeper: keeper},
}
t.Run("the `description` must be present", func(t *testing.T) {
@@ -182,28 +182,6 @@ func TestValidateSecureValue(t *testing.T) {
require.Len(t, errs, 1)
require.Equal(t, "spec", errs[0].Field)
})
t.Run("when trying to change the `keeper`, it returns an error", func(t *testing.T) {
keeperA := "a-keeper"
keeperAnother := "another-keeper"
oldSv := &secretv1beta1.SecureValue{
ObjectMeta: objectMeta,
Spec: secretv1beta1.SecureValueSpec{
Keeper: &keeperA,
},
}
sv := &secretv1beta1.SecureValue{
ObjectMeta: objectMeta,
Spec: secretv1beta1.SecureValueSpec{
Keeper: &keeperAnother,
},
}
errs := validator.Validate(sv, oldSv, admission.Update)
require.Len(t, errs, 1)
require.Equal(t, "spec", errs[0].Field)
})
})
t.Run("`decrypters` must have unique items", func(t *testing.T) {
@@ -8,6 +8,7 @@ import (
"net"
"net/http"
"strconv"
"strings"
"sync"
"testing"
"time"
@@ -175,6 +176,28 @@ func TestIntegrationDistributor(t *testing.T) {
}
})
t.Run("RebuildIndexes", func(t *testing.T) {
instanceResponseCount := make(map[string]int)
// simulate RebuildIndexes for a single namespace
testNamespace := testNamespaces[0]
req := &resourcepb.RebuildIndexesRequest{
Namespace: testNamespace,
Keys: []*resourcepb.ResourceKey{{
Namespace: testNamespace,
Group: "folder.grafana.app",
Resource: "folders",
}},
}
distributorRes := getDistributorResponse(t, req, distributorServer.resourceClient.RebuildIndexes, instanceResponseCount)
require.Nil(t, distributorRes.Error)
// assert all instances got the response by looking at the merged details
count := strings.Count(distributorRes.Details, "{instance:")
require.Equal(t, len(testServers), count)
})
var wg sync.WaitGroup
for _, testServer := range testServers {
wg.Add(1)
+4 -4
View File
@@ -400,7 +400,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api
prometheusService := prometheus.ProvideService(httpclientProvider)
tempoService := tempo.ProvideService(httpclientProvider, tracer)
testdatasourceService := testdatasource.ProvideService()
postgresService := postgres.ProvideService(cfg)
postgresService := postgres.ProvideService()
mysqlService := mysql.ProvideService()
mssqlService := mssql.ProvideService(cfg)
entityEventsService := store.ProvideEntityEventsService(cfg, sqlStore, featureToggles)
@@ -534,7 +534,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api
}
migrationDashboardAccessor := legacy.ProvideMigratorDashboardAccessor(legacyDatabaseProvider, stubProvisioningService, accessControl, featureToggles)
unifiedMigrator := migrations2.ProvideUnifiedMigrator(migrationDashboardAccessor, resourceClient)
unifiedStorageMigrationService := migrations2.ProvideUnifiedStorageMigrationService(unifiedMigrator, cfg, sqlStore, kvStore)
unifiedStorageMigrationService := migrations2.ProvideUnifiedStorageMigrationService(unifiedMigrator, cfg, sqlStore, kvStore, resourceClient)
dualwriteService, err := dualwrite.ProvideService(featureToggles, kvStore, cfg, unifiedStorageMigrationService)
if err != nil {
return nil, err
@@ -1047,7 +1047,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac
prometheusService := prometheus.ProvideService(httpclientProvider)
tempoService := tempo.ProvideService(httpclientProvider, tracer)
testdatasourceService := testdatasource.ProvideService()
postgresService := postgres.ProvideService(cfg)
postgresService := postgres.ProvideService()
mysqlService := mysql.ProvideService()
mssqlService := mssql.ProvideService(cfg)
entityEventsService := store.ProvideEntityEventsService(cfg, sqlStore, featureToggles)
@@ -1181,7 +1181,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac
}
migrationDashboardAccessor := legacy.ProvideMigratorDashboardAccessor(legacyDatabaseProvider, stubProvisioningService, accessControl, featureToggles)
unifiedMigrator := migrations2.ProvideUnifiedMigrator(migrationDashboardAccessor, resourceClient)
unifiedStorageMigrationService := migrations2.ProvideUnifiedStorageMigrationService(unifiedMigrator, cfg, sqlStore, kvStore)
unifiedStorageMigrationService := migrations2.ProvideUnifiedStorageMigrationService(unifiedMigrator, cfg, sqlStore, kvStore, resourceClient)
dualwriteService, err := dualwrite.ProvideService(featureToggles, kvStore, cfg, unifiedStorageMigrationService)
if err != nil {
return nil, err
@@ -741,6 +741,7 @@ func (s *Service) SaveExternalServiceRole(ctx context.Context, cmd accesscontrol
ctx, span := tracer.Start(ctx, "accesscontrol.acimpl.SaveExternalServiceRole")
defer span.End()
//nolint:staticcheck // not yet migrated to OpenFeature
if !s.cfg.ManagedServiceAccountsEnabled || !s.features.IsEnabled(ctx, featuremgmt.FlagExternalServiceAccounts) {
s.log.Debug("Registering an external service role is behind a feature flag, enable it to use this feature.")
return nil
@@ -757,6 +758,7 @@ func (s *Service) DeleteExternalServiceRole(ctx context.Context, externalService
ctx, span := tracer.Start(ctx, "accesscontrol.acimpl.DeleteExternalServiceRole")
defer span.End()
//nolint:staticcheck // not yet migrated to OpenFeature
if !s.cfg.ManagedServiceAccountsEnabled || !s.features.IsEnabled(ctx, featuremgmt.FlagExternalServiceAccounts) {
s.log.Debug("Deleting an external service role is behind a feature flag, enable it to use this feature.")
return nil
@@ -28,6 +28,7 @@ var DashboardEditActions = append(DashboardViewActions, []string{dashboards.Acti
var DashboardAdminActions = append(DashboardEditActions, []string{dashboards.ActionDashboardsPermissionsRead, dashboards.ActionDashboardsPermissionsWrite}...)
func getDashboardViewActions(features featuremgmt.FeatureToggles) []string {
//nolint:staticcheck // not yet migrated to OpenFeature
if features.IsEnabled(context.Background(), featuremgmt.FlagAnnotationPermissionUpdate) {
return append(DashboardViewActions, accesscontrol.ActionAnnotationsRead)
}
@@ -35,6 +36,7 @@ func getDashboardViewActions(features featuremgmt.FeatureToggles) []string {
}
func getDashboardEditActions(features featuremgmt.FeatureToggles) []string {
//nolint:staticcheck // not yet migrated to OpenFeature
if features.IsEnabled(context.Background(), featuremgmt.FlagAnnotationPermissionUpdate) {
return append(DashboardEditActions, []string{accesscontrol.ActionAnnotationsRead, accesscontrol.ActionAnnotationsWrite, accesscontrol.ActionAnnotationsDelete, accesscontrol.ActionAnnotationsCreate}...)
}
@@ -42,6 +44,7 @@ func getDashboardEditActions(features featuremgmt.FeatureToggles) []string {
}
func getDashboardAdminActions(features featuremgmt.FeatureToggles) []string {
//nolint:staticcheck // not yet migrated to OpenFeature
if features.IsEnabled(context.Background(), featuremgmt.FlagAnnotationPermissionUpdate) {
return append(DashboardAdminActions, []string{accesscontrol.ActionAnnotationsRead, accesscontrol.ActionAnnotationsWrite, accesscontrol.ActionAnnotationsDelete, accesscontrol.ActionAnnotationsCreate}...)
}
@@ -61,6 +61,7 @@ func (authz *AuthService) Authorize(ctx context.Context, query annotations.ItemQ
scopeTypes := annotationScopeTypes(scopes)
_, canAccessOrgAnnotations := scopeTypes[annotations.Organization.String()]
_, canAccessDashAnnotations := scopeTypes[annotations.Dashboard.String()]
//nolint:staticcheck // not yet migrated to OpenFeature
if authz.features.IsEnabled(ctx, featuremgmt.FlagAnnotationPermissionUpdate) {
canAccessDashAnnotations = true
}
@@ -122,6 +123,7 @@ func (authz *AuthService) dashboardsWithVisibleAnnotations(ctx context.Context,
}
filterType := searchstore.TypeDashboard
//nolint:staticcheck // not yet migrated to OpenFeature
if authz.features.IsEnabled(ctx, featuremgmt.FlagAnnotationPermissionUpdate) {
filterType = searchstore.TypeAnnotation
}
@@ -3,6 +3,7 @@ package authorizer
import (
"context"
"fmt"
"slices"
"k8s.io/apiserver/pkg/authorization/authorizer"
@@ -12,6 +13,10 @@ import (
var _ authorizer.Authorizer = &roleAuthorizer{}
// orgRoleNoneAsViewerAPIGroups lists the API groups for which users with the
// org role "None" are temporarily granted Viewer-like read access.
// NOTE(review): this backs the HOTFIX branch in roleAuthorizer.Authorize
// (read-only verbs get/list/watch are allowed for these groups) and should be
// removed once a proper fix lands.
var orgRoleNoneAsViewerAPIGroups = []string{
	"productactivation.ext.grafana.com",
}
type roleAuthorizer struct{}
func newRoleAuthorizer() *roleAuthorizer {
@@ -43,6 +48,16 @@ func (auth roleAuthorizer) Authorize(ctx context.Context, a authorizer.Attribute
return authorizer.DecisionDeny, errorMessageForGrafanaOrgRole(orgRole, a), nil
}
case org.RoleNone:
// HOTFIX: granting Viewer actions to None roles to a fixed group of APIs,
// while we work on a proper fix.
if slices.Contains(orgRoleNoneAsViewerAPIGroups, a.GetAPIGroup()) {
switch a.GetVerb() {
case "get", "list", "watch":
return authorizer.DecisionAllow, "", nil
default:
return authorizer.DecisionDeny, errorMessageForGrafanaOrgRole(orgRole, a), nil
}
}
return authorizer.DecisionDeny, errorMessageForGrafanaOrgRole(orgRole, a), nil
}
return authorizer.DecisionDeny, "", nil
@@ -83,6 +83,7 @@ func ProvideRegistration(
}
}
//nolint:staticcheck // not yet migrated to OpenFeature
if cfg.PasswordlessMagicLinkAuth.Enabled && features.IsEnabled(context.Background(), featuremgmt.FlagPasswordlessMagicLinkAuthentication) {
hasEnabledProviders := authnSvc.IsClientEnabled(authn.ClientSAML) || authnSvc.IsClientEnabled(authn.ClientLDAP)
if !hasEnabledProviders {
+1
View File
@@ -210,6 +210,7 @@ func (c *CachingServiceClient) WithQueryDataCaching(ctx context.Context, req *ba
// Update the query cache with the result for this metrics request
if err == nil && cr.UpdateCacheFn != nil {
// If AWS async caching is not enabled, use the old code path
//nolint:staticcheck // not yet migrated to OpenFeature
if c.features == nil || !c.features.IsEnabled(ctx, featuremgmt.FlagAwsAsyncQueryCaching) {
cr.UpdateCacheFn(ctx, resp)
} else if reqCtx != nil {
+28 -20
View File
@@ -207,6 +207,33 @@ type AuthHTTPHeaderList struct {
Items []string
}
// GetAuthHTTPHeaders returns the names of all HTTP headers that may carry
// authentication data for Grafana, derived from the given JWT auth and auth
// proxy settings. The result always contains "Authorization" and
// "X-Grafana-Device-Id"; additional entries depend on configuration.
func GetAuthHTTPHeaders(jwtAuth *setting.AuthJWTSettings, authProxy *setting.AuthProxySettings) []string {
	// "Authorization" covers basic auth, API keys and potentially JWT auth;
	// "X-Grafana-Device-Id" is only used for auth in authn clients.
	headers := []string{"Authorization", "X-Grafana-Device-Id"}

	// A custom JWT header is included unless it duplicates "Authorization".
	if jwtAuth.Enabled && jwtAuth.HeaderName != "" && jwtAuth.HeaderName != "Authorization" {
		headers = append(headers, jwtAuth.HeaderName)
	}

	// With auth proxy enabled, include its main header plus every configured
	// extra header, skipping empty entries.
	if authProxy.Enabled {
		headers = append(headers, authProxy.HeaderName)
		for _, h := range authProxy.Headers {
			if h != "" {
				headers = append(headers, h)
			}
		}
	}

	return headers
}
// WithAuthHTTPHeaders returns a new context in which all possible configured auth header will be included
// and later retrievable by AuthHTTPHeaderListFromContext.
func WithAuthHTTPHeaders(ctx context.Context, cfg *setting.Cfg) context.Context {
@@ -217,26 +244,7 @@ func WithAuthHTTPHeaders(ctx context.Context, cfg *setting.Cfg) context.Context
}
}
// used by basic auth, api keys and potentially jwt auth
list.Items = append(list.Items, "Authorization")
// remove X-Grafana-Device-Id as it is only used for auth in authn clients.
list.Items = append(list.Items, "X-Grafana-Device-Id")
// if jwt is enabled we add it to the list. We can ignore in case it is set to Authorization
if cfg.JWTAuth.Enabled && cfg.JWTAuth.HeaderName != "" && cfg.JWTAuth.HeaderName != "Authorization" {
list.Items = append(list.Items, cfg.JWTAuth.HeaderName)
}
// if auth proxy is enabled add the main proxy header and all configured headers
if cfg.AuthProxy.Enabled {
list.Items = append(list.Items, cfg.AuthProxy.HeaderName)
for _, header := range cfg.AuthProxy.Headers {
if header != "" {
list.Items = append(list.Items, header)
}
}
}
list.Items = append(list.Items, GetAuthHTTPHeaders(&cfg.JWTAuth, &cfg.AuthProxy)...)
return context.WithValue(ctx, authHTTPHeaderListKey, list)
}
+7 -1
View File
@@ -10,6 +10,10 @@ import (
type FeatureToggles interface {
// IsEnabled checks if a feature is enabled for a given context.
// The settings may be per user, tenant, or globally set in the cloud
//
// Deprecated: FeatureToggles.IsEnabled is deprecated and will be removed in a future release.
// Evaluate with OpenFeature instead (see [github.com/open-feature/go-sdk/openfeature.Client]), for example:
// openfeature.NewDefaultClient().Boolean(ctx, "your-flag", false, openfeature.TransactionContext(ctx))
IsEnabled(ctx context.Context, flag string) bool
// IsEnabledGlobally checks if a flag is configured globally. For now, this is the same
@@ -19,7 +23,9 @@ type FeatureToggles interface {
// a full server restart for a change to take place.
//
// Deprecated: FeatureToggles.IsEnabledGlobally is deprecated and will be removed in a future release.
// Evaluate with OpenFeature instead (see [github.com/open-feature/go-sdk/openfeature.Client])
// Toggles that must be reliably evaluated at the service startup should be
// changed to settings (see setting.StartupSettings), and/or removed entirely.
// For app registration please use `grafana-apiserver.runtime_config` in settings.ini
IsEnabledGlobally(flag string) bool
// Get the enabled flags -- this *may* also include disabled flags (with value false)
+15 -7
View File
@@ -586,6 +586,13 @@ var (
FrontendOnly: true,
Owner: grafanaDashboardsSquad,
},
{
Name: "perPanelNonApplicableDrilldowns",
Description: "Enables viewing non-applicable drilldowns on a panel level",
Stage: FeatureStageExperimental,
FrontendOnly: true,
Owner: grafanaDashboardsSquad,
},
{
Name: "panelFilterVariable",
Description: "Enables use of the `systemPanelFilterVar` variable to filter panels in a dashboard",
@@ -983,7 +990,7 @@ var (
},
{
Name: "exploreLogsShardSplitting",
Description: "Used in Logs Drilldown to split queries into multiple queries based on the number of shards",
Description: "Deprecated. Replace with lokiShardSplitting. Used in Logs Drilldown to split queries into multiple queries based on the number of shards",
Stage: FeatureStageExperimental,
FrontendOnly: true,
Owner: grafanaObservabilityLogsSquad,
@@ -1511,12 +1518,6 @@ var (
FrontendOnly: true,
Expression: "true",
},
{
Name: "postgresDSUsePGX",
Description: "Enables using PGX instead of libpq for PostgreSQL datasource",
Stage: FeatureStageExperimental,
Owner: grafanaOSSBigTent,
},
{
Name: "tempoAlerting",
Description: "Enables creating alerts from Tempo data source",
@@ -1938,6 +1939,13 @@ var (
FrontendOnly: true,
Owner: grafanaPluginsPlatformSquad,
},
{
Name: "lokiQueryLimitsContext",
Description: "Send X-Loki-Query-Limits-Context header to Loki on first split request",
Stage: FeatureStageExperimental,
FrontendOnly: true,
Owner: grafanaObservabilityLogsSquad,
},
{
Name: "rudderstackUpgrade",
Description: "Enables the new version of rudderstack",
+2 -1
View File
@@ -81,6 +81,7 @@ dashboardScene,GA,@grafana/dashboards-squad,false,false,true
dashboardNewLayouts,experimental,@grafana/dashboards-squad,false,false,false
dashboardUndoRedo,experimental,@grafana/dashboards-squad,false,false,true
unlimitedLayoutsNesting,experimental,@grafana/dashboards-squad,false,false,true
perPanelNonApplicableDrilldowns,experimental,@grafana/dashboards-squad,false,false,true
panelFilterVariable,experimental,@grafana/dashboards-squad,false,false,true
pdfTables,preview,@grafana/grafana-operator-experience-squad,false,false,false
canvasPanelPanZoom,preview,@grafana/dataviz-squad,false,false,true
@@ -207,7 +208,6 @@ unifiedNavbars,GA,@grafana/plugins-platform-backend,false,false,true
logsPanelControls,preview,@grafana/observability-logs,false,false,true
metricsFromProfiles,experimental,@grafana/observability-traces-and-profiling,false,false,true
grafanaAssistantInProfilesDrilldown,GA,@grafana/observability-traces-and-profiling,false,false,true
postgresDSUsePGX,experimental,@grafana/oss-big-tent,false,false,false
tempoAlerting,experimental,@grafana/observability-traces-and-profiling,false,false,false
pluginsAutoUpdate,experimental,@grafana/plugins-platform-backend,false,false,false
alertingListViewV2PreviewToggle,privatePreview,@grafana/alerting-squad,false,false,true
@@ -263,4 +263,5 @@ kubernetesAnnotations,experimental,@grafana/grafana-backend-services-squad,false
awsDatasourcesHttpProxy,experimental,@grafana/aws-datasources,false,false,false
transformationsEmptyPlaceholder,preview,@grafana/datapro,false,false,true
ttlPluginInstanceManager,experimental,@grafana/plugins-platform-backend,false,false,true
lokiQueryLimitsContext,experimental,@grafana/observability-logs,false,false,true
rudderstackUpgrade,experimental,@grafana/grafana-frontend-platform,false,false,true
1 Name Stage Owner requiresDevMode RequiresRestart FrontendOnly
81 dashboardNewLayouts experimental @grafana/dashboards-squad false false false
82 dashboardUndoRedo experimental @grafana/dashboards-squad false false true
83 unlimitedLayoutsNesting experimental @grafana/dashboards-squad false false true
84 perPanelNonApplicableDrilldowns experimental @grafana/dashboards-squad false false true
85 panelFilterVariable experimental @grafana/dashboards-squad false false true
86 pdfTables preview @grafana/grafana-operator-experience-squad false false false
87 canvasPanelPanZoom preview @grafana/dataviz-squad false false true
208 logsPanelControls preview @grafana/observability-logs false false true
209 metricsFromProfiles experimental @grafana/observability-traces-and-profiling false false true
210 grafanaAssistantInProfilesDrilldown GA @grafana/observability-traces-and-profiling false false true
postgresDSUsePGX experimental @grafana/oss-big-tent false false false
211 tempoAlerting experimental @grafana/observability-traces-and-profiling false false false
212 pluginsAutoUpdate experimental @grafana/plugins-platform-backend false false false
213 alertingListViewV2PreviewToggle privatePreview @grafana/alerting-squad false false true
263 awsDatasourcesHttpProxy experimental @grafana/aws-datasources false false false
264 transformationsEmptyPlaceholder preview @grafana/datapro false false true
265 ttlPluginInstanceManager experimental @grafana/plugins-platform-backend false false true
266 lokiQueryLimitsContext experimental @grafana/observability-logs false false true
267 rudderstackUpgrade experimental @grafana/grafana-frontend-platform false false true
-4
View File
@@ -606,10 +606,6 @@ const (
// use multi-tenant path for awsTempCredentials
FlagMultiTenantTempCredentials = "multiTenantTempCredentials"
// FlagPostgresDSUsePGX
// Enables using PGX instead of libpq for PostgreSQL datasource
FlagPostgresDSUsePGX = "postgresDSUsePGX"
// FlagTempoAlerting
// Enables creating alerts from Tempo data source
FlagTempoAlerting = "tempoAlerting"
+337 -1263
View File
File diff suppressed because it is too large Load Diff
+1
View File
@@ -148,6 +148,7 @@ func (l *LibraryElementService) deleteHandler(c *contextmodel.ReqContext) respon
// 404: notFoundError
// 500: internalServerError
func (l *LibraryElementService) getHandler(c *contextmodel.ReqContext) response.Response {
//nolint:staticcheck // not yet migrated to OpenFeature
if l.features.IsEnabled(c.Req.Context(), featuremgmt.FlagKubernetesLibraryPanels) {
l.k8sHandler.getK8sLibraryElement(c)
return nil // already handled in the k8s handler
@@ -44,6 +44,7 @@ func (s *ServiceImpl) getAdminNode(c *contextmodel.ReqContext) (*navtree.NavLink
Text: "Organizations", SubTitle: "Isolated instances of Grafana running on the same server", Id: "global-orgs", Url: s.cfg.AppSubURL + "/admin/orgs", Icon: "building",
})
}
//nolint:staticcheck // not yet migrated to OpenFeature
if hasAccess(cloudmigration.MigrationAssistantAccess) && s.features.IsEnabled(ctx, featuremgmt.FlagOnPremToCloudMigrations) {
generalNodeLinks = append(generalNodeLinks, &navtree.NavLink{
Text: "Migrate to Grafana Cloud",
@@ -99,6 +100,7 @@ func (s *ServiceImpl) getAdminNode(c *contextmodel.ReqContext) (*navtree.NavLink
})
}
//nolint:staticcheck // not yet migrated to OpenFeature
if (s.cfg.Env == setting.Dev) || s.features.IsEnabled(ctx, featuremgmt.FlagEnableExtensionsAdminPage) && hasAccess(pluginaccesscontrol.AdminAccessEvaluator) {
pluginsNodeLinks = append(pluginsNodeLinks, &navtree.NavLink{
Text: "Extensions",
@@ -147,6 +149,7 @@ func (s *ServiceImpl) getAdminNode(c *contextmodel.ReqContext) (*navtree.NavLink
})
}
//nolint:staticcheck // not yet migrated to OpenFeature
if s.license.FeatureEnabled("groupsync") &&
s.features.IsEnabled(ctx, featuremgmt.FlagGroupAttributeSync) &&
hasAccess(ac.EvalAny(
@@ -407,6 +407,7 @@ func (s *ServiceImpl) buildDashboardNavLinks(c *contextmodel.ReqContext) []*navt
})
}
//nolint:staticcheck // not yet migrated to OpenFeature
if s.features.IsEnabled(c.Req.Context(), featuremgmt.FlagRestoreDashboards) && (c.GetOrgRole() == org.RoleAdmin || c.IsGrafanaAdmin) {
dashboardChildNavs = append(dashboardChildNavs, &navtree.NavLink{
Text: "Recently deleted",
@@ -435,6 +436,7 @@ func (s *ServiceImpl) buildAlertNavLinks(c *contextmodel.ReqContext) *navtree.Na
hasAccess := ac.HasAccess(s.accessControl, c)
var alertChildNavs []*navtree.NavLink
//nolint:staticcheck // not yet migrated to OpenFeature
if s.features.IsEnabled(c.Req.Context(), featuremgmt.FlagAlertingTriage) {
if hasAccess(ac.EvalAny(ac.EvalPermission(ac.ActionAlertingRuleRead), ac.EvalPermission(ac.ActionAlertingRuleExternalRead))) {
alertChildNavs = append(alertChildNavs, &navtree.NavLink{
@@ -492,6 +494,7 @@ func (s *ServiceImpl) buildAlertNavLinks(c *contextmodel.ReqContext) *navtree.Na
alertChildNavs = append(alertChildNavs, &navtree.NavLink{Text: "Alert groups", SubTitle: "See grouped alerts with active notifications", Id: "groups", Url: s.cfg.AppSubURL + "/alerting/groups", Icon: "layer-group"})
}
//nolint:staticcheck // not yet migrated to OpenFeature
if s.features.IsEnabled(c.Req.Context(), featuremgmt.FlagAlertingCentralAlertHistory) {
if hasAccess(ac.EvalAny(ac.EvalPermission(ac.ActionAlertingRuleRead))) {
alertChildNavs = append(alertChildNavs, &navtree.NavLink{
@@ -503,6 +506,7 @@ func (s *ServiceImpl) buildAlertNavLinks(c *contextmodel.ReqContext) *navtree.Na
})
}
}
//nolint:staticcheck // not yet migrated to OpenFeature
if c.GetOrgRole() == org.RoleAdmin && s.features.IsEnabled(c.Req.Context(), featuremgmt.FlagAlertRuleRestore) && s.features.IsEnabled(c.Req.Context(), featuremgmt.FlagAlertingRuleRecoverDeleted) {
alertChildNavs = append(alertChildNavs, &navtree.NavLink{
Text: "Recently deleted",
@@ -77,6 +77,7 @@ func (srv ConfigSrv) RoutePostNGalertConfig(c *contextmodel.ReqContext, body api
return response.Error(http.StatusBadRequest, "Invalid alertmanager choice specified", err)
}
//nolint:staticcheck // not yet migrated to OpenFeature
disableExternal := srv.featureManager.IsEnabled(c.Req.Context(), featuremgmt.FlagAlertingDisableSendAlertsExternal)
if disableExternal && sendAlertsTo != ngmodels.InternalAlertmanager {
return response.Error(http.StatusBadRequest, "Sending alerts to external alertmanagers is disallowed on this instance", err)
+3
View File
@@ -79,6 +79,7 @@ func (srv TestingApiSrv) RouteTestGrafanaRuleConfig(c *contextmodel.ReqContext,
return response.ErrOrFallback(http.StatusInternalServerError, "failed to authorize access to rule group", err)
}
//nolint:staticcheck // not yet migrated to OpenFeature
if srv.featureManager.IsEnabled(c.Req.Context(), featuremgmt.FlagAlertingQueryOptimization) {
if _, err := store.OptimizeAlertQueries(rule.Data); err != nil {
return ErrResp(http.StatusInternalServerError, err, "Failed to optimize query")
@@ -178,6 +179,7 @@ func (srv TestingApiSrv) RouteEvalQueries(c *contextmodel.ReqContext, cmd apimod
}
var optimizations []store.Optimization
//nolint:staticcheck // not yet migrated to OpenFeature
if srv.featureManager.IsEnabled(c.Req.Context(), featuremgmt.FlagAlertingQueryOptimization) {
var err error
optimizations, err = store.OptimizeAlertQueries(cond.Data)
@@ -223,6 +225,7 @@ func addOptimizedQueryWarnings(evalResults *backend.QueryDataResponse, optimizat
}
func (srv TestingApiSrv) BacktestAlertRule(c *contextmodel.ReqContext, cmd apimodels.BacktestConfig) response.Response {
//nolint:staticcheck // not yet migrated to OpenFeature
if !srv.featureManager.IsEnabled(c.Req.Context(), featuremgmt.FlagAlertingBacktesting) {
return ErrResp(http.StatusNotFound, nil, "Backgtesting API is not enabled")
}
@@ -6,6 +6,7 @@ import (
"encoding/json"
"errors"
"fmt"
"maps"
"net/url"
"slices"
"sort"
@@ -26,6 +27,9 @@ import (
ngmodels "github.com/grafana/grafana/pkg/services/ngalert/models"
"github.com/grafana/grafana/pkg/services/ngalert/state"
"github.com/grafana/grafana/pkg/util"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
)
type RuleStoreReader interface {
@@ -54,6 +58,9 @@ type PrometheusSrv struct {
provenanceStore ProvenanceStore
}
// Package-level OpenTelemetry tracer per Grafana instrumentation conventions.
var tracer = otel.Tracer("github.com/grafana/grafana/pkg/services/ngalert/api/prometheus")
func NewPrometheusSrv(log log.Logger, manager state.AlertInstanceManager, status StatusReader, store RuleStoreReader, authz RuleGroupAccessControlService, provenanceStore ProvenanceStore) *PrometheusSrv {
return &PrometheusSrv{
log,
@@ -219,6 +226,14 @@ func GetStatesFromQuery(v url.Values) (map[eval.State]struct{}, error) {
return states, nil
}
// MapStateSetToStrings converts a set of evaluation states into a slice of
// their string representations. Iteration order of the input map is not
// deterministic, so the order of the returned slice is unspecified.
func MapStateSetToStrings(stateSet map[eval.State]struct{}) []string {
	out := make([]string, 0, len(stateSet))
	for s := range stateSet {
		out = append(out, s.String())
	}
	return out
}
func GetHealthFromQuery(v url.Values) (map[string]struct{}, error) {
health := make(map[string]struct{})
for _, s := range v["health"] {
@@ -252,6 +267,13 @@ func (srv PrometheusSrv) RouteGetRuleStatuses(c *contextmodel.ReqContext) respon
// As we are using req.Form directly, this triggers a call to ParseForm() if needed.
c.Query("")
ctx, span := tracer.Start(c.Req.Context(), "api.prometheus.RouteGetRuleStatuses")
defer span.End()
// Propagate the new context so child spans can attach to it.
c.Req = c.Req.WithContext(ctx)
orgID := c.GetOrgID()
span.SetAttributes(attribute.Int64("org_id", orgID))
ruleResponse := apimodels.RuleResponse{
DiscoveryBase: apimodels.DiscoveryBase{
Status: "success",
@@ -261,13 +283,14 @@ func (srv PrometheusSrv) RouteGetRuleStatuses(c *contextmodel.ReqContext) respon
},
}
namespaceMap, err := srv.store.GetUserVisibleNamespaces(c.Req.Context(), c.GetOrgID(), c.SignedInUser)
namespaceMap, err := srv.store.GetUserVisibleNamespaces(c.Req.Context(), orgID, c.SignedInUser)
if err != nil {
ruleResponse.Status = "error"
ruleResponse.Error = fmt.Sprintf("failed to get namespaces visible to the user: %s", err.Error())
ruleResponse.ErrorType = apiv1.ErrServer
return response.JSON(ruleResponse.HTTPStatusCode(), ruleResponse)
}
span.AddEvent("User visible namespaces retrieved")
allowedNamespaces := map[string]string{}
for namespaceUID, folder := range namespaceMap {
@@ -283,6 +306,8 @@ func (srv PrometheusSrv) RouteGetRuleStatuses(c *contextmodel.ReqContext) respon
allowedNamespaces[namespaceUID] = folder.Fullpath
}
}
span.AddEvent("User permissions checked")
span.SetAttributes(attribute.Int("allowedNamespaces", len(allowedNamespaces)))
provenanceRecords, err := srv.provenanceStore.GetProvenances(c.Req.Context(), c.GetOrgID(), (&ngmodels.AlertRule{}).ResourceType())
if err != nil {
@@ -297,7 +322,7 @@ func (srv PrometheusSrv) RouteGetRuleStatuses(c *contextmodel.ReqContext) respon
srv.store,
RuleGroupStatusesOptions{
Ctx: c.Req.Context(),
OrgID: c.OrgID,
OrgID: orgID,
Query: c.Req.Form,
AllowedNamespaces: allowedNamespaces,
},
@@ -405,6 +430,10 @@ func RuleAlertStateMutatorGenerator(manager state.AlertInstanceManager) RuleAler
}
func PrepareRuleGroupStatusesV2(log log.Logger, store ListAlertRulesStoreV2, opts RuleGroupStatusesOptions, ruleStatusMutator RuleStatusMutator, alertStateMutator RuleAlertStateMutator, provenanceRecords map[string]ngmodels.Provenance) apimodels.RuleResponse {
ctx, span := tracer.Start(opts.Ctx, "api.prometheus.PrepareRuleGroupStatusesV2")
defer span.End()
opts.Ctx = ctx
ruleResponse := apimodels.RuleResponse{
DiscoveryBase: apimodels.DiscoveryBase{
Status: "success",
@@ -428,9 +457,17 @@ func PrepareRuleGroupStatusesV2(log log.Logger, store ListAlertRulesStoreV2, opt
ruleResponse.ErrorType = apiv1.ErrBadData
return ruleResponse
}
span.SetAttributes(
attribute.String("dashboard_uid", dashboardUID),
attribute.Int64("panel_id", panelID),
)
limitRulesPerGroup := getInt64WithDefault(opts.Query, "limit_rules", -1)
limitAlertsPerRule := getInt64WithDefault(opts.Query, "limit_alerts", -1)
span.SetAttributes(
attribute.Int64("limit_rules", limitRulesPerGroup),
attribute.Int64("limit_alerts", limitAlertsPerRule),
)
matchers, err := getMatchersFromQuery(opts.Query)
if err != nil {
ruleResponse.Status = "error"
@@ -438,6 +475,8 @@ func PrepareRuleGroupStatusesV2(log log.Logger, store ListAlertRulesStoreV2, opt
ruleResponse.ErrorType = apiv1.ErrBadData
return ruleResponse
}
span.SetAttributes(attribute.Int("matcher_count", len(matchers)))
stateFilterSet, err := GetStatesFromQuery(opts.Query)
if err != nil {
ruleResponse.Status = "error"
@@ -445,6 +484,10 @@ func PrepareRuleGroupStatusesV2(log log.Logger, store ListAlertRulesStoreV2, opt
ruleResponse.ErrorType = apiv1.ErrBadData
return ruleResponse
}
span.SetAttributes(
attribute.Int("state_filter_count", len(stateFilterSet)),
attribute.StringSlice("state_filter", MapStateSetToStrings(stateFilterSet)),
)
healthFilterSet, err := GetHealthFromQuery(opts.Query)
if err != nil {
@@ -453,11 +496,18 @@ func PrepareRuleGroupStatusesV2(log log.Logger, store ListAlertRulesStoreV2, opt
ruleResponse.ErrorType = apiv1.ErrBadData
return ruleResponse
}
span.SetAttributes(
attribute.Int("health_filter_count", len(healthFilterSet)),
attribute.StringSlice("health_filter", slices.Collect(maps.Keys(healthFilterSet))),
)
var labelOptions []ngmodels.LabelOption
if !getBoolWithDefault(opts.Query, queryIncludeInternalLabels, false) {
labelOptions = append(labelOptions, ngmodels.WithoutInternalLabels())
}
span.SetAttributes(
attribute.Bool("include_internal_labels", len(labelOptions) == 0),
)
if len(opts.AllowedNamespaces) == 0 {
log.Debug("User does not have access to any namespaces")
@@ -476,19 +526,36 @@ func PrepareRuleGroupStatusesV2(log log.Logger, store ListAlertRulesStoreV2, opt
}
}
span.SetAttributes(
attribute.Bool("folder_uid_set", folderUID != ""),
attribute.Int("namespace_count", len(namespaceUIDs)),
)
ruleGroups := opts.Query["rule_group"]
ruleUIDs := opts.Query["rule_uid"]
span.SetAttributes(
attribute.Int("rule_group_count", len(ruleGroups)),
attribute.Int("rule_uid_count", len(ruleUIDs)),
)
receiverName := opts.Query.Get("receiver_name")
span.SetAttributes(attribute.Bool("receiver_name_set", receiverName != ""))
title := opts.Query.Get("search.rule_name")
span.SetAttributes(attribute.Bool("search_rule_name_set", title != ""))
searchRuleGroup := opts.Query.Get("search.rule_group")
span.SetAttributes(attribute.Bool("search_rule_group_set", searchRuleGroup != ""))
var ruleType ngmodels.RuleTypeFilter
switch ngmodels.RuleType(opts.Query.Get("rule_type")) {
case ngmodels.RuleTypeAlerting:
ruleType = ngmodels.RuleTypeFilterAlerting
span.SetAttributes(attribute.Bool("alerting_only", true))
case ngmodels.RuleTypeRecording:
ruleType = ngmodels.RuleTypeFilterRecording
span.SetAttributes(attribute.Bool("recording_only", true))
default:
ruleType = ngmodels.RuleTypeFilterAll
}
@@ -507,11 +574,23 @@ func PrepareRuleGroupStatusesV2(log log.Logger, store ListAlertRulesStoreV2, opt
maxGroups := getInt64WithDefault(opts.Query, "group_limit", -1)
maxRules := getInt64WithDefault(opts.Query, "rule_limit", -1)
nextToken := opts.Query.Get("group_next_token")
span.SetAttributes(
attribute.Int64("group_limit", maxGroups),
attribute.Int64("rule_limit", maxRules),
attribute.Bool("group_next_token_set", nextToken != ""),
)
if maxGroups == 0 || maxRules == 0 {
return ruleResponse
}
ruleNames := opts.Query["rule_name"]
ruleNamesSet := make(map[string]struct{}, len(ruleNames))
for _, rn := range ruleNames {
ruleNamesSet[rn] = struct{}{}
}
span.SetAttributes(attribute.Int("rule_name_count", len(ruleNamesSet)))
byGroupQuery := ngmodels.ListAlertRulesExtendedQuery{
ListAlertRulesQuery: ngmodels.ListAlertRulesQuery{
OrgID: opts.OrgID,
@@ -536,12 +615,11 @@ func PrepareRuleGroupStatusesV2(log log.Logger, store ListAlertRulesStoreV2, opt
ruleResponse.ErrorType = apiv1.ErrServer
return ruleResponse
}
ruleNames := opts.Query["rule_name"]
ruleNamesSet := make(map[string]struct{}, len(ruleNames))
for _, rn := range ruleNames {
ruleNamesSet[rn] = struct{}{}
}
span.SetAttributes(
attribute.Int("store_rule_list_len", len(ruleList)),
attribute.Bool("store_continue_token_set", continueToken != ""),
)
span.AddEvent("Alert rules retrieved from store")
groupedRules := getGroupedRules(log, ruleList, ruleNamesSet, opts.AllowedNamespaces)
rulesTotals := make(map[string]int64, len(groupedRules))
+4
View File
@@ -194,8 +194,11 @@ func (ng *AlertNG) init() error {
var opts []notifier.Option
moaLogger := log.New("ngalert.multiorg.alertmanager")
crypto := notifier.NewCrypto(ng.SecretsService, ng.store, moaLogger)
//nolint:staticcheck // not yet migrated to OpenFeature
remotePrimary := ng.FeatureToggles.IsEnabled(initCtx, featuremgmt.FlagAlertmanagerRemotePrimary)
//nolint:staticcheck // not yet migrated to OpenFeature
remoteSecondary := ng.FeatureToggles.IsEnabled(initCtx, featuremgmt.FlagAlertmanagerRemoteSecondary)
//nolint:staticcheck // not yet migrated to OpenFeature
remoteSecondaryWithRemoteState := ng.FeatureToggles.IsEnabled(initCtx, featuremgmt.FlagAlertmanagerRemoteSecondaryWithRemoteState)
if remotePrimary || remoteSecondary || remoteSecondaryWithRemoteState {
m := ng.Metrics.GetRemoteAlertmanagerMetrics()
@@ -717,6 +720,7 @@ func configureNotificationHistorian(
l log.Logger,
tracer tracing.Tracer,
) (nfstatus.NotificationHistorian, error) {
//nolint:staticcheck // not yet migrated to OpenFeature
if !featureToggles.IsEnabled(ctx, featuremgmt.FlagAlertingNotificationHistory) || !cfg.Enabled {
met.Info.Set(0)
return nil, nil
+1
View File
@@ -89,6 +89,7 @@ func (d *AlertsRouter) SyncAndApplyConfigFromDatabase(ctx context.Context) error
d.logger.Debug("Attempting to sync admin configs", "count", len(cfgs))
//nolint:staticcheck // not yet migrated to OpenFeature
disableExternal := d.featureManager.IsEnabled(ctx, featuremgmt.FlagAlertingDisableSendAlertsExternal)
orgsFound := make(map[int64]struct{}, len(cfgs))
+1
View File
@@ -1097,6 +1097,7 @@ func (st DBstore) GetAlertRulesForScheduling(ctx context.Context, query *ngmodel
continue
}
}
//nolint:staticcheck // not yet migrated to OpenFeature
if st.FeatureToggles.IsEnabled(ctx, featuremgmt.FlagAlertingQueryOptimization) {
if optimizations, err := OptimizeAlertQueries(converted.Data); err != nil {
st.Logger.Error("Could not migrate rule from range to instant query", "rule", rule.UID, "err", err)
@@ -6,21 +6,26 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana/pkg/services/contexthandler"
"github.com/grafana/grafana/pkg/setting"
)
// NewClearAuthHeadersMiddleware creates a new backend.HandlerMiddleware
// that will clear any outgoing HTTP headers that was part of the incoming
// HTTP request and used when authenticating to Grafana.
func NewClearAuthHeadersMiddleware() backend.HandlerMiddleware {
func NewClearAuthHeadersMiddleware(cfgJWTAuth *setting.AuthJWTSettings, cfgAuthProxy *setting.AuthProxySettings) backend.HandlerMiddleware {
return backend.HandlerMiddlewareFunc(func(next backend.Handler) backend.Handler {
return &ClearAuthHeadersMiddleware{
BaseHandler: backend.NewBaseHandler(next),
BaseHandler: backend.NewBaseHandler(next),
cfgJWTAuth: cfgJWTAuth,
cfgAuthProxy: cfgAuthProxy,
}
})
}
// ClearAuthHeadersMiddleware is a backend handler middleware that deletes
// Grafana auth-related HTTP headers (as reported by
// contexthandler.GetAuthHTTPHeaders) from outgoing plugin requests, so that
// headers used to authenticate the incoming request are not forwarded.
type ClearAuthHeadersMiddleware struct {
	backend.BaseHandler
	// JWT auth settings, used to determine the configured JWT header name.
	cfgJWTAuth *setting.AuthJWTSettings
	// Auth proxy settings, used to determine the proxy header names.
	cfgAuthProxy *setting.AuthProxySettings
}
func (m *ClearAuthHeadersMiddleware) clearHeaders(ctx context.Context, h backend.ForwardHTTPHeaders) {
@@ -30,11 +35,9 @@ func (m *ClearAuthHeadersMiddleware) clearHeaders(ctx context.Context, h backend
return
}
list := contexthandler.AuthHTTPHeaderListFromContext(ctx)
if list != nil {
for _, k := range list.Items {
h.DeleteHTTPHeader(k)
}
items := contexthandler.GetAuthHTTPHeaders(m.cfgJWTAuth, m.cfgAuthProxy)
for _, k := range items {
h.DeleteHTTPHeader(k)
}
}
@@ -8,7 +8,6 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/backend/handlertest"
"github.com/grafana/grafana/pkg/services/contexthandler"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/setting"
)
@@ -16,397 +15,227 @@ import (
func TestClearAuthHeadersMiddleware(t *testing.T) {
const otherHeader = "test"
t.Run("When no auth headers in reqContext", func(t *testing.T) {
req, err := http.NewRequest(http.MethodGet, "/some/thing", nil)
require.NoError(t, err)
req, err := http.NewRequest(http.MethodGet, "/some/thing", nil)
require.NoError(t, err)
req.Header.Set(otherHeader, "test")
t.Run("When requests are for a datasource", func(t *testing.T) {
cfg := setting.NewCfg()
cdt := handlertest.NewHandlerMiddlewareTest(t,
WithReqContext(req, &user.SignedInUser{}),
handlertest.WithMiddlewares(NewClearAuthHeadersMiddleware(&cfg.JWTAuth, &cfg.AuthProxy)),
)
t.Run("And requests are for a datasource", func(t *testing.T) {
cdt := handlertest.NewHandlerMiddlewareTest(t,
WithReqContext(req, &user.SignedInUser{}),
handlertest.WithMiddlewares(NewClearAuthHeadersMiddleware()),
)
pluginCtx := backend.PluginContext{
DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
}
pluginCtx := backend.PluginContext{
DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
}
t.Run("No auth headers to clear when calling QueryData", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.QueryData(req.Context(), &backend.QueryDataRequest{
PluginContext: pluginCtx,
Headers: map[string]string{otherHeader: "test"},
})
require.NoError(t, err)
require.NotNil(t, cdt.QueryDataReq)
require.Len(t, cdt.QueryDataReq.Headers, 1)
require.Empty(t, cdt.QueryDataReq.GetHTTPHeaders())
})
t.Run("No auth headers to clear when calling CallResource", func(t *testing.T) {
err = cdt.MiddlewareHandler.CallResource(req.Context(), &backend.CallResourceRequest{
PluginContext: pluginCtx,
Headers: map[string][]string{otherHeader: {"test"}},
}, nopCallResourceSender)
require.NoError(t, err)
require.NotNil(t, cdt.CallResourceReq)
require.Len(t, cdt.CallResourceReq.Headers, 1)
require.Equal(t, http.Header{http.CanonicalHeaderKey(otherHeader): {"test"}}, cdt.CallResourceReq.GetHTTPHeaders())
})
t.Run("No auth headers to clear when calling CheckHealth", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.CheckHealth(req.Context(), &backend.CheckHealthRequest{
PluginContext: pluginCtx,
Headers: map[string]string{otherHeader: "test"},
})
require.NoError(t, err)
require.NotNil(t, cdt.CheckHealthReq)
require.Len(t, cdt.CheckHealthReq.Headers, 1)
require.Empty(t, cdt.CheckHealthReq.GetHTTPHeaders())
})
t.Run("No auth headers to clear when calling SubscribeStream", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.SubscribeStream(req.Context(), &backend.SubscribeStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{otherHeader: "test"},
})
require.NoError(t, err)
require.NotNil(t, cdt.SubscribeStreamReq)
require.Len(t, cdt.SubscribeStreamReq.Headers, 1)
require.Empty(t, cdt.SubscribeStreamReq.GetHTTPHeaders())
})
t.Run("No auth headers to clear when calling PublishStream", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.PublishStream(req.Context(), &backend.PublishStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{otherHeader: "test"},
})
require.NoError(t, err)
require.NotNil(t, cdt.PublishStreamReq)
require.Len(t, cdt.PublishStreamReq.Headers, 1)
require.Empty(t, cdt.PublishStreamReq.GetHTTPHeaders())
})
t.Run("No auth headers to clear when calling RunStream", func(t *testing.T) {
err = cdt.MiddlewareHandler.RunStream(req.Context(), &backend.RunStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{otherHeader: "test"},
}, &backend.StreamSender{})
require.NoError(t, err)
require.NotNil(t, cdt.RunStreamReq)
require.Len(t, cdt.RunStreamReq.Headers, 1)
require.Empty(t, cdt.RunStreamReq.GetHTTPHeaders())
t.Run("Should clear auth headers when calling QueryData", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.QueryData(req.Context(), &backend.QueryDataRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.QueryDataReq)
require.Len(t, cdt.QueryDataReq.Headers, 1)
require.Equal(t, "test", cdt.QueryDataReq.Headers[otherHeader])
require.Empty(t, cdt.QueryDataReq.GetHTTPHeaders())
})
t.Run("And requests are for an app", func(t *testing.T) {
cdt := handlertest.NewHandlerMiddlewareTest(t,
WithReqContext(req, &user.SignedInUser{}),
handlertest.WithMiddlewares(NewClearAuthHeadersMiddleware()),
)
t.Run("Should clear auth headers when calling CallResource", func(t *testing.T) {
err = cdt.MiddlewareHandler.CallResource(req.Context(), &backend.CallResourceRequest{
PluginContext: pluginCtx,
Headers: map[string][]string{
otherHeader: {"test"},
"Authorization": {"secret"},
"X-Grafana-Device-Id": {"secret"},
},
}, nopCallResourceSender)
require.NoError(t, err)
require.NotNil(t, cdt.CallResourceReq)
require.Len(t, cdt.CallResourceReq.Headers, 1)
require.Equal(t, []string{"test"}, cdt.CallResourceReq.Headers[otherHeader])
require.Equal(t, "test", cdt.CallResourceReq.GetHTTPHeader(otherHeader))
})
pluginCtx := backend.PluginContext{
AppInstanceSettings: &backend.AppInstanceSettings{},
}
t.Run("No auth headers to clear when calling QueryData", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.QueryData(req.Context(), &backend.QueryDataRequest{
PluginContext: pluginCtx,
Headers: map[string]string{otherHeader: "test"},
})
require.NoError(t, err)
require.NotNil(t, cdt.QueryDataReq)
require.Len(t, cdt.QueryDataReq.Headers, 1)
require.Equal(t, "test", cdt.QueryDataReq.Headers[otherHeader])
require.Empty(t, cdt.QueryDataReq.GetHTTPHeaders())
t.Run("Should clear auth headers when calling CheckHealth", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.CheckHealth(req.Context(), &backend.CheckHealthRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.CheckHealthReq)
require.Len(t, cdt.CheckHealthReq.Headers, 1)
require.Equal(t, "test", cdt.CheckHealthReq.Headers[otherHeader])
require.Empty(t, cdt.CheckHealthReq.GetHTTPHeaders())
})
t.Run("No auth headers to clear when calling CallResource", func(t *testing.T) {
err = cdt.MiddlewareHandler.CallResource(req.Context(), &backend.CallResourceRequest{
PluginContext: pluginCtx,
Headers: map[string][]string{otherHeader: {"test"}},
}, nopCallResourceSender)
require.NoError(t, err)
require.NotNil(t, cdt.CallResourceReq)
require.Len(t, cdt.CallResourceReq.Headers, 1)
require.Equal(t, []string{"test"}, cdt.CallResourceReq.Headers[otherHeader])
require.Equal(t, http.Header{http.CanonicalHeaderKey(otherHeader): {"test"}}, cdt.CallResourceReq.GetHTTPHeaders())
t.Run("Should clear auth headers when calling SubscribeStream", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.SubscribeStream(req.Context(), &backend.SubscribeStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.SubscribeStreamReq)
require.Len(t, cdt.SubscribeStreamReq.Headers, 1)
require.Equal(t, "test", cdt.SubscribeStreamReq.Headers[otherHeader])
require.Empty(t, cdt.SubscribeStreamReq.GetHTTPHeaders())
})
t.Run("No auth headers to clear when calling CheckHealth", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.CheckHealth(req.Context(), &backend.CheckHealthRequest{
PluginContext: pluginCtx,
Headers: map[string]string{otherHeader: "test"},
})
require.NoError(t, err)
require.NotNil(t, cdt.CheckHealthReq)
require.Len(t, cdt.CheckHealthReq.Headers, 1)
require.Equal(t, "test", cdt.CheckHealthReq.Headers[otherHeader])
require.Empty(t, cdt.CheckHealthReq.GetHTTPHeaders())
t.Run("Should clear auth headers when calling PublishStream", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.PublishStream(req.Context(), &backend.PublishStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.PublishStreamReq)
require.Len(t, cdt.PublishStreamReq.Headers, 1)
require.Equal(t, "test", cdt.PublishStreamReq.Headers[otherHeader])
require.Empty(t, cdt.PublishStreamReq.GetHTTPHeaders())
})
t.Run("No auth headers to clear when calling SubscribeStream", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.SubscribeStream(req.Context(), &backend.SubscribeStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{otherHeader: "test"},
})
require.NoError(t, err)
require.NotNil(t, cdt.SubscribeStreamReq)
require.Len(t, cdt.SubscribeStreamReq.Headers, 1)
require.Equal(t, "test", cdt.SubscribeStreamReq.Headers[otherHeader])
require.Empty(t, cdt.SubscribeStreamReq.GetHTTPHeaders())
})
t.Run("No auth headers to clear when calling PublishStream", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.PublishStream(req.Context(), &backend.PublishStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{otherHeader: "test"},
})
require.NoError(t, err)
require.NotNil(t, cdt.PublishStreamReq)
require.Len(t, cdt.PublishStreamReq.Headers, 1)
require.Equal(t, "test", cdt.PublishStreamReq.Headers[otherHeader])
require.Empty(t, cdt.PublishStreamReq.GetHTTPHeaders())
})
t.Run("No auth headers to clear when calling RunStream", func(t *testing.T) {
err = cdt.MiddlewareHandler.RunStream(req.Context(), &backend.RunStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{otherHeader: "test"},
}, &backend.StreamSender{})
require.NoError(t, err)
require.NotNil(t, cdt.RunStreamReq)
require.Len(t, cdt.RunStreamReq.Headers, 1)
require.Equal(t, "test", cdt.RunStreamReq.Headers[otherHeader])
require.Empty(t, cdt.RunStreamReq.GetHTTPHeaders())
})
t.Run("Should clear auth headers when calling RunStream", func(t *testing.T) {
err = cdt.MiddlewareHandler.RunStream(req.Context(), &backend.RunStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
}, &backend.StreamSender{})
require.NoError(t, err)
require.NotNil(t, cdt.RunStreamReq)
require.Len(t, cdt.RunStreamReq.Headers, 1)
require.Equal(t, "test", cdt.RunStreamReq.Headers[otherHeader])
require.Empty(t, cdt.RunStreamReq.GetHTTPHeaders())
})
})
t.Run("When auth headers in reqContext", func(t *testing.T) {
req, err := http.NewRequest(http.MethodGet, "/some/thing", nil)
require.NoError(t, err)
t.Run("When requests are for an app", func(t *testing.T) {
cfg := setting.NewCfg()
cdt := handlertest.NewHandlerMiddlewareTest(t,
WithReqContext(req, &user.SignedInUser{}),
handlertest.WithMiddlewares(NewClearAuthHeadersMiddleware(&cfg.JWTAuth, &cfg.AuthProxy)),
)
t.Run("And requests are for a datasource", func(t *testing.T) {
cdt := handlertest.NewHandlerMiddlewareTest(t,
WithReqContext(req, &user.SignedInUser{}),
handlertest.WithMiddlewares(NewClearAuthHeadersMiddleware()),
)
req.Header.Set("Authorization", "val")
req := req.WithContext(contexthandler.WithAuthHTTPHeaders(req.Context(), setting.NewCfg()))
const otherHeader = "x-Other"
req.Header.Set(otherHeader, "test")
pluginCtx := backend.PluginContext{
DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{},
}
pluginCtx := backend.PluginContext{
AppInstanceSettings: &backend.AppInstanceSettings{},
}
t.Run("Should clear auth headers when calling QueryData", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.QueryData(req.Context(), &backend.QueryDataRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.QueryDataReq)
require.Len(t, cdt.QueryDataReq.Headers, 1)
require.Equal(t, "test", cdt.QueryDataReq.Headers[otherHeader])
require.Empty(t, cdt.QueryDataReq.GetHTTPHeaders())
})
t.Run("Should clear auth headers when calling CallResource", func(t *testing.T) {
err = cdt.MiddlewareHandler.CallResource(req.Context(), &backend.CallResourceRequest{
PluginContext: pluginCtx,
Headers: map[string][]string{
otherHeader: {"test"},
"Authorization": {"secret"},
"X-Grafana-Device-Id": {"secret"},
},
}, nopCallResourceSender)
require.NoError(t, err)
require.NotNil(t, cdt.CallResourceReq)
require.Len(t, cdt.CallResourceReq.Headers, 1)
require.Equal(t, []string{"test"}, cdt.CallResourceReq.Headers[otherHeader])
require.Equal(t, "test", cdt.CallResourceReq.GetHTTPHeader(otherHeader))
})
t.Run("Should clear auth headers when calling CheckHealth", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.CheckHealth(req.Context(), &backend.CheckHealthRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.CheckHealthReq)
require.Len(t, cdt.CheckHealthReq.Headers, 1)
require.Equal(t, "test", cdt.CheckHealthReq.Headers[otherHeader])
require.Empty(t, cdt.CheckHealthReq.GetHTTPHeaders())
})
t.Run("Should clear auth headers when calling SubscribeStream", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.SubscribeStream(req.Context(), &backend.SubscribeStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.SubscribeStreamReq)
require.Len(t, cdt.SubscribeStreamReq.Headers, 1)
require.Equal(t, "test", cdt.SubscribeStreamReq.Headers[otherHeader])
require.Empty(t, cdt.SubscribeStreamReq.GetHTTPHeaders())
})
t.Run("Should clear auth headers when calling PublishStream", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.PublishStream(req.Context(), &backend.PublishStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.PublishStreamReq)
require.Len(t, cdt.PublishStreamReq.Headers, 1)
require.Equal(t, "test", cdt.PublishStreamReq.Headers[otherHeader])
require.Empty(t, cdt.PublishStreamReq.GetHTTPHeaders())
})
t.Run("Should clear auth headers when calling RunStream", func(t *testing.T) {
err = cdt.MiddlewareHandler.RunStream(req.Context(), &backend.RunStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
}, &backend.StreamSender{})
require.NoError(t, err)
require.NotNil(t, cdt.RunStreamReq)
require.Len(t, cdt.RunStreamReq.Headers, 1)
require.Equal(t, "test", cdt.RunStreamReq.Headers[otherHeader])
require.Empty(t, cdt.RunStreamReq.GetHTTPHeaders())
t.Run("Should clear auth headers when calling QueryData", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.QueryData(req.Context(), &backend.QueryDataRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.QueryDataReq)
require.Len(t, cdt.QueryDataReq.Headers, 1)
require.Equal(t, "test", cdt.QueryDataReq.Headers[otherHeader])
require.Empty(t, cdt.QueryDataReq.GetHTTPHeaders())
})
t.Run("And requests are for an app", func(t *testing.T) {
cdt := handlertest.NewHandlerMiddlewareTest(t,
WithReqContext(req, &user.SignedInUser{}),
handlertest.WithMiddlewares(NewClearAuthHeadersMiddleware()),
)
t.Run("Should clear auth headers when calling CallResource", func(t *testing.T) {
err = cdt.MiddlewareHandler.CallResource(req.Context(), &backend.CallResourceRequest{
PluginContext: pluginCtx,
Headers: map[string][]string{
otherHeader: {"test"},
"Authorization": {"secret"},
"X-Grafana-Device-Id": {"secret"},
},
}, nopCallResourceSender)
require.NoError(t, err)
require.NotNil(t, cdt.CallResourceReq)
require.Len(t, cdt.CallResourceReq.Headers, 1)
require.Equal(t, []string{"test"}, cdt.CallResourceReq.Headers[otherHeader])
require.Equal(t, "test", cdt.CallResourceReq.GetHTTPHeader(otherHeader))
})
req := req.WithContext(contexthandler.WithAuthHTTPHeaders(req.Context(), setting.NewCfg()))
req.Header.Set("Authorization", "val")
const otherHeader = "x-Other"
req.Header.Set(otherHeader, "test")
pluginCtx := backend.PluginContext{
AppInstanceSettings: &backend.AppInstanceSettings{},
}
t.Run("Should clear auth headers when calling QueryData", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.QueryData(req.Context(), &backend.QueryDataRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.QueryDataReq)
require.Len(t, cdt.QueryDataReq.Headers, 1)
require.Equal(t, "test", cdt.QueryDataReq.Headers[otherHeader])
require.Empty(t, cdt.QueryDataReq.GetHTTPHeaders())
t.Run("Should clear auth headers when calling CheckHealth", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.CheckHealth(req.Context(), &backend.CheckHealthRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.CheckHealthReq)
require.Len(t, cdt.CheckHealthReq.Headers, 1)
require.Equal(t, "test", cdt.CheckHealthReq.Headers[otherHeader])
require.Empty(t, cdt.CheckHealthReq.GetHTTPHeaders())
})
t.Run("Should clear auth headers when calling CallResource", func(t *testing.T) {
err = cdt.MiddlewareHandler.CallResource(req.Context(), &backend.CallResourceRequest{
PluginContext: pluginCtx,
Headers: map[string][]string{
otherHeader: {"test"},
"Authorization": {"secret"},
"X-Grafana-Device-Id": {"secret"},
},
}, nopCallResourceSender)
require.NoError(t, err)
require.NotNil(t, cdt.CallResourceReq)
require.Len(t, cdt.CallResourceReq.Headers, 1)
require.Equal(t, []string{"test"}, cdt.CallResourceReq.Headers[otherHeader])
require.Equal(t, "test", cdt.CallResourceReq.GetHTTPHeader(otherHeader))
t.Run("Should clear auth headers when calling SubscribeStream", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.SubscribeStream(req.Context(), &backend.SubscribeStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.SubscribeStreamReq)
require.Len(t, cdt.SubscribeStreamReq.Headers, 1)
require.Equal(t, "test", cdt.SubscribeStreamReq.Headers[otherHeader])
require.Empty(t, cdt.SubscribeStreamReq.GetHTTPHeaders())
})
t.Run("Should clear auth headers when calling CheckHealth", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.CheckHealth(req.Context(), &backend.CheckHealthRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.CheckHealthReq)
require.Len(t, cdt.CheckHealthReq.Headers, 1)
require.Equal(t, "test", cdt.CheckHealthReq.Headers[otherHeader])
require.Empty(t, cdt.CheckHealthReq.GetHTTPHeaders())
t.Run("Should clear auth headers when calling PublishStream", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.PublishStream(req.Context(), &backend.PublishStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.PublishStreamReq)
require.Len(t, cdt.PublishStreamReq.Headers, 1)
require.Equal(t, "test", cdt.PublishStreamReq.Headers[otherHeader])
require.Empty(t, cdt.PublishStreamReq.GetHTTPHeaders())
})
t.Run("Should clear auth headers when calling SubscribeStream", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.SubscribeStream(req.Context(), &backend.SubscribeStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.SubscribeStreamReq)
require.Len(t, cdt.SubscribeStreamReq.Headers, 1)
require.Equal(t, "test", cdt.SubscribeStreamReq.Headers[otherHeader])
require.Empty(t, cdt.SubscribeStreamReq.GetHTTPHeaders())
})
t.Run("Should clear auth headers when calling PublishStream", func(t *testing.T) {
_, err = cdt.MiddlewareHandler.PublishStream(req.Context(), &backend.PublishStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
})
require.NoError(t, err)
require.NotNil(t, cdt.PublishStreamReq)
require.Len(t, cdt.PublishStreamReq.Headers, 1)
require.Equal(t, "test", cdt.PublishStreamReq.Headers[otherHeader])
require.Empty(t, cdt.PublishStreamReq.GetHTTPHeaders())
})
t.Run("Should clear auth headers when calling RunStream", func(t *testing.T) {
err = cdt.MiddlewareHandler.RunStream(req.Context(), &backend.RunStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
}, &backend.StreamSender{})
require.NoError(t, err)
require.NotNil(t, cdt.RunStreamReq)
require.Len(t, cdt.RunStreamReq.Headers, 1)
require.Equal(t, "test", cdt.RunStreamReq.Headers[otherHeader])
require.Empty(t, cdt.RunStreamReq.GetHTTPHeaders())
})
t.Run("Should clear auth headers when calling RunStream", func(t *testing.T) {
err = cdt.MiddlewareHandler.RunStream(req.Context(), &backend.RunStreamRequest{
PluginContext: pluginCtx,
Headers: map[string]string{
otherHeader: "test",
"Authorization": "secret",
"X-Grafana-Device-Id": "secret",
},
}, &backend.StreamSender{})
require.NoError(t, err)
require.NotNil(t, cdt.RunStreamReq)
require.Len(t, cdt.RunStreamReq.Headers, 1)
require.Equal(t, "test", cdt.RunStreamReq.Headers[otherHeader])
require.Empty(t, cdt.RunStreamReq.GetHTTPHeaders())
})
})
}
@@ -87,10 +87,12 @@ func ProvideSyncer(
}
func (s *syncer) Sync(ctx context.Context, source install.Source, installedPlugins []*plugins.Plugin) error {
//nolint:staticcheck // not yet migrated to OpenFeature
if !s.featureToggles.IsEnabled(ctx, featuremgmt.FlagPluginInstallAPISync) {
return nil
}
//nolint:staticcheck // not yet migrated to OpenFeature
if !s.featureToggles.IsEnabled(ctx, featuremgmt.FlagPluginStoreServiceLoading) {
logging.DefaultLogger.Warn("pluginInstallAPISync is enabled, but pluginStoreServiceLoading is disabled. skipping plugin sync.")
return nil
@@ -200,7 +200,7 @@ func CreateMiddlewares(cfg *setting.Cfg, oAuthTokenService oauthtoken.OAuthToken
middlewares = append(middlewares,
clientmiddleware.NewTracingHeaderMiddleware(),
clientmiddleware.NewClearAuthHeadersMiddleware(),
clientmiddleware.NewClearAuthHeadersMiddleware(&cfg.JWTAuth, &cfg.AuthProxy),
clientmiddleware.NewOAuthTokenMiddleware(oAuthTokenService),
clientmiddleware.NewCookiesMiddleware(skipCookiesNames),
clientmiddleware.NewCachingMiddleware(cachingServiceClient),
@@ -161,7 +161,7 @@ func TestIntegrationPluginManager(t *testing.T) {
pr := prometheus.ProvideService(hcp)
tmpo := tempo.ProvideService(hcp, tracer)
td := testdatasource.ProvideService()
pg := postgres.ProvideService(cfg)
pg := postgres.ProvideService()
my := mysql.ProvideService()
ms := mssql.ProvideService(cfg)
db := db.InitTestDB(t, sqlstore.InitTestDBOpt{Cfg: cfg})
+2
View File
@@ -61,6 +61,7 @@ func UpdatePreferencesFor(ctx context.Context,
Navbar: dtoCmd.Navbar,
}
//nolint:staticcheck // not yet migrated to OpenFeature
if features.IsEnabled(ctx, featuremgmt.FlagLocaleFormatPreference) {
saveCmd.RegionalFormat = dtoCmd.RegionalFormat
}
@@ -101,6 +102,7 @@ func GetPreferencesFor(ctx context.Context,
dto.Language = &preference.JSONData.Language
}
//nolint:staticcheck // not yet migrated to OpenFeature
if features.IsEnabled(ctx, featuremgmt.FlagLocaleFormatPreference) {
if preference.JSONData.RegionalFormat != "" {
dto.RegionalFormat = &preference.JSONData.RegionalFormat
@@ -43,6 +43,7 @@ func ProvidePromTypeMigrationProvider(
}
func (s *PromTypeMigrationProviderImpl) Run(ctx context.Context) error {
//nolint:staticcheck // not yet migrated to OpenFeature
if !s.features.IsEnabled(ctx, featuremgmt.FlagPrometheusTypeMigration) {
return nil
}
+1
View File
@@ -37,6 +37,7 @@ func (rs *RenderingService) GetRenderUser(ctx context.Context, key string) (*Ren
var renderUser *RenderUser
//nolint:staticcheck // not yet migrated to OpenFeature
if looksLikeJWT(key) && rs.features.IsEnabled(ctx, featuremgmt.FlagRenderAuthJWT) {
from = "jwt"
renderUser = rs.getRenderUserFromJWT(key)
+4
View File
@@ -122,6 +122,7 @@ func (s *SecretsService) registerUsageMetrics() {
// Enabled / disabled
usageMetrics["stats.encryption.envelope_encryption_enabled.count"] = 0
//nolint:staticcheck // not yet migrated to OpenFeature
if !s.features.IsEnabled(ctx, featuremgmt.FlagDisableEnvelopeEncryption) {
usageMetrics["stats.encryption.envelope_encryption_enabled.count"] = 1
}
@@ -167,6 +168,7 @@ func (s *SecretsService) Encrypt(ctx context.Context, payload []byte, opt secret
defer span.End()
// Use legacy encryption service if featuremgmt.FlagDisableEnvelopeEncryption toggle is on
//nolint:staticcheck // not yet migrated to OpenFeature
if s.features.IsEnabled(ctx, featuremgmt.FlagDisableEnvelopeEncryption) {
return s.enc.Encrypt(ctx, payload, s.cfg.SecretKey)
}
@@ -343,6 +345,7 @@ func (s *SecretsService) Decrypt(ctx context.Context, payload []byte) ([]byte, e
// If encrypted with envelope encryption, the feature is disabled and
// no provider is initialized, then we throw an error.
//nolint:staticcheck // not yet migrated to OpenFeature
if s.encryptedWithEnvelopeEncryption(payload) &&
s.features.IsEnabled(ctx, featuremgmt.FlagDisableEnvelopeEncryption) &&
!s.providersInitialized() {
@@ -480,6 +483,7 @@ func (s *SecretsService) RotateDataKeys(ctx context.Context) error {
func (s *SecretsService) ReEncryptDataKeys(ctx context.Context) error {
s.log.Info("Data keys re-encryption triggered")
//nolint:staticcheck // not yet migrated to OpenFeature
if s.features.IsEnabled(ctx, featuremgmt.FlagDisableEnvelopeEncryption) {
s.log.Info("Envelope encryption is not enabled but trying to init providers anyway...")
+28 -3
View File
@@ -276,6 +276,12 @@ func readDashboardIter(jsonPath string, iter *jsoniter.Iterator, lookup Datasour
}
for sub := iter.ReadObject(); sub != ""; sub = iter.ReadObject() {
// Skip all null values silently.
if iter.WhatIsNext() == jsoniter.NilValue {
iter.Skip()
continue
}
if sub == "list" {
templatingListPath := templatingPath + ".list"
if !checkAndSkipUnexpectedElement(iter, templatingListPath, lc, jsoniter.ArrayValue) {
@@ -283,6 +289,12 @@ func readDashboardIter(jsonPath string, iter *jsoniter.Iterator, lookup Datasour
}
for ix := 0; iter.ReadArray(); ix++ {
// Skip all null elements silently.
if iter.WhatIsNext() == jsoniter.NilValue {
iter.Skip()
continue
}
tv := templateVariable{}
templatingListElementPath := fmt.Sprintf("%s[%d]", templatingListPath, ix)
@@ -308,7 +320,12 @@ func readDashboardIter(jsonPath string, iter *jsoniter.Iterator, lookup Datasour
case "query":
tv.query = iter.Read()
case "current":
if !checkAndSkipUnexpectedElement(iter, templatingListElementPath+".current", lc, jsoniter.ObjectValue) {
if !checkAndSkipUnexpectedElement(iter, templatingListElementPath+".current", lc, jsoniter.ObjectValue, jsoniter.ArrayValue) {
continue
}
if iter.WhatIsNext() == jsoniter.ArrayValue {
iter.Skip()
continue
}
@@ -527,10 +544,18 @@ func readpanelInfo(iter *jsoniter.Iterator, lookup DatasourceLookup, jsonPath st
switch field {
case "id":
if !checkAndSkipUnexpectedElement(iter, jsonPath+".id", lc, jsoniter.NumberValue) {
if !checkAndSkipUnexpectedElement(iter, jsonPath+".id", lc, jsoniter.NumberValue, jsoniter.StringValue) {
continue
}
panel.ID = iter.ReadInt64()
if iter.WhatIsNext() == jsoniter.StringValue {
id, err := strconv.ParseInt(iter.ReadString(), 10, 64)
if err == nil {
panel.ID = id
}
} else {
panel.ID = iter.ReadInt64()
}
case "type":
if !checkAndSkipUnexpectedElement(iter, jsonPath+".type", lc, jsoniter.StringValue) {
@@ -75,6 +75,7 @@ func TestReadDashboard(t *testing.T) {
"k8s-wrapper",
"k8s-wrapper-editable-string",
"k8s-wrapper-tags-string",
"k8s-wrapper-with-parsing-errors",
}
devdash := "../../../../../devenv/dev-dashboards/"
@@ -0,0 +1,74 @@
{
"id": 141,
"title": "pppp",
"tags": null,
"datasource": [
{
"uid": "default.uid",
"type": "default.type"
}
],
"panels": [
{
"id": 1,
"title": "green pie",
"libraryPanel": "a7975b7a-fb53-4ab7-951d-15810953b54f",
"datasource": [
{
"uid": "default.uid",
"type": "default.type"
}
]
},
{
"id": 2,
"title": "green pie",
"libraryPanel": "e1d5f519-dabd-47c6-9ad7-83d181ce1cee",
"datasource": [
{
"uid": "default.uid",
"type": "default.type"
}
]
},
{
"id": 7,
"title": "",
"type": "barchart",
"datasource": [
{
"uid": "default.uid",
"type": "default.type"
}
]
},
{
"id": 8,
"title": "",
"type": "graph",
"datasource": [
{
"uid": "default.uid",
"type": "default.type"
}
]
},
{
"id": 3,
"title": "collapsed row",
"type": "row",
"collapsed": [
{
"id": 42,
"title": "blue pie",
"libraryPanel": "l3d2s634-fdgf-75u4-3fg3-67j966ii7jur"
}
]
}
],
"schemaVersion": 38,
"linkCount": 0,
"timeFrom": "now-6h",
"timeTo": "now",
"timezone": ""
}
@@ -0,0 +1,133 @@
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v0alpha1",
"metadata": {
"name": "adfbg6f",
"namespace": "default",
"uid": "b396894e-56bf-4a01-837b-64157912ca00",
"creationTimestamp": "2024-10-30T18:30:54Z",
"annotations": {
"grafana.app/createdBy": "user:be2g71ke8yoe8b",
"grafana.app/originHash": "Grafana v9.2.0 (NA)",
"grafana.app/originName": "UI",
"grafana.app/originPath": "/dashboard/new"
}
},
"spec": {
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 141,
"links": [],
"liveNow": false,
"panels": [
{
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"id": "1",
"libraryPanel": {
"name": "green pie",
"uid": "a7975b7a-fb53-4ab7-951d-15810953b54f"
},
"title": "green pie"
},
{
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 2,
"libraryPanel": {
"name": "red pie",
"uid": "e1d5f519-dabd-47c6-9ad7-83d181ce1cee"
},
"title": "green pie"
},
{
"id": "7",
"type": "barchart"
},
{
"id": 8,
"type": "graph"
},
{
"collapsed": true,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 9
},
"id": 3,
"panels": [
{
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 42,
"libraryPanel": {
"name": "blue pie",
"uid": "l3d2s634-fdgf-75u4-3fg3-67j966ii7jur"
},
"title": "blue pie"
}
],
"title": "collapsed row",
"type": "row"
}
],
"refresh": "",
"schemaVersion": 38,
"tags": [],
"templating": {
"list": null,
"list": [
null,
{
"current": []
},
{
"current": {
"value": "default"
}
}
]
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "pppp",
"uid": "adfbg6f",
"version": 3,
"weekStart": ""
}
}
@@ -0,0 +1,20 @@
SELECT
{{ .Ident "guid" }},
{{ .Ident "name" }},
{{ .Ident "namespace" }},
{{ .Ident "annotations" }},
{{ .Ident "labels" }},
{{ .Ident "created" }},
{{ .Ident "created_by" }},
{{ .Ident "updated" }},
{{ .Ident "updated_by" }},
{{ .Ident "description" }},
{{ .Ident "type" }},
{{ .Ident "payload" }}
FROM
{{ .Ident "secret_keeper" }}
WHERE
{{ .Ident "namespace" }} = {{ .Arg .Namespace }} AND
{{ .Ident "active" }} = true
LIMIT 1
;
@@ -0,0 +1,5 @@
UPDATE {{ .Ident "secret_keeper" }}
SET {{ .Ident "active" }} = ({{ .Ident "name" }} = {{ .Arg .Name }})
WHERE
{{ .Ident "namespace" }} = {{ .Arg .Namespace }}
;
@@ -1,5 +1,7 @@
SELECT
{{ .Ident "version" }}
{{ .Ident "created" }},
{{ .Ident "version" }},
{{ .Ident "active" }}
FROM
{{ .Ident "secret_secure_value" }}
WHERE
+1 -1
View File
@@ -135,7 +135,7 @@ func (s *decryptStorage) Decrypt(ctx context.Context, namespace xkube.Namespace,
return "", fmt.Errorf("failed to authorize decryption with reason %v (%w)", reason, contracts.ErrDecryptNotAuthorized)
}
keeperConfig, err := s.keeperMetadataStorage.GetKeeperConfig(ctx, namespace.String(), sv.Spec.Keeper, contracts.ReadOpts{})
keeperConfig, err := s.keeperMetadataStorage.GetKeeperConfig(ctx, namespace.String(), sv.Status.Keeper, contracts.ReadOpts{})
if err != nil {
return "", fmt.Errorf("failed to read keeper config metadata storage: %v (%w)", err, contracts.ErrDecryptFailed)
}
@@ -280,3 +280,18 @@ func extractSecureValues(kp *secretv1beta1.Keeper) map[string]struct{} {
return nil
}
// getKeeperConfig extracts the provider-specific configuration from a Keeper
// resource according to its declared type. It returns nil when the type is
// not one of the known providers.
func getKeeperConfig(keeper *secretv1beta1.Keeper) secretv1beta1.KeeperConfig {
	spec := keeper.Spec
	switch spec.GetType() {
	case secretv1beta1.AWSKeeperType:
		return spec.Aws
	case secretv1beta1.AzureKeeperType:
		return spec.Azure
	case secretv1beta1.GCPKeeperType:
		return spec.Gcp
	case secretv1beta1.HashiCorpKeeperType:
		return spec.HashiCorpVault
	}
	// Unknown keeper type: there is no configuration to return.
	return nil
}
+113 -6
View File
@@ -2,6 +2,7 @@ package metadata
import (
"context"
"errors"
"fmt"
"strconv"
"time"
@@ -186,7 +187,7 @@ func (s *keeperMetadataStorage) read(ctx context.Context, namespace, name string
defer func() { _ = res.Close() }()
if !res.Next() {
return nil, contracts.ErrKeeperNotFound
return nil, fmt.Errorf("keeper=%s: %w", name, contracts.ErrKeeperNotFound)
}
var keeper keeperDB
@@ -568,9 +569,10 @@ func (s *keeperMetadataStorage) validateSecureValueReferences(ctx context.Contex
return nil
}
func (s *keeperMetadataStorage) GetKeeperConfig(ctx context.Context, namespace string, name *string, opts contracts.ReadOpts) (_ secretv1beta1.KeeperConfig, getErr error) {
func (s *keeperMetadataStorage) GetKeeperConfig(ctx context.Context, namespace string, name string, opts contracts.ReadOpts) (_ secretv1beta1.KeeperConfig, getErr error) {
ctx, span := s.tracer.Start(ctx, "KeeperMetadataStorage.GetKeeperConfig", trace.WithAttributes(
attribute.String("namespace", namespace),
attribute.String("name", name),
attribute.Bool("isForUpdate", opts.ForUpdate),
))
start := time.Now()
@@ -581,6 +583,7 @@ func (s *keeperMetadataStorage) GetKeeperConfig(ctx context.Context, namespace s
args := []any{
"namespace", namespace,
"name", name,
"isForUpdate", strconv.FormatBool(opts.ForUpdate),
}
@@ -597,14 +600,12 @@ func (s *keeperMetadataStorage) GetKeeperConfig(ctx context.Context, namespace s
}()
// Check if keeper is the systemwide one.
if name == nil {
if name == contracts.SystemKeeperName {
return &secretv1beta1.SystemKeeperConfig{}, nil
}
span.SetAttributes(attribute.String("name", *name))
// Load keeper config from metadata store, or TODO: keeper cache.
kp, err := s.read(ctx, namespace, *name, opts)
kp, err := s.read(ctx, namespace, name, opts)
if err != nil {
return nil, err
}
@@ -614,3 +615,109 @@ func (s *keeperMetadataStorage) GetKeeperConfig(ctx context.Context, namespace s
// TODO: this would be a good place to check if credentials are secure values and load them.
return keeperConfig, nil
}
// SetAsActive marks the keeper `name` as the single active keeper in the
// namespace. The underlying UPDATE flips every other keeper in the namespace
// to inactive in the same statement.
func (s *keeperMetadataStorage) SetAsActive(ctx context.Context, namespace xkube.Namespace, name string) error {
	params := setKeeperAsActive{
		SQLTemplate: sqltemplate.New(s.dialect),
		Namespace:   namespace.String(),
		Name:        name,
	}

	stmt, err := sqltemplate.Execute(sqlKeeperSetAsActive, params)
	if err != nil {
		return fmt.Errorf("template %q: %w", sqlKeeperSetAsActive.Name(), err)
	}

	// Ensure the keeper exists before activating it. A time-of-check/time-of-use
	// race is acceptable: activating a keeper deleted in between leaves every
	// keeper inactive, which falls back to the system keeper.
	if _, err := s.read(ctx, namespace.String(), name, contracts.ReadOpts{}); err != nil {
		return fmt.Errorf("reading keeper before setting as active: %w", err)
	}

	if _, err := s.db.ExecContext(ctx, stmt, params.GetArgs()...); err != nil {
		return fmt.Errorf("setting keeper as active %q: %w", stmt, err)
	}

	return nil
}
// GetActiveKeeper returns the keeper currently marked as active in the given
// namespace, or contracts.ErrKeeperNotFound when no keeper is active.
// The call is instrumented with tracing, structured logging and a duration
// metric via the deferred closure below (which reads the named readErr).
func (s *keeperMetadataStorage) GetActiveKeeper(ctx context.Context, namespace string) (keeper *secretv1beta1.Keeper, readErr error) {
	start := time.Now()
	ctx, span := s.tracer.Start(ctx, "KeeperMetadataStorage.GetActiveKeeper", trace.WithAttributes(
		attribute.String("namespace", namespace),
	))
	defer span.End()

	// Record the outcome (span status, log line, duration metric) on exit.
	defer func() {
		success := readErr == nil

		args := []any{
			"namespace", namespace,
		}
		args = append(args, "success", success)
		if !success {
			span.SetStatus(codes.Error, "KeeperMetadataStorage.GetActiveKeeper failed")
			span.RecordError(readErr)
			args = append(args, "error", readErr)
		}

		logging.FromContext(ctx).Info("KeeperMetadataStorage.GetActiveKeeper", args...)
		s.metrics.KeeperMetadataGetDuration.WithLabelValues(strconv.FormatBool(success)).Observe(time.Since(start).Seconds())
	}()

	req := &readActiveKeeper{
		SQLTemplate: sqltemplate.New(s.dialect),
		Namespace:   namespace,
	}

	query, err := sqltemplate.Execute(sqlKeeperReadActive, req)
	if err != nil {
		return nil, fmt.Errorf("execute template %q: %w", sqlKeeperReadActive.Name(), err)
	}

	res, err := s.db.QueryContext(ctx, query, req.GetArgs()...)
	if err != nil {
		return nil, fmt.Errorf("executing query to fetch active keeper in namespace %s: %w", namespace, err)
	}
	defer func() { _ = res.Close() }()

	if !res.Next() {
		return nil, contracts.ErrKeeperNotFound
	}

	// Named `row` to avoid shadowing the keeperDB type, which the previous
	// `var keeperDB keeperDB` declaration did.
	var row keeperDB
	err = res.Scan(
		&row.GUID, &row.Name, &row.Namespace, &row.Annotations, &row.Labels, &row.Created,
		&row.CreatedBy, &row.Updated, &row.UpdatedBy, &row.Description, &row.Type, &row.Payload,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to scan keeper row: %w", err)
	}

	if err := res.Err(); err != nil {
		return nil, fmt.Errorf("read rows error: %w", err)
	}

	keeper, readErr = row.toKubernetes()
	if readErr != nil {
		// Bug fix: this previously wrapped the stale outer `err` (nil at this
		// point) instead of readErr, losing the conversion error, and returned
		// a non-nil keeper alongside the error.
		return nil, fmt.Errorf("converting from keeperDB to kubernetes struct: %w", readErr)
	}

	return keeper, nil
}
// GetActiveKeeperConfig resolves the active keeper for the namespace and
// returns its name together with its provider configuration. When no keeper
// is active it falls back to the system keeper.
func (s *keeperMetadataStorage) GetActiveKeeperConfig(ctx context.Context, namespace string) (string, secretv1beta1.KeeperConfig, error) {
	active, err := s.GetActiveKeeper(ctx, namespace)
	if errors.Is(err, contracts.ErrKeeperNotFound) {
		// No active keeper in this namespace: default to the system keeper.
		return contracts.SystemKeeperName, &secretv1beta1.SystemKeeperConfig{}, nil
	}
	if err != nil {
		return "", nil, fmt.Errorf("fetching active keeper from db: %w", err)
	}
	return active.Name, getKeeperConfig(active), nil
}
@@ -6,6 +6,7 @@ import (
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/trace/noop"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
secretv1beta1 "github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1"
@@ -40,7 +41,7 @@ func Test_KeeperMetadataStorage_GetKeeperConfig(t *testing.T) {
keeperMetadataStorage := initStorage(t)
// get system keeper config
keeperConfig, err := keeperMetadataStorage.GetKeeperConfig(ctx, defaultKeeperNS, nil, contracts.ReadOpts{})
keeperConfig, err := keeperMetadataStorage.GetKeeperConfig(ctx, defaultKeeperNS, contracts.SystemKeeperName, contracts.ReadOpts{})
require.NoError(t, err)
require.IsType(t, &secretv1beta1.SystemKeeperConfig{}, keeperConfig)
})
@@ -55,7 +56,7 @@ func Test_KeeperMetadataStorage_GetKeeperConfig(t *testing.T) {
_, err := keeperMetadataStorage.Create(ctx, testKeeper, "testuser")
require.NoError(t, err)
keeperConfig, err := keeperMetadataStorage.GetKeeperConfig(ctx, defaultKeeperNS, &defaultKeeperName, contracts.ReadOpts{})
keeperConfig, err := keeperMetadataStorage.GetKeeperConfig(ctx, defaultKeeperNS, defaultKeeperName, contracts.ReadOpts{})
require.NoError(t, err)
require.NotNil(t, keeperConfig)
require.NotEmpty(t, keeperConfig.Type())
@@ -105,7 +106,7 @@ func Test_KeeperMetadataStorage_GetKeeperConfig(t *testing.T) {
require.NoError(t, err)
// we are able to get it
keeperConfig, err := keeperMetadataStorage.GetKeeperConfig(ctx, keeperNamespaceTest, &keeperTest, contracts.ReadOpts{})
keeperConfig, err := keeperMetadataStorage.GetKeeperConfig(ctx, keeperNamespaceTest, keeperTest, contracts.ReadOpts{})
require.NoError(t, err)
require.NotNil(t, keeperConfig)
require.NotEmpty(t, keeperConfig.Type())
@@ -115,7 +116,7 @@ func Test_KeeperMetadataStorage_GetKeeperConfig(t *testing.T) {
require.NoError(t, delErr)
// and we shouldn't be able to get it again
_, getErr := keeperMetadataStorage.GetKeeperConfig(ctx, keeperNamespaceTest, &keeperTest, contracts.ReadOpts{})
_, getErr := keeperMetadataStorage.GetKeeperConfig(ctx, keeperNamespaceTest, keeperTest, contracts.ReadOpts{})
require.Errorf(t, getErr, "keeper not found")
})
@@ -162,7 +163,7 @@ func Test_KeeperMetadataStorage_GetKeeperConfig(t *testing.T) {
require.NoError(t, err)
// Validate updated values
updatedConfig, err := keeperMetadataStorage.GetKeeperConfig(ctx, keeperNamespaceTest, &keeperTest, contracts.ReadOpts{})
updatedConfig, err := keeperMetadataStorage.GetKeeperConfig(ctx, keeperNamespaceTest, keeperTest, contracts.ReadOpts{})
require.NoError(t, err)
require.NotNil(t, updatedConfig)
require.NotEmpty(t, updatedConfig.Type())
@@ -260,7 +261,7 @@ func Test_KeeperMetadataStorage_GetKeeperConfig(t *testing.T) {
_, err := keeperMetadataStorage.Read(ctx, "ns", "non-existent", contracts.ReadOpts{})
require.Error(t, err)
require.Equal(t, contracts.ErrKeeperNotFound, err)
require.ErrorIs(t, err, contracts.ErrKeeperNotFound)
})
t.Run("update keeper with different namespace", func(t *testing.T) {
@@ -329,6 +330,40 @@ func Test_KeeperMetadataStorage_GetKeeperConfig(t *testing.T) {
})
}
// Test_KeeperMetadataStorage_SetAsActive verifies that activating a keeper
// makes it the namespace's active keeper and that activating another keeper
// replaces the previous one.
func Test_KeeperMetadataStorage_SetAsActive(t *testing.T) {
	t.Parallel()

	storage := initStorage(t)

	// newKeeper creates an AWS-backed keeper with the given name in ns1.
	newKeeper := func(name string) *secretv1beta1.Keeper {
		created, err := storage.Create(t.Context(), &secretv1beta1.Keeper{
			ObjectMeta: v1.ObjectMeta{Namespace: "ns1", Name: name},
			Spec: secretv1beta1.KeeperSpec{
				Description: "description",
				Aws:         &secretv1beta1.KeeperAWSConfig{},
			},
		}, "actor-uid")
		require.NoError(t, err)
		return created
	}

	k1 := newKeeper("k1")
	k2 := newKeeper("k2")

	// Activating k1 makes it the active keeper for the namespace.
	require.NoError(t, storage.SetAsActive(t.Context(), xkube.Namespace(k1.Namespace), k1.Name))
	activeName, _, err := storage.GetActiveKeeperConfig(t.Context(), k1.Namespace)
	require.NoError(t, err)
	require.Equal(t, k1.Name, activeName)

	// Activating k2 afterwards replaces k1 as the active keeper.
	require.NoError(t, storage.SetAsActive(t.Context(), xkube.Namespace(k2.Namespace), k2.Name))
	activeName, _, err = storage.GetActiveKeeperConfig(t.Context(), k2.Namespace)
	require.NoError(t, err)
	require.Equal(t, k2.Name, activeName)
}
func initStorage(t *testing.T) contracts.KeeperMetadataStorage {
testDB := sqlstore.NewTestStore(t, sqlstore.WithMigrator(migrator.New()))
tracer := noop.NewTracerProvider().Tracer("test")
+35 -10
View File
@@ -15,11 +15,13 @@ var (
sqlTemplates = template.Must(template.New("sql").ParseFS(sqlTemplatesFS, `data/*.sql`))
// The SQL Commands
sqlKeeperCreate = mustTemplate("keeper_create.sql")
sqlKeeperRead = mustTemplate("keeper_read.sql")
sqlKeeperUpdate = mustTemplate("keeper_update.sql")
sqlKeeperList = mustTemplate("keeper_list.sql")
sqlKeeperDelete = mustTemplate("keeper_delete.sql")
sqlKeeperCreate = mustTemplate("keeper_create.sql")
sqlKeeperRead = mustTemplate("keeper_read.sql")
sqlKeeperReadActive = mustTemplate("keeper_read_active.sql")
sqlKeeperUpdate = mustTemplate("keeper_update.sql")
sqlKeeperList = mustTemplate("keeper_list.sql")
sqlKeeperDelete = mustTemplate("keeper_delete.sql")
sqlKeeperSetAsActive = mustTemplate("keeper_set_as_active.sql")
sqlKeeperListByName = mustTemplate("keeper_listByName.sql")
sqlSecureValueListByName = mustTemplate("secure_value_listByName.sql")
@@ -32,9 +34,9 @@ var (
sqlSecureValueLeaseInactive = mustTemplate("secure_value_lease_inactive.sql")
sqlSecureValueListByLeaseToken = mustTemplate("secure_value_list_by_lease_token.sql")
sqlGetLatestSecureValueVersion = mustTemplate("secure_value_get_latest_version.sql")
sqlSecureValueSetVersionToActive = mustTemplate("secure_value_set_version_to_active.sql")
sqlSecureValueSetVersionToInactive = mustTemplate("secure_value_set_version_to_inactive.sql")
sqlGetLatestSecureValueVersionAndCreatedAt = mustTemplate("secure_value_get_latest_version_and_created_at.sql")
sqlSecureValueSetVersionToActive = mustTemplate("secure_value_set_version_to_active.sql")
sqlSecureValueSetVersionToInactive = mustTemplate("secure_value_set_version_to_inactive.sql")
)
func mustTemplate(filename string) *template.Template {
@@ -48,6 +50,18 @@ func mustTemplate(filename string) *template.Template {
/**-- Keeper Queries --**/
/************************/
// Set as active

// setKeeperAsActive carries the parameters for the keeper_set_as_active.sql
// template, which marks the named keeper as active and every other keeper in
// the namespace as inactive in a single UPDATE.
type setKeeperAsActive struct {
	sqltemplate.SQLTemplate
	// Namespace the keeper lives in.
	Namespace string
	// Name of the keeper to activate.
	Name string
}

// Validate is only used if we use `dbutil` from `unifiedstorage`
func (r setKeeperAsActive) Validate() error {
	return nil // TODO
}
// Create
type createKeeper struct {
sqltemplate.SQLTemplate
@@ -72,6 +86,17 @@ func (r readKeeper) Validate() error {
return nil // TODO
}
// Read active keeper

// readActiveKeeper carries the parameters for the keeper_read_active.sql
// template, which fetches the single keeper marked active in a namespace.
type readActiveKeeper struct {
	sqltemplate.SQLTemplate
	// Namespace to look up the active keeper in.
	Namespace string
}

// Validate is only used if we use `dbutil` from `unifiedstorage`
func (r readActiveKeeper) Validate() error {
	return nil // TODO
}
// Update
type updateKeeper struct {
sqltemplate.SQLTemplate
@@ -146,13 +171,13 @@ func (r readSecureValue) Validate() error {
return nil // TODO
}
type getLatestSecureValueVersion struct {
type getLatestSecureValueVersionAndCreatedAt struct {
sqltemplate.SQLTemplate
Namespace string
Name string
}
func (r getLatestSecureValueVersion) Validate() error {
func (r getLatestSecureValueVersionAndCreatedAt) Validate() error {
return nil
}
+20 -2
View File
@@ -13,6 +13,15 @@ func TestKeeperQueries(t *testing.T) {
mocks.CheckQuerySnapshots(t, mocks.TemplateTestSetup{
RootDir: "testdata",
Templates: map[*template.Template][]mocks.TemplateTestCase{
sqlKeeperSetAsActive: {
{
Name: "keeper set as active",
Data: &setKeeperAsActive{
SQLTemplate: mocks.NewTestingSQLTemplate(), Name: "name",
Namespace: "ns",
},
},
},
sqlKeeperCreate: {
{
Name: "create",
@@ -54,6 +63,15 @@ func TestKeeperQueries(t *testing.T) {
},
},
},
sqlKeeperReadActive: {
{
Name: "read active",
Data: &readActiveKeeper{
SQLTemplate: mocks.NewTestingSQLTemplate(),
Namespace: "ns",
},
},
},
sqlKeeperRead: {
{
Name: "read",
@@ -123,10 +141,10 @@ func TestSecureValueQueries(t *testing.T) {
mocks.CheckQuerySnapshots(t, mocks.TemplateTestSetup{
RootDir: "testdata",
Templates: map[*template.Template][]mocks.TemplateTestCase{
sqlGetLatestSecureValueVersion: {
sqlGetLatestSecureValueVersionAndCreatedAt: {
{
Name: "get latest secure value version",
Data: &getLatestSecureValueVersion{
Data: &getLatestSecureValueVersionAndCreatedAt{
SQLTemplate: mocks.NewTestingSQLTemplate(),
Name: "name",
Namespace: "ns",
@@ -79,7 +79,7 @@ func (sv *secureValueDB) toKubernetes() (*secretv1beta1.SecureValue, error) {
}
if sv.Keeper.Valid {
resource.Spec.Keeper = &sv.Keeper.String
resource.Status.Keeper = sv.Keeper.String
}
if sv.Ref.Valid {
resource.Spec.Ref = &sv.Ref.String
@@ -122,25 +122,23 @@ func (sv *secureValueDB) toKubernetes() (*secretv1beta1.SecureValue, error) {
}
// toCreateRow maps a Kubernetes resource into a DB row for new resources being created/inserted.
func toCreateRow(now time.Time, sv *secretv1beta1.SecureValue, actorUID string) (*secureValueDB, error) {
row, err := toRow(sv, "")
func toCreateRow(createdAt, updatedAt int64, keeper string, sv *secretv1beta1.SecureValue, actorUID string) (*secureValueDB, error) {
row, err := toRow(keeper, sv, "")
if err != nil {
return nil, fmt.Errorf("failed to convert SecureValue to secureValueDB: %w", err)
}
timestamp := now.UTC().Unix()
row.GUID = uuid.New().String()
row.Created = timestamp
row.Created = createdAt
row.CreatedBy = actorUID
row.Updated = timestamp
row.Updated = updatedAt
row.UpdatedBy = actorUID
return row, nil
}
// toRow maps a Kubernetes resource into a DB row.
func toRow(sv *secretv1beta1.SecureValue, externalID string) (*secureValueDB, error) {
func toRow(keeper string, sv *secretv1beta1.SecureValue, externalID string) (*secureValueDB, error) {
var annotations string
if len(sv.Annotations) > 0 {
cleanedAnnotations := xkube.CleanAnnotations(sv.Annotations)
@@ -237,7 +235,7 @@ func toRow(sv *secretv1beta1.SecureValue, externalID string) (*secureValueDB, er
Version: sv.Status.Version,
Description: sv.Spec.Description,
Keeper: toNullString(sv.Spec.Keeper),
Keeper: toNullString(&keeper),
Decrypters: toNullString(decrypters),
Ref: toNullString(sv.Spec.Ref),
ExternalID: externalID,
@@ -47,13 +47,14 @@ type secureValueMetadataStorage struct {
tracer trace.Tracer
}
func (s *secureValueMetadataStorage) Create(ctx context.Context, sv *secretv1beta1.SecureValue, actorUID string) (_ *secretv1beta1.SecureValue, svmCreateErr error) {
func (s *secureValueMetadataStorage) Create(ctx context.Context, keeper string, sv *secretv1beta1.SecureValue, actorUID string) (_ *secretv1beta1.SecureValue, svmCreateErr error) {
start := s.clock.Now()
name := sv.GetName()
namespace := sv.GetNamespace()
ctx, span := s.tracer.Start(ctx, "SecureValueMetadataStorage.Create", trace.WithAttributes(
attribute.String("name", name),
attribute.String("namespace", namespace),
attribute.String("keeper", keeper),
attribute.String("actorUID", actorUID),
))
defer span.End()
@@ -64,6 +65,7 @@ func (s *secureValueMetadataStorage) Create(ctx context.Context, sv *secretv1bet
args := []any{
"name", name,
"namespace", namespace,
"keeper", keeper,
"actorUID", actorUID,
}
@@ -83,41 +85,14 @@ func (s *secureValueMetadataStorage) Create(ctx context.Context, sv *secretv1bet
var row *secureValueDB
err := s.db.Transaction(ctx, func(ctx context.Context) error {
if sv.Spec.Keeper != nil {
// Validate before inserting that the chosen `keeper` exists.
// -- This is a copy of KeeperMetadataStore.read, which is not public at the moment, and is not defined in contract.KeeperMetadataStorage
req := &readKeeper{
SQLTemplate: sqltemplate.New(s.dialect),
Namespace: sv.Namespace,
Name: *sv.Spec.Keeper,
IsForUpdate: true,
}
query, err := sqltemplate.Execute(sqlKeeperRead, req)
if err != nil {
return fmt.Errorf("execute template %q: %w", sqlKeeperRead.Name(), err)
}
res, err := s.db.QueryContext(ctx, query, req.GetArgs()...)
if err != nil {
return fmt.Errorf("getting row: %w", err)
}
defer func() { _ = res.Close() }()
if !res.Next() {
return contracts.ErrKeeperNotFound
}
}
latestVersion, err := s.getLatestVersion(ctx, xkube.Namespace(sv.Namespace), sv.Name)
latest, err := s.getLatestVersionAndCreatedAt(ctx, xkube.Namespace(sv.Namespace), sv.Name)
if err != nil {
return fmt.Errorf("fetching latest secure value version: %w", err)
}
version := int64(1)
if latestVersion != nil {
version = *latestVersion + 1
if latest.version > 0 {
version = latest.version + 1
}
// Some other concurrent request may have created the version we're trying to create,
@@ -127,7 +102,15 @@ func (s *secureValueMetadataStorage) Create(ctx context.Context, sv *secretv1bet
for {
sv.Status.Version = version
row, err = toCreateRow(s.clock.Now(), sv, actorUID)
now := s.clock.Now().UTC().Unix()
createdAt := now
if latest.createdAt > 0 {
createdAt = latest.createdAt
}
updatedAt := now
row, err = toCreateRow(createdAt, updatedAt, keeper, sv, actorUID)
if err != nil {
return fmt.Errorf("to create row: %w", err)
}
@@ -178,44 +161,60 @@ func (s *secureValueMetadataStorage) Create(ctx context.Context, sv *secretv1bet
return createdSecureValue, nil
}
func (s *secureValueMetadataStorage) getLatestVersion(ctx context.Context, namespace xkube.Namespace, name string) (*int64, error) {
ctx, span := s.tracer.Start(ctx, "SecureValueMetadataStorage.getLatestVersion", trace.WithAttributes(
type versionAndCreatedAt struct {
createdAt int64
version int64
}
func (s *secureValueMetadataStorage) getLatestVersionAndCreatedAt(ctx context.Context, namespace xkube.Namespace, name string) (versionAndCreatedAt, error) {
ctx, span := s.tracer.Start(ctx, "SecureValueMetadataStorage.getLatestVersionAndCreatedAt", trace.WithAttributes(
attribute.String("name", name),
attribute.String("namespace", namespace.String()),
))
defer span.End()
req := getLatestSecureValueVersion{
req := getLatestSecureValueVersionAndCreatedAt{
SQLTemplate: sqltemplate.New(s.dialect),
Namespace: namespace.String(),
Name: name,
}
q, err := sqltemplate.Execute(sqlGetLatestSecureValueVersion, req)
q, err := sqltemplate.Execute(sqlGetLatestSecureValueVersionAndCreatedAt, req)
if err != nil {
return nil, fmt.Errorf("execute template %q: %w", sqlGetLatestSecureValueVersion.Name(), err)
return versionAndCreatedAt{}, fmt.Errorf("execute template %q: %w", sqlGetLatestSecureValueVersionAndCreatedAt.Name(), err)
}
rows, err := s.db.QueryContext(ctx, q, req.GetArgs()...)
if err != nil {
return nil, fmt.Errorf("fetching latest version for secure value: namespace=%+v name=%+v %w", namespace, name, err)
return versionAndCreatedAt{}, fmt.Errorf("fetching latest version for secure value: namespace=%+v name=%+v %w", namespace, name, err)
}
defer func() { _ = rows.Close() }()
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("error executing query: %w", err)
return versionAndCreatedAt{}, fmt.Errorf("error executing query: %w", err)
}
if !rows.Next() {
return nil, nil
return versionAndCreatedAt{}, nil
}
var version int64
if err := rows.Scan(&version); err != nil {
return nil, fmt.Errorf("scanning version from returned rows: %w", err)
var (
createdAt int64
version int64
active bool
)
if err := rows.Scan(&createdAt, &version, &active); err != nil {
return versionAndCreatedAt{}, fmt.Errorf("scanning version from returned rows: %w", err)
}
return &version, nil
if !active {
createdAt = 0
}
return versionAndCreatedAt{
createdAt: createdAt,
version: version,
}, nil
}
func (s *secureValueMetadataStorage) readActiveVersion(ctx context.Context, namespace xkube.Namespace, name string, opts contracts.ReadOpts) (secureValueDB, error) {
@@ -63,20 +63,20 @@ func Test_SecureValueMetadataStorage_CreateAndRead(t *testing.T) {
Spec: secretv1beta1.SecureValueSpec{
Description: "test description",
Value: ptr.To(secretv1beta1.NewExposedSecureValue("test-value")),
Keeper: &keeperName,
},
Status: secretv1beta1.SecureValueStatus{Keeper: keeperName},
}
testSecureValue.Name = "sv-test"
testSecureValue.Namespace = "default"
// Create the secure value
createdSecureValue, err := secureValueStorage.Create(ctx, testSecureValue, "testuser")
createdSecureValue, err := secureValueStorage.Create(ctx, keeperName, testSecureValue, "testuser")
require.NoError(t, err)
require.NotNil(t, createdSecureValue)
require.Equal(t, "sv-test", createdSecureValue.Name)
require.Equal(t, "default", createdSecureValue.Namespace)
require.Equal(t, "test description", createdSecureValue.Spec.Description)
require.Equal(t, keeperName, *createdSecureValue.Spec.Keeper)
require.Equal(t, keeperName, createdSecureValue.Status.Keeper)
require.NoError(t, secureValueStorage.SetVersionToActive(ctx, xkube.Namespace(createdSecureValue.Namespace), createdSecureValue.Name, createdSecureValue.Status.Version))
@@ -87,7 +87,7 @@ func Test_SecureValueMetadataStorage_CreateAndRead(t *testing.T) {
require.Equal(t, "sv-test", readSecureValue.Name)
require.Equal(t, "default", readSecureValue.Namespace)
require.Equal(t, "test description", readSecureValue.Spec.Description)
require.Equal(t, keeperName, *readSecureValue.Spec.Keeper)
require.Equal(t, keeperName, readSecureValue.Status.Keeper)
// List secure values and verify our value is in the list
secureValues, err := secureValueStorage.List(ctx, xkube.Namespace("default"))
@@ -101,7 +101,7 @@ func Test_SecureValueMetadataStorage_CreateAndRead(t *testing.T) {
found = true
require.Equal(t, "default", sv.Namespace)
require.Equal(t, "test description", sv.Spec.Description)
require.Equal(t, keeperName, *sv.Spec.Keeper)
require.Equal(t, keeperName, sv.Status.Keeper)
break
}
}
@@ -117,14 +117,14 @@ func Test_SecureValueMetadataStorage_CreateAndRead(t *testing.T) {
Spec: secretv1beta1.SecureValueSpec{
Description: "test description 2",
Value: ptr.To(secretv1beta1.NewExposedSecureValue("test-value-2")),
Keeper: &keeperName,
},
Status: secretv1beta1.SecureValueStatus{Keeper: keeperName},
}
testSecureValue.Name = "sv-test-2"
testSecureValue.Namespace = "default"
// Create the secure value
createdSecureValue, err := secureValueStorage.Create(ctx, testSecureValue, "testuser")
createdSecureValue, err := secureValueStorage.Create(ctx, keeperName, testSecureValue, "testuser")
require.NoError(t, err)
require.NotNil(t, createdSecureValue)
@@ -199,8 +199,8 @@ func TestPropertySecureValueMetadataStorage(t *testing.T) {
t.Repeat(map[string]func(*rapid.T){
"create": func(t *rapid.T) {
sv := anySecureValueGen.Draw(t, "sv")
modelCreatedSv, modelErr := model.create(sut.Clock.Now(), deepCopy(sv))
createdSv, err := sut.CreateSv(t.Context(), testutils.CreateSvWithSv(deepCopy(sv)))
modelCreatedSv, modelErr := model.create(sut.Clock.Now(), sv.DeepCopy())
createdSv, err := sut.CreateSv(t.Context(), testutils.CreateSvWithSv(sv.DeepCopy()))
if err != nil || modelErr != nil {
require.ErrorIs(t, err, modelErr)
return
@@ -211,7 +211,7 @@ func TestPropertySecureValueMetadataStorage(t *testing.T) {
},
"delete": func(t *rapid.T) {
ns := namespaceGen.Draw(t, "ns")
name := nameGen.Draw(t, "name")
name := secureValueNameGen.Draw(t, "name")
modelSv, modelErr := model.delete(ns, name)
sv, err := sut.DeleteSv(t.Context(), ns, name)
if err != nil || modelErr != nil {
+221 -38
View File
@@ -6,7 +6,6 @@ import (
"testing"
"time"
"github.com/mitchellh/copystructure"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
@@ -26,9 +25,16 @@ type modelSecureValue struct {
leaseCreated time.Time
}
type modelKeeper struct {
namespace string
name string
active bool
}
// A simplified model of the grafana secrets manager
type model struct {
secureValues []*modelSecureValue
keepers []*modelKeeper
}
func newModel() *model {
@@ -73,24 +79,98 @@ func (m *model) readActiveVersion(namespace, name string) *modelSecureValue {
}
func (m *model) create(now time.Time, sv *secretv1beta1.SecureValue) (*secretv1beta1.SecureValue, error) {
modelSv := &modelSecureValue{SecureValue: sv, active: false, created: now}
keeper := m.getActiveKeeper(sv.Namespace)
sv = sv.DeepCopy()
// Preserve the original creation time if this secure value already exists
created := now
if sv := m.readActiveVersion(sv.Namespace, sv.Name); sv != nil {
created = sv.created
}
modelSv := &modelSecureValue{SecureValue: sv, active: false, created: created}
modelSv.Status.Version = m.getNewVersionNumber(modelSv.Namespace, modelSv.Name)
modelSv.Status.ExternalID = fmt.Sprintf("%d", modelSv.Status.Version)
modelSv.Status.Keeper = keeper.name
m.secureValues = append(m.secureValues, modelSv)
m.setVersionToActive(modelSv.Namespace, modelSv.Name, modelSv.Status.Version)
return modelSv.SecureValue, nil
}
// getActiveKeeper returns the active keeper for the namespace, falling back
// to a synthetic system keeper when no stored keeper is active.
func (m *model) getActiveKeeper(namespace string) *modelKeeper {
	for _, candidate := range m.keepers {
		if candidate.active && candidate.namespace == namespace {
			return candidate
		}
	}
	// Default to the system keeper when there are no active keepers in the namespace
	return &modelKeeper{namespace: namespace, name: contracts.SystemKeeperName, active: true}
}
// keeperExists reports whether a keeper with the given name can be resolved
// in the namespace (the system keeper always resolves, see findKeeper).
func (m *model) keeperExists(namespace, name string) bool {
	k := m.findKeeper(namespace, name)
	return k != nil
}
// findKeeper looks up a keeper by namespace and name, returning nil when no
// such keeper exists. The system keeper is always resolvable even though it
// is never stored in m.keepers.
func (m *model) findKeeper(namespace, name string) *modelKeeper {
	if name == contracts.SystemKeeperName {
		// The system keeper is not in the list of keepers
		return &modelKeeper{namespace: namespace, name: contracts.SystemKeeperName, active: true}
	}

	for _, candidate := range m.keepers {
		if candidate.name == name && candidate.namespace == namespace {
			return candidate
		}
	}

	return nil
}
// createKeeper registers a new keeper in the model, rejecting duplicates
// within the same namespace. The returned Keeper is a deep copy.
func (m *model) createKeeper(keeper *secretv1beta1.Keeper) (*secretv1beta1.Keeper, error) {
	if m.keeperExists(keeper.Namespace, keeper.Name) {
		return nil, contracts.ErrKeeperAlreadyExists
	}

	entry := &modelKeeper{namespace: keeper.Namespace, name: keeper.Name}
	m.keepers = append(m.keepers, entry)

	return keeper.DeepCopy(), nil
}
// setKeeperAsActive makes keeperName the only active keeper in the namespace,
// returning ErrKeeperNotFound when the keeper cannot be resolved.
func (m *model) setKeeperAsActive(namespace, keeperName string) error {
	target := m.findKeeper(namespace, keeperName)
	if target == nil {
		return contracts.ErrKeeperNotFound
	}

	// Activate the target keeper.
	target.active = true

	// Deactivate every other keeper in the same namespace.
	for _, other := range m.keepers {
		if other.namespace == namespace && other.name != keeperName {
			other.active = false
		}
	}

	return nil
}
// update models updating a secure value: the active version must exist, its
// keeper must resolve, and the update is materialized as a new version via
// create. Returns the new version, whether an update happened, and any error.
func (m *model) update(now time.Time, newSecureValue *secretv1beta1.SecureValue) (*secretv1beta1.SecureValue, bool, error) {
	sv := m.readActiveVersion(newSecureValue.Namespace, newSecureValue.Name)
	if sv == nil {
		return nil, false, contracts.ErrSecureValueNotFound
	}

	// If the keeper doesn't exist, return an error
	if !m.keeperExists(sv.Namespace, sv.Status.Keeper) {
		return nil, false, contracts.ErrKeeperNotFound
	}

	// If the payload doesn't contain a value, get the value from current version
	if newSecureValue.Spec.Value == nil {
		// NOTE(review): this inner readActiveVersion repeats the lookup already
		// done above (shadowing the outer sv) — the nil branch looks unreachable
		// here; consider reusing the outer sv. TODO confirm before simplifying.
		sv := m.readActiveVersion(newSecureValue.Namespace, newSecureValue.Name)
		if sv == nil {
			return nil, false, contracts.ErrSecureValueNotFound
		}
		newSecureValue.Spec.Value = sv.Spec.Value
	}

	createdSv, err := m.create(now, newSecureValue)
	return createdSv, true, err
}
@@ -122,7 +202,7 @@ func (m *model) decrypt(decrypter, namespace, name string) (map[string]decrypt.D
v.active {
if slices.ContainsFunc(v.Spec.Decrypters, func(d string) bool { return d == decrypter }) {
return map[string]decrypt.DecryptResult{
name: decrypt.NewDecryptResultValue(deepCopy(v).Spec.Value),
name: decrypt.NewDecryptResultValue(v.DeepCopy().Spec.Value),
}, nil
}
@@ -161,13 +241,14 @@ func (m *model) leaseInactiveSecureValues(now time.Time, minAge, leaseTTL time.D
}
var (
decryptersGen = rapid.SampledFrom([]string{"svc1", "svc2", "svc3", "svc4", "svc5"})
nameGen = rapid.SampledFrom([]string{"n1", "n2", "n3", "n4", "n5"})
namespaceGen = rapid.SampledFrom([]string{"ns1", "ns2", "ns3", "ns4", "ns5"})
anySecureValueGen = rapid.Custom(func(t *rapid.T) *secretv1beta1.SecureValue {
decryptersGen = rapid.SampledFrom([]string{"svc1", "svc2", "svc3", "svc4", "svc5"})
secureValueNameGen = rapid.SampledFrom([]string{"n1", "n2", "n3", "n4", "n5"})
keeperNameGen = rapid.SampledFrom([]string{"k1", "k2", "k3", "k4", "k5"})
namespaceGen = rapid.SampledFrom([]string{"ns1", "ns2", "ns3", "ns4", "ns5"})
anySecureValueGen = rapid.Custom(func(t *rapid.T) *secretv1beta1.SecureValue {
return &secretv1beta1.SecureValue{
ObjectMeta: metav1.ObjectMeta{
Name: nameGen.Draw(t, "name"),
Name: secureValueNameGen.Draw(t, "name"),
Namespace: namespaceGen.Draw(t, "ns"),
},
Spec: secretv1beta1.SecureValueSpec{
@@ -191,10 +272,37 @@ var (
decryptGen = rapid.Custom(func(t *rapid.T) decryptInput {
return decryptInput{
namespace: namespaceGen.Draw(t, "ns"),
name: nameGen.Draw(t, "name"),
name: secureValueNameGen.Draw(t, "name"),
decrypter: decryptersGen.Draw(t, "decrypter"),
}
})
	// anyKeeperGen generates a Keeper with a random description and exactly one
	// randomly chosen provider config (AWS, Azure, GCP or HashiCorp Vault),
	// named/namespaced from the shared generators so runs collide on purpose.
	anyKeeperGen = rapid.Custom(func(t *rapid.T) *secretv1beta1.Keeper {
		spec := secretv1beta1.KeeperSpec{
			Description: rapid.String().Draw(t, "description"),
		}

		// Pick one provider type; exactly one config field is set per keeper.
		keeperType := rapid.SampledFrom([]string{"isAwsKeeper", "isAzureKeeper", "isGcpKeeper", "isVaultKeeper"}).Draw(t, "keeperType")
		switch keeperType {
		case "isAwsKeeper":
			spec.Aws = &secretv1beta1.KeeperAWSConfig{}
		case "isAzureKeeper":
			spec.Azure = &secretv1beta1.KeeperAzureConfig{}
		case "isGcpKeeper":
			spec.Gcp = &secretv1beta1.KeeperGCPConfig{}
		case "isVaultKeeper":
			spec.HashiCorpVault = &secretv1beta1.KeeperHashiCorpConfig{}
		default:
			// Guard against the sampled list and the switch drifting apart.
			panic(fmt.Sprintf("unhandled keeper type '%+v', did you forget a switch case?", keeperType))
		}

		return &secretv1beta1.Keeper{
			ObjectMeta: metav1.ObjectMeta{
				Name:      keeperNameGen.Draw(t, "name"),
				Namespace: namespaceGen.Draw(t, "ns"),
			},
			Spec: spec,
		}
	})
)
type decryptInput struct {
@@ -226,14 +334,14 @@ func TestModel(t *testing.T) {
now := time.Now()
// Create a secure value
sv1, err := m.create(now, deepCopy(sv))
sv1, err := m.create(now, sv.DeepCopy())
require.NoError(t, err)
require.Equal(t, sv.Namespace, sv1.Namespace)
require.Equal(t, sv.Name, sv1.Name)
require.EqualValues(t, 1, sv1.Status.Version)
// Create a new version of a secure value
sv2, err := m.create(now, deepCopy(sv))
sv2, err := m.create(now, sv.DeepCopy())
require.NoError(t, err)
require.Equal(t, sv.Namespace, sv2.Namespace)
require.Equal(t, sv.Name, sv2.Name)
@@ -247,30 +355,29 @@ func TestModel(t *testing.T) {
now := time.Now()
sv1, err := m.create(now, deepCopy(sv))
sv1, err := m.create(now, sv.DeepCopy())
require.NoError(t, err)
// Create a new version of a secure value by updating it
sv2, _, err := m.update(now, deepCopy(sv1))
sv2, _, err := m.update(now, sv1.DeepCopy())
require.NoError(t, err)
require.Equal(t, sv.Namespace, sv2.Namespace)
require.Equal(t, sv.Name, sv2.Name)
require.EqualValues(t, 2, sv2.Status.Version)
// Try updating a secure value that doesn't exist without specifying a value for it
sv3 := deepCopy(sv2)
sv3 := sv2.DeepCopy()
sv3.Name = "i_dont_exist"
sv3.Spec.Value = nil
_, _, err = m.update(now, sv3)
require.ErrorIs(t, err, contracts.ErrSecureValueNotFound)
// Updating a value that doesn't exist creates a new version
sv4 := deepCopy(sv3)
sv4 := sv3.DeepCopy()
sv4.Name = "i_dont_exist"
sv4.Spec.Value = ptr.To(secretv1beta1.NewExposedSecureValue("sv4"))
sv4, _, err = m.update(now, sv4)
require.NoError(t, err)
require.EqualValues(t, 1, sv4.Status.Version)
_, _, err = m.update(now, sv4)
require.ErrorIs(t, err, contracts.ErrSecureValueNotFound)
})
t.Run("deleting a secure value", func(t *testing.T) {
@@ -279,7 +386,7 @@ func TestModel(t *testing.T) {
m := newModel()
now := time.Now()
sv1, err := m.create(now, deepCopy(sv))
sv1, err := m.create(now, sv.DeepCopy())
require.NoError(t, err)
// Deleting a secure value
@@ -306,7 +413,7 @@ func TestModel(t *testing.T) {
require.Equal(t, 0, len(list.Items))
// Create a secure value
sv1, err := m.create(now, deepCopy(sv))
sv1, err := m.create(now, sv.DeepCopy())
require.NoError(t, err)
// 1 secure value exists and it should be returned
@@ -333,7 +440,7 @@ func TestModel(t *testing.T) {
// Create a secure value
secret := "v1"
sv1, err := m.create(now, deepCopy(sv))
sv1, err := m.create(now, sv.DeepCopy())
require.NoError(t, err)
// Decrypt the just created secure value
@@ -358,9 +465,9 @@ func TestStateMachine(t *testing.T) {
"create": func(t *rapid.T) {
sv := anySecureValueGen.Draw(t, "sv")
modelCreatedSv, modelErr := model.create(sut.Clock.Now(), deepCopy(sv))
modelCreatedSv, modelErr := model.create(sut.Clock.Now(), sv.DeepCopy())
createdSv, err := sut.CreateSv(t.Context(), testutils.CreateSvWithSv(deepCopy(sv)))
createdSv, err := sut.CreateSv(t.Context(), testutils.CreateSvWithSv(sv.DeepCopy()))
if err != nil || modelErr != nil {
require.ErrorIs(t, err, modelErr)
return
@@ -371,8 +478,8 @@ func TestStateMachine(t *testing.T) {
},
"update": func(t *rapid.T) {
sv := updateSecureValueGen.Draw(t, "sv")
modelCreatedSv, _, modelErr := model.update(sut.Clock.Now(), deepCopy(sv))
createdSv, err := sut.UpdateSv(t.Context(), deepCopy(sv))
modelCreatedSv, _, modelErr := model.update(sut.Clock.Now(), sv.DeepCopy())
createdSv, err := sut.UpdateSv(t.Context(), sv.DeepCopy())
if err != nil || modelErr != nil {
require.ErrorIs(t, err, modelErr)
return
@@ -412,7 +519,8 @@ func TestStateMachine(t *testing.T) {
if !slices.ContainsFunc(list.Items, func(v2 secretv1beta1.SecureValue) bool {
return v2.Namespace == v1.Namespace && v2.Name == v1.Name && v2.Status.Version == v1.Status.Version
}) {
t.Fatalf("expected sut to return secure value ns=%+v name=%+v version=%+v in the result", v1.Namespace, v1.Name, v1.Status.Version)
t.Fatalf("expected sut to return secure value ns=%+v name=%+v version=%+v in the result",
v1.Namespace, v1.Name, v1.Status.Version)
}
}
},
@@ -439,8 +547,28 @@ func TestStateMachine(t *testing.T) {
require.Equal(t, len(modelResult), len(result))
for name := range modelResult {
require.Equal(t, modelResult[name].Value(), result[name].Value())
require.Equal(t, modelResult[name].Error(), result[name].Error())
require.Equal(t, modelResult[name].Value(), result[name].Value())
}
},
"createKeeper": func(t *rapid.T) {
input := anyKeeperGen.Draw(t, "keeper")
modelKeeper, modelErr := model.createKeeper(input)
keeper, err := sut.KeeperMetadataStorage.Create(t.Context(), input, "actor-uid")
if err != nil || modelErr != nil {
require.ErrorIs(t, err, modelErr)
return
}
require.Equal(t, modelKeeper.Name, keeper.Name)
},
"setKeeperAsActive": func(t *rapid.T) {
namespace := namespaceGen.Draw(t, "namespace")
keeper := keeperNameGen.Draw(t, "keeper")
modelErr := model.setKeeperAsActive(namespace, keeper)
err := sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(namespace), keeper)
if err != nil || modelErr != nil {
require.ErrorIs(t, err, modelErr)
return
}
},
})
@@ -471,12 +599,67 @@ func TestSecureValueServiceExampleBased(t *testing.T) {
require.Equal(t, 1, len(result))
require.ErrorIs(t, result[sv.Name].Error(), contracts.ErrDecryptNotFound)
})
}
func deepCopy[T any](sv T) T {
copied, err := copystructure.Copy(sv)
if err != nil {
panic(fmt.Sprintf("failed to copy secure value: %v", err))
}
return copied.(T)
t.Run("should be able to use secrets that were created with a keeper that's inactive", func(t *testing.T) {
t.Parallel()
sut := testutils.Setup(t)
// - Create a secret with k1
k1, err := sut.KeeperMetadataStorage.Create(t.Context(), &secretv1beta1.Keeper{
ObjectMeta: metav1.ObjectMeta{
Namespace: "n1",
Name: "k1",
},
Spec: secretv1beta1.KeeperSpec{
Description: "description",
Aws: &secretv1beta1.KeeperAWSConfig{},
},
}, "actor-uid")
require.NoError(t, err)
require.NoError(t, sut.SecureValueService.SetKeeperAsActive(t.Context(), xkube.Namespace(k1.Namespace), k1.Name))
value := secretv1beta1.NewExposedSecureValue("v1")
sv1, err := sut.CreateSv(t.Context(), testutils.CreateSvWithSv(&secretv1beta1.SecureValue{
ObjectMeta: metav1.ObjectMeta{Namespace: k1.Namespace, Name: "s1"},
Spec: secretv1beta1.SecureValueSpec{
Description: "desc",
Value: &value,
},
}))
require.NoError(t, err)
require.Equal(t, k1.Name, sv1.Status.Keeper)
// - Set a new keeper as active
k2, err := sut.KeeperMetadataStorage.Create(t.Context(), &secretv1beta1.Keeper{
ObjectMeta: metav1.ObjectMeta{
Namespace: "n1",
Name: "k2",
},
Spec: secretv1beta1.KeeperSpec{
Description: "description",
Aws: &secretv1beta1.KeeperAWSConfig{},
},
}, "actor-uid")
require.NoError(t, err)
require.NoError(t, sut.SecureValueService.SetKeeperAsActive(t.Context(), xkube.Namespace(k2.Namespace), k2.Name))
// - Read secure value created with inactive keeper
readSv, err := sut.SecureValueService.Read(t.Context(), xkube.Namespace(sv1.Namespace), sv1.Name)
require.NoError(t, err)
require.Equal(t, sv1.Namespace, readSv.Namespace)
require.Equal(t, sv1.Name, readSv.Name)
require.Equal(t, k1.Name, readSv.Status.Keeper)
// - Update secure value created with inactive keeper
newSv1 := sv1.DeepCopy()
newSv1.Spec.Description = "updated desc"
updatedSv, _, err := sut.SecureValueService.Update(t.Context(), newSv1, "actor-uid")
require.NoError(t, err)
require.Equal(t, sv1.Namespace, updatedSv.Namespace)
require.Equal(t, sv1.Name, updatedSv.Name)
require.Equal(t, k1.Name, updatedSv.Status.Keeper)
require.Equal(t, newSv1.Spec.Description, updatedSv.Spec.Description)
})
}
@@ -0,0 +1,20 @@
SELECT
`guid`,
`name`,
`namespace`,
`annotations`,
`labels`,
`created`,
`created_by`,
`updated`,
`updated_by`,
`description`,
`type`,
`payload`
FROM
`secret_keeper`
WHERE
`namespace` = 'ns' AND
`active` = true
LIMIT 1
;
@@ -0,0 +1,5 @@
UPDATE `secret_keeper`
SET `active` = (`name` = 'name')
WHERE
`namespace` = 'ns'
;
@@ -0,0 +1,20 @@
SELECT
"guid",
"name",
"namespace",
"annotations",
"labels",
"created",
"created_by",
"updated",
"updated_by",
"description",
"type",
"payload"
FROM
"secret_keeper"
WHERE
"namespace" = 'ns' AND
"active" = true
LIMIT 1
;
@@ -0,0 +1,5 @@
UPDATE "secret_keeper"
SET "active" = ("name" = 'name')
WHERE
"namespace" = 'ns'
;

Some files were not shown because too many files have changed in this diff Show More