Compare commits

..

16 Commits

Author SHA1 Message Date
Todd Treece
07bb48e874 add storage test 2025-12-11 21:22:28 -05:00
Will Browne
306186c4ea Merge branch 'main' into wb/pluginmeta-local 2025-12-11 16:59:33 +00:00
Will Browne
a28c70bbcc fix linter 2025-12-11 16:06:27 +00:00
Will Browne
1ebcd2319a undo var name change 2025-12-11 15:55:17 +00:00
Will Browne
5dc3767854 more tidying 2025-12-11 15:48:26 +00:00
Will Browne
040dbfb5e3 tidy 2025-12-11 15:34:55 +00:00
Will Browne
32d43f5b5d add gen files 2025-12-11 14:51:09 +00:00
Will Browne
fef9c760a0 merge with main 2025-12-11 14:48:58 +00:00
Will Browne
1fe9a38a2a add angular + translations 2025-11-27 15:04:20 +00:00
Will Browne
59bf7896f4 Merge branch 'main' into wb/pluginmeta-local 2025-11-27 10:40:45 +00:00
Will Browne
4b4ad544a8 Merge branch 'main' into wb/pluginmeta-local 2025-11-26 16:08:53 +00:00
Will Browne
7e3289f2c9 Merge branch 'main' into wb/pluginmeta-local 2025-11-26 12:24:17 +00:00
Will Browne
0d0b5b757b fix test + lint issues 2025-11-26 12:14:50 +00:00
Will Browne
c49261cce2 fix another test 2025-11-26 11:54:12 +00:00
Will Browne
d5efce72f3 fix tests 2025-11-26 11:48:13 +00:00
Will Browne
881c81f0b3 flesh out local for demo 2025-11-26 11:28:16 +00:00
242 changed files with 3077 additions and 9380 deletions

24
.github/CODEOWNERS vendored
View File

@@ -77,11 +77,11 @@
/.air.toml @macabu
# Git Sync / App Platform Provisioning
/apps/provisioning/ @grafana/grafana-app-platform-squad
/pkg/operators @grafana/grafana-app-platform-squad
/public/app/features/provisioning @grafana/grafana-search-navigate-organise
/pkg/registry/apis/provisioning @grafana/grafana-app-platform-squad
/pkg/tests/apis/provisioning @grafana/grafana-app-platform-squad
/apps/provisioning/ @grafana/grafana-git-ui-sync-team
/pkg/operators @grafana/grafana-git-ui-sync-team
/public/app/features/provisioning @grafana/grafana-git-ui-sync-team
/pkg/registry/apis/provisioning @grafana/grafana-git-ui-sync-team
/pkg/tests/apis/provisioning @grafana/grafana-git-ui-sync-team
# Git Sync frontend owned by frontend team as a whole.
/apps/alerting/ @grafana/alerting-backend
@@ -753,7 +753,7 @@ i18next.config.ts @grafana/grafana-frontend-platform
/packages/grafana-api-clients/src/clients/rtkq/iam/ @grafana/access-squad @grafana/identity-squad
/packages/grafana-api-clients/src/clients/rtkq/logsdrilldown/ @grafana/observability-logs
/packages/grafana-api-clients/src/clients/rtkq/preferences/ @grafana/plugins-platform-frontend
/packages/grafana-api-clients/src/clients/rtkq/provisioning/ @grafana/grafana-search-navigate-organise
/packages/grafana-api-clients/src/clients/rtkq/provisioning/ @grafana/grafana-git-ui-sync-team
/packages/grafana-api-clients/src/clients/rtkq/shorturl/ @grafana/sharing-squad
# root files, mostly frontend
@@ -1084,7 +1084,7 @@ playwright.storybook.config.ts @grafana/grafana-frontend-platform
eslint-suppressions.json @grafanabot
# Design system
/public/img/icons/unicons/ @grafana/product-design-engineering
/public/img/icons/unicons/ @grafana/design-system
# Core datasources
/public/app/plugins/datasource/dashboard/ @grafana/dashboards-squad
@@ -1260,11 +1260,11 @@ embed.go @grafana/grafana-as-code
/.github/workflows/stale.yml @grafana/grafana-developer-enablement-squad
/.github/workflows/storybook-a11y.yml @grafana/grafana-frontend-platform
/.github/workflows/update-make-docs.yml @grafana/docs-tooling
/.github/workflows/scripts/kinds/verify-kinds.go @grafana/grafana-app-platform-squad
/.github/workflows/scripts/kinds/verify-kinds.go @grafana/platform-monitoring
/.github/workflows/scripts/create-security-branch/create-security-branch.sh @grafana/grafana-developer-enablement-squad
/.github/workflows/publish-kinds-next.yml @grafana/grafana-app-platform-squad
/.github/workflows/publish-kinds-release.yml @grafana/grafana-app-platform-squad
/.github/workflows/verify-kinds.yml @grafana/grafana-app-platform-squad
/.github/workflows/publish-kinds-next.yml @grafana/platform-monitoring
/.github/workflows/publish-kinds-release.yml @grafana/platform-monitoring
/.github/workflows/verify-kinds.yml @grafana/platform-monitoring
/.github/workflows/dashboards-issue-add-label.yml @grafana/dashboards-squad
/.github/workflows/run-schema-v2-e2e.yml @grafana/dashboards-squad
/.github/workflows/run-dashboard-search-e2e.yml @grafana/grafana-search-and-storage
@@ -1325,7 +1325,7 @@ embed.go @grafana/grafana-as-code
/conf/provisioning/dashboards/ @grafana/dashboards-squad
/conf/provisioning/datasources/ @grafana/plugins-platform-backend
/conf/provisioning/plugins/ @grafana/plugins-platform-backend
/conf/provisioning/sample/ @grafana/grafana-app-platform-squad
/conf/provisioning/sample/ @grafana/grafana-git-ui-sync-team
# Security
/relyance.yaml @grafana/security-team

View File

@@ -1603,6 +1603,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1670,6 +1671,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"hide": false,
"max": 98,
"min": 5,
"noise": 22,
@@ -1687,6 +1689,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1754,6 +1757,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"hide": false,
"max": 98,
"min": 5,
"noise": 22,
@@ -1784,6 +1788,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1852,6 +1857,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"hide": false,
"max": 8,
"min": 1,
"noise": 2,
@@ -1869,6 +1875,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1937,6 +1944,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"hide": false,
"max": 12,
"min": 1,
"noise": 2,
@@ -1954,6 +1962,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2021,6 +2030,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"hide": false,
"max": 100,
"min": 10,
"noise": 22,
@@ -2038,6 +2048,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2105,6 +2116,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"hide": false,
"max": 100,
"min": 10,
"noise": 22,
@@ -2117,147 +2129,6 @@
],
"title": "Backend",
"type": "radialbar"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 66
},
"id": 35,
"panels": [],
"title": "Empty data",
"type": "row"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 0,
"y": 67
},
"id": 36,
"options": {
"barWidthFactor": 0.5,
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true,
"rounded": false,
"spotlight": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "gauge",
"showThresholdLabels": false,
"showThresholdMarkers": true,
"sparkline": true
},
"pluginVersion": "13.0.0-pre",
"targets": [
{
"refId": "A",
"scenarioId": "random_walk",
"seriesCount": 0
}
],
"title": "Numeric, no series",
"type": "gauge"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 6,
"y": 67
},
"id": 37,
"options": {
"barWidthFactor": 0.5,
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true,
"rounded": false,
"spotlight": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "gauge",
"showThresholdLabels": false,
"showThresholdMarkers": true,
"sparkline": true
},
"pluginVersion": "13.0.0-pre",
"targets": [
{
"refId": "A",
"scenarioId": "logs"
}
],
"title": "Non-numeric",
"type": "gauge"
}
],
"preload": false,

View File

@@ -22,40 +22,13 @@ v0alpha1: {
serviceaccountv0alpha1,
externalGroupMappingv0alpha1
]
routes: {
namespaced: {
"/searchUsers": {
"GET": {
request: {
query: {
query?: string
limit?: int64 | 10
offset?: int64 | 0
page?: int64 | 1
}
}
response: {
offset: int64
totalHits: int64
hits: [...#UserHit]
queryCost: float64
maxScore: float64
}
responseMetadata: {
typeMeta: false
objectMeta: false
}
}
}
"/searchTeams": {
"GET": {
request: {
query: {
query?: string
limit?: int64 | 50
offset?: int64 | 0
page?: int64 | 1
}
}
response: {
@@ -78,15 +51,3 @@ v0alpha1: {
}
}
}
#UserHit: {
name: string
title: string
login: string
email: string
role: string
lastSeenAt: int64
lastSeenAtAge: string
provisioned: bool
score: float64
}

View File

@@ -29,9 +29,6 @@ userv0alpha1: userKind & {
// }
schema: {
spec: v0alpha1.UserSpec
status: {
lastSeenAt: int64 | 0
}
}
// TODO: Uncomment when the custom routes implementation is done
// routes: {

View File

@@ -3,10 +3,7 @@
package v0alpha1
type GetSearchTeamsRequestParams struct {
Query *string `json:"query,omitempty"`
Limit int64 `json:"limit,omitempty"`
Offset int64 `json:"offset,omitempty"`
Page int64 `json:"page,omitempty"`
Query *string `json:"query,omitempty"`
}
// NewGetSearchTeamsRequestParams creates a new GetSearchTeamsRequestParams object.

View File

@@ -1,33 +0,0 @@
// Code generated - EDITING IS FUTILE. DO NOT EDIT.
package v0alpha1
import (
"github.com/grafana/grafana-app-sdk/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
type GetSearchUsersRequestParamsObject struct {
metav1.TypeMeta `json:",inline"`
GetSearchUsersRequestParams `json:",inline"`
}
func NewGetSearchUsersRequestParamsObject() *GetSearchUsersRequestParamsObject {
return &GetSearchUsersRequestParamsObject{}
}
func (o *GetSearchUsersRequestParamsObject) DeepCopyObject() runtime.Object {
dst := NewGetSearchUsersRequestParamsObject()
o.DeepCopyInto(dst)
return dst
}
func (o *GetSearchUsersRequestParamsObject) DeepCopyInto(dst *GetSearchUsersRequestParamsObject) {
dst.TypeMeta.APIVersion = o.TypeMeta.APIVersion
dst.TypeMeta.Kind = o.TypeMeta.Kind
dstGetSearchUsersRequestParams := GetSearchUsersRequestParams{}
_ = resource.CopyObjectInto(&dstGetSearchUsersRequestParams, &o.GetSearchUsersRequestParams)
}
var _ runtime.Object = NewGetSearchUsersRequestParamsObject()

View File

@@ -1,15 +0,0 @@
// Code generated - EDITING IS FUTILE. DO NOT EDIT.
package v0alpha1
type GetSearchUsersRequestParams struct {
Query *string `json:"query,omitempty"`
Limit int64 `json:"limit,omitempty"`
Offset int64 `json:"offset,omitempty"`
Page int64 `json:"page,omitempty"`
}
// NewGetSearchUsersRequestParams creates a new GetSearchUsersRequestParams object.
func NewGetSearchUsersRequestParams() *GetSearchUsersRequestParams {
return &GetSearchUsersRequestParams{}
}

View File

@@ -1,37 +0,0 @@
// Code generated - EDITING IS FUTILE. DO NOT EDIT.
package v0alpha1
// +k8s:openapi-gen=true
type UserHit struct {
Name string `json:"name"`
Title string `json:"title"`
Login string `json:"login"`
Email string `json:"email"`
Role string `json:"role"`
LastSeenAt int64 `json:"lastSeenAt"`
LastSeenAtAge string `json:"lastSeenAtAge"`
Provisioned bool `json:"provisioned"`
Score float64 `json:"score"`
}
// NewUserHit creates a new UserHit object.
func NewUserHit() *UserHit {
return &UserHit{}
}
// +k8s:openapi-gen=true
type GetSearchUsers struct {
Offset int64 `json:"offset"`
TotalHits int64 `json:"totalHits"`
Hits []UserHit `json:"hits"`
QueryCost float64 `json:"queryCost"`
MaxScore float64 `json:"maxScore"`
}
// NewGetSearchUsers creates a new GetSearchUsers object.
func NewGetSearchUsers() *GetSearchUsers {
return &GetSearchUsers{
Hits: []UserHit{},
}
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"github.com/grafana/grafana-app-sdk/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type UserClient struct {
@@ -76,24 +75,6 @@ func (c *UserClient) Patch(ctx context.Context, identifier resource.Identifier,
return c.client.Patch(ctx, identifier, req, opts)
}
func (c *UserClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus UserStatus, opts resource.UpdateOptions) (*User, error) {
return c.client.Update(ctx, &User{
TypeMeta: metav1.TypeMeta{
Kind: UserKind().Kind(),
APIVersion: GroupVersion.Identifier(),
},
ObjectMeta: metav1.ObjectMeta{
ResourceVersion: opts.ResourceVersion,
Namespace: identifier.Namespace,
Name: identifier.Name,
},
Status: newStatus,
}, resource.UpdateOptions{
Subresource: "status",
ResourceVersion: opts.ResourceVersion,
})
}
func (c *UserClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error {
return c.client.Delete(ctx, identifier, opts)
}

View File

@@ -21,14 +21,11 @@ type User struct {
// Spec is the spec of the User
Spec UserSpec `json:"spec" yaml:"spec"`
Status UserStatus `json:"status" yaml:"status"`
}
func NewUser() *User {
return &User{
Spec: *NewUserSpec(),
Status: *NewUserStatus(),
Spec: *NewUserSpec(),
}
}
@@ -46,15 +43,11 @@ func (o *User) SetSpec(spec any) error {
}
func (o *User) GetSubresources() map[string]any {
return map[string]any{
"status": o.Status,
}
return map[string]any{}
}
func (o *User) GetSubresource(name string) (any, bool) {
switch name {
case "status":
return o.Status, true
default:
return nil, false
}
@@ -62,13 +55,6 @@ func (o *User) GetSubresource(name string) (any, bool) {
func (o *User) SetSubresource(name string, value any) error {
switch name {
case "status":
cast, ok := value.(UserStatus)
if !ok {
return fmt.Errorf("cannot set status type %#v, not of type UserStatus", value)
}
o.Status = cast
return nil
default:
return fmt.Errorf("subresource '%s' does not exist", name)
}
@@ -240,7 +226,6 @@ func (o *User) DeepCopyInto(dst *User) {
dst.TypeMeta.Kind = o.TypeMeta.Kind
o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta)
o.Spec.DeepCopyInto(&dst.Spec)
o.Status.DeepCopyInto(&dst.Status)
}
// Interface compliance compile-time check
@@ -312,15 +297,3 @@ func (s *UserSpec) DeepCopy() *UserSpec {
func (s *UserSpec) DeepCopyInto(dst *UserSpec) {
resource.CopyObjectInto(dst, s)
}
// DeepCopy creates a full deep copy of UserStatus
func (s *UserStatus) DeepCopy() *UserStatus {
cpy := &UserStatus{}
s.DeepCopyInto(cpy)
return cpy
}
// DeepCopyInto deep copies UserStatus into another UserStatus object
func (s *UserStatus) DeepCopyInto(dst *UserStatus) {
resource.CopyObjectInto(dst, s)
}

View File

@@ -2,12 +2,43 @@
package v0alpha1
// +k8s:openapi-gen=true
type UserstatusOperatorState struct {
// lastEvaluation is the ResourceVersion last evaluated
LastEvaluation string `json:"lastEvaluation"`
// state describes the state of the lastEvaluation.
// It is limited to three possible states for machine evaluation.
State UserStatusOperatorStateState `json:"state"`
// descriptiveState is an optional more descriptive state field which has no requirements on format
DescriptiveState *string `json:"descriptiveState,omitempty"`
// details contains any extra information that is operator-specific
Details map[string]interface{} `json:"details,omitempty"`
}
// NewUserstatusOperatorState creates a new UserstatusOperatorState object.
func NewUserstatusOperatorState() *UserstatusOperatorState {
return &UserstatusOperatorState{}
}
// +k8s:openapi-gen=true
type UserStatus struct {
LastSeenAt int64 `json:"lastSeenAt"`
// operatorStates is a map of operator ID to operator state evaluations.
// Any operator which consumes this kind SHOULD add its state evaluation information to this field.
OperatorStates map[string]UserstatusOperatorState `json:"operatorStates,omitempty"`
// additionalFields is reserved for future use
AdditionalFields map[string]interface{} `json:"additionalFields,omitempty"`
}
// NewUserStatus creates a new UserStatus object.
func NewUserStatus() *UserStatus {
return &UserStatus{}
}
// +k8s:openapi-gen=true
type UserStatusOperatorStateState string
const (
UserStatusOperatorStateStateSuccess UserStatusOperatorStateState = "success"
UserStatusOperatorStateStateInProgress UserStatusOperatorStateState = "in_progress"
UserStatusOperatorStateStateFailed UserStatusOperatorStateState = "failed"
)

View File

@@ -21,7 +21,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GetGroupsBody": schema_pkg_apis_iam_v0alpha1_GetGroupsBody(ref),
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GetSearchTeams": schema_pkg_apis_iam_v0alpha1_GetSearchTeams(ref),
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GetSearchTeamsBody": schema_pkg_apis_iam_v0alpha1_GetSearchTeamsBody(ref),
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GetSearchUsers": schema_pkg_apis_iam_v0alpha1_GetSearchUsers(ref),
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRole": schema_pkg_apis_iam_v0alpha1_GlobalRole(ref),
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleBinding": schema_pkg_apis_iam_v0alpha1_GlobalRoleBinding(ref),
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleBindingList": schema_pkg_apis_iam_v0alpha1_GlobalRoleBindingList(ref),
@@ -73,10 +72,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamStatus": schema_pkg_apis_iam_v0alpha1_TeamStatus(ref),
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamstatusOperatorState": schema_pkg_apis_iam_v0alpha1_TeamstatusOperatorState(ref),
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.User": schema_pkg_apis_iam_v0alpha1_User(ref),
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserHit": schema_pkg_apis_iam_v0alpha1_UserHit(ref),
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserList": schema_pkg_apis_iam_v0alpha1_UserList(ref),
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserSpec": schema_pkg_apis_iam_v0alpha1_UserSpec(ref),
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserStatus": schema_pkg_apis_iam_v0alpha1_UserStatus(ref),
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserstatusOperatorState": schema_pkg_apis_iam_v0alpha1_UserstatusOperatorState(ref),
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.VersionsV0alpha1Kinds7RoutesGroupsGETResponseExternalGroupMapping": schema_pkg_apis_iam_v0alpha1_VersionsV0alpha1Kinds7RoutesGroupsGETResponseExternalGroupMapping(ref),
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.VersionsV0alpha1RoutesNamespacedSearchTeamsGETResponseTeamHit": schema_pkg_apis_iam_v0alpha1_VersionsV0alpha1RoutesNamespacedSearchTeamsGETResponseTeamHit(ref),
}
@@ -689,62 +688,6 @@ func schema_pkg_apis_iam_v0alpha1_GetSearchTeamsBody(ref common.ReferenceCallbac
}
}
func schema_pkg_apis_iam_v0alpha1_GetSearchUsers(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"offset": {
SchemaProps: spec.SchemaProps{
Default: 0,
Type: []string{"integer"},
Format: "int64",
},
},
"totalHits": {
SchemaProps: spec.SchemaProps{
Default: 0,
Type: []string{"integer"},
Format: "int64",
},
},
"hits": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserHit"),
},
},
},
},
},
"queryCost": {
SchemaProps: spec.SchemaProps{
Default: 0,
Type: []string{"number"},
Format: "double",
},
},
"maxScore": {
SchemaProps: spec.SchemaProps{
Default: 0,
Type: []string{"number"},
Format: "double",
},
},
},
Required: []string{"offset", "totalHits", "hits", "queryCost", "maxScore"},
},
},
Dependencies: []string{
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserHit"},
}
}
func schema_pkg_apis_iam_v0alpha1_GlobalRole(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -2890,94 +2833,12 @@ func schema_pkg_apis_iam_v0alpha1_User(ref common.ReferenceCallback) common.Open
Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserStatus"),
},
},
},
Required: []string{"metadata", "spec", "status"},
Required: []string{"metadata", "spec"},
},
},
Dependencies: []string{
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
func schema_pkg_apis_iam_v0alpha1_UserHit(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
"title": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
"login": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
"email": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
"role": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
"lastSeenAt": {
SchemaProps: spec.SchemaProps{
Default: 0,
Type: []string{"integer"},
Format: "int64",
},
},
"lastSeenAtAge": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
"provisioned": {
SchemaProps: spec.SchemaProps{
Default: false,
Type: []string{"boolean"},
Format: "",
},
},
"score": {
SchemaProps: spec.SchemaProps{
Default: 0,
Type: []string{"number"},
Format: "double",
},
},
},
Required: []string{"name", "title", "login", "email", "role", "lastSeenAt", "lastSeenAtAge", "provisioned", "score"},
},
},
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
@@ -3104,15 +2965,90 @@ func schema_pkg_apis_iam_v0alpha1_UserStatus(ref common.ReferenceCallback) commo
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"lastSeenAt": {
"operatorStates": {
SchemaProps: spec.SchemaProps{
Default: 0,
Type: []string{"integer"},
Format: "int64",
Description: "operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field.",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserstatusOperatorState"),
},
},
},
},
},
"additionalFields": {
SchemaProps: spec.SchemaProps{
Description: "additionalFields is reserved for future use",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Format: "",
},
},
},
},
},
},
Required: []string{"lastSeenAt"},
},
},
Dependencies: []string{
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserstatusOperatorState"},
}
}
func schema_pkg_apis_iam_v0alpha1_UserstatusOperatorState(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"lastEvaluation": {
SchemaProps: spec.SchemaProps{
Description: "lastEvaluation is the ResourceVersion last evaluated",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"state": {
SchemaProps: spec.SchemaProps{
Description: "state describes the state of the lastEvaluation. It is limited to three possible states for machine evaluation.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"descriptiveState": {
SchemaProps: spec.SchemaProps{
Description: "descriptiveState is an optional more descriptive state field which has no requirements on format",
Type: []string{"string"},
Format: "",
},
},
"details": {
SchemaProps: spec.SchemaProps{
Description: "details contains any extra information that is operator-specific",
Type: []string{"object"},
AdditionalProperties: &spec.SchemaOrBool{
Allows: true,
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Format: "",
},
},
},
},
},
},
Required: []string{"lastEvaluation", "state"},
},
},
}

View File

@@ -173,36 +173,6 @@ var appManifestData = app.ManifestData{
Parameters: []*spec3.Parameter{
{
ParameterProps: spec3.ParameterProps{
Name: "limit",
In: "query",
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{},
},
},
},
{
ParameterProps: spec3.ParameterProps{
Name: "offset",
In: "query",
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{},
},
},
},
{
ParameterProps: spec3.ParameterProps{
Name: "page",
In: "query",
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{},
},
},
},
{
ParameterProps: spec3.ParameterProps{
Name: "query",
@@ -291,118 +261,6 @@ var appManifestData = app.ManifestData{
},
},
},
"/searchUsers": {
Get: &spec3.Operation{
OperationProps: spec3.OperationProps{
OperationId: "getSearchUsers",
Parameters: []*spec3.Parameter{
{
ParameterProps: spec3.ParameterProps{
Name: "limit",
In: "query",
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{},
},
},
},
{
ParameterProps: spec3.ParameterProps{
Name: "offset",
In: "query",
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{},
},
},
},
{
ParameterProps: spec3.ParameterProps{
Name: "page",
In: "query",
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{},
},
},
},
{
ParameterProps: spec3.ParameterProps{
Name: "query",
In: "query",
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
},
},
},
Responses: &spec3.Responses{
ResponsesProps: spec3.ResponsesProps{
Default: &spec3.Response{
ResponseProps: spec3.ResponseProps{
Description: "Default OK response",
Content: map[string]*spec3.MediaType{
"application/json": {
MediaTypeProps: spec3.MediaTypeProps{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"hits": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: spec.MustCreateRef("#/components/schemas/getSearchUsersUserHit"),
}},
},
},
},
"maxScore": {
SchemaProps: spec.SchemaProps{
Type: []string{"number"},
},
},
"offset": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
},
},
"queryCost": {
SchemaProps: spec.SchemaProps{
Type: []string{"number"},
},
},
"totalHits": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
},
},
},
Required: []string{
"offset",
"totalHits",
"hits",
"queryCost",
"maxScore",
},
}},
}},
},
},
},
}},
},
},
},
},
Cluster: map[string]spec3.PathProps{},
Schemas: map[string]spec.Schema{
@@ -445,69 +303,6 @@ var appManifestData = app.ManifestData{
},
},
},
"getSearchUsersUserHit": {
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
"email": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
"lastSeenAt": {
SchemaProps: spec.SchemaProps{
Type: []string{"integer"},
},
},
"lastSeenAtAge": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
"login": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
"name": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
"provisioned": {
SchemaProps: spec.SchemaProps{
Type: []string{"boolean"},
},
},
"role": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
"score": {
SchemaProps: spec.SchemaProps{
Type: []string{"number"},
},
},
"title": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
},
},
},
Required: []string{
"name",
"title",
"login",
"email",
"role",
"lastSeenAt",
"lastSeenAtAge",
"provisioned",
"score",
},
},
},
},
},
},
@@ -547,7 +342,6 @@ var customRouteToGoResponseType = map[string]any{
"v0alpha1|Team|groups|GET": v0alpha1.GetGroups{},
"v0alpha1||<namespace>/searchTeams|GET": v0alpha1.GetSearchTeams{},
"v0alpha1||<namespace>/searchUsers|GET": v0alpha1.GetSearchUsers{},
}
// ManifestCustomRouteResponsesAssociator returns the associated response go type for a given kind, version, custom route path, and method, if one exists.

View File

@@ -24,6 +24,7 @@ require (
require (
cel.dev/expr v0.24.0 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/ProtonMail/go-crypto v1.1.6 // indirect
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
github.com/apache/arrow-go/v18 v18.4.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
@@ -35,16 +36,21 @@ require (
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 // indirect
github.com/aws/smithy-go v1.23.1 // indirect
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/bluele/gcache v0.0.2 // indirect
github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 // indirect
github.com/bwmarrin/snowflake v0.3.0 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cheekybits/genny v1.0.0 // indirect
github.com/cloudflare/circl v1.6.1 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/diegoholiveira/jsonlogic/v3 v3.7.4 // indirect
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
@@ -132,11 +138,15 @@ require (
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/nikunjy/rules v1.5.0 // indirect
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect
github.com/oklog/run v1.1.0 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/open-feature/go-sdk v1.16.0 // indirect
github.com/open-feature/go-sdk-contrib/providers/go-feature-flag v0.2.6 // indirect
github.com/open-feature/go-sdk-contrib/providers/ofrep v0.1.6 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/perimeterx/marshmallow v1.1.5 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
@@ -155,6 +165,7 @@ require (
github.com/spf13/pflag v1.0.10 // indirect
github.com/stoewer/go-strcase v1.3.1 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/thomaspoignant/go-feature-flag v1.42.0 // indirect
github.com/tjhop/slog-gokit v0.1.5 // indirect
github.com/woodsbury/decimal128 v1.3.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
@@ -176,6 +187,8 @@ require (
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
go.opentelemetry.io/otel/trace v1.38.0 // indirect
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/mock v0.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.1 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect

View File

@@ -4,9 +4,13 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw=
github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -38,12 +42,18 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 h1:+LVB0xBqEgjQoqr9bGZbRzvg212B
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5/go.mod h1:xoaxeqnnUaZjPjaICgIy5B+MHCSb/ZSOn4MvkFNOUA0=
github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M=
github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df h1:GSoSVRLoBaFpOOds6QyY1L8AX7uoY+Ln3BHc22W40X0=
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df/go.mod h1:hiVxq5OP2bUGBRNS3Z/bt/reCLFNbdcST6gISi1fiOM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 h1:N7oVaKyGp8bttX0bfZGmcGkjz7DLQXhAn3DNd3T0ous=
github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874/go.mod h1:r5xuitiExdLAJ09PR7vBVENGvp4ZuTBeWTGtxuX3K+c=
github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw=
@@ -60,6 +70,8 @@ github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wX
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
@@ -69,6 +81,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/diegoholiveira/jsonlogic/v3 v3.7.4 h1:92HSmB9bwM/o0ZvrCpcvTP2EsPXSkKtAniIr2W/dcIM=
github.com/diegoholiveira/jsonlogic/v3 v3.7.4/go.mod h1:OYRb6FSTVmMM+MNQ7ElmMsczyNSepw+OU4Z8emDSi4w=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes=
@@ -341,6 +355,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nikunjy/rules v1.5.0 h1:KJDSLOsFhwt7kcXUyZqwkgrQg5YoUwj+TVu6ItCQShw=
github.com/nikunjy/rules v1.5.0/go.mod h1:TlZtZdBChrkqi8Lr2AXocme8Z7EsbxtFdDoKeI6neBQ=
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY=
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw=
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c=
@@ -355,6 +371,12 @@ github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/open-feature/go-sdk v1.16.0 h1:5NCHYv5slvNBIZhYXAzAufo0OI59OACZ5tczVqSE+Tg=
github.com/open-feature/go-sdk v1.16.0/go.mod h1:EIF40QcoYT1VbQkMPy2ZJH4kvZeY+qGUXAorzSWgKSo=
github.com/open-feature/go-sdk-contrib/providers/go-feature-flag v0.2.6 h1:megzzlQGjsRVWDX8oJnLaa5eEcsAHekiL4Uvl3jSAcY=
github.com/open-feature/go-sdk-contrib/providers/go-feature-flag v0.2.6/go.mod h1:K1gDKvt76CGFLSUMHUydd5ba2V5Cv69gQZsdbnXhAm8=
github.com/open-feature/go-sdk-contrib/providers/ofrep v0.1.6 h1:WinefYxeVx5rV0uQmuWbxQf8iACu/JiRubo5w0saToc=
github.com/open-feature/go-sdk-contrib/providers/ofrep v0.1.6/go.mod h1:Dwcaoma6lZVqYwyfVlY7eB6RXbG+Ju3b9cnpTlUN+Hc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
@@ -440,6 +462,10 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/thejerf/slogassert v0.3.4 h1:VoTsXixRbXMrRSSxDjYTiEDCM4VWbsYPW5rB/hX24kM=
github.com/thejerf/slogassert v0.3.4/go.mod h1:0zn9ISLVKo1aPMTqcGfG1o6dWwt+Rk574GlUxHD4rs8=
github.com/thomaspoignant/go-feature-flag v1.42.0 h1:C7embmOTzaLyRki+OoU2RvtVjJE9IrvgBA2C1mRN1lc=
github.com/thomaspoignant/go-feature-flag v1.42.0/go.mod h1:y0QiWH7chHWhGATb/+XqwAwErORmPSH2MUsQlCmmWlM=
github.com/tjhop/slog-gokit v0.1.5 h1:ayloIUi5EK2QYB8eY4DOPO95/mRtMW42lUkp3quJohc=
github.com/tjhop/slog-gokit v0.1.5/go.mod h1:yA48zAHvV+Sg4z4VRyeFyFUNNXd3JY5Zg84u3USICq0=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
@@ -507,8 +533,12 @@ go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJr
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=

View File

@@ -5,7 +5,24 @@ metaV0Alpha1: {
scope: "Namespaced"
schema: {
spec: {
pluginJSON: #JSONData,
pluginJson: #JSONData
module?: {
path: string
hash?: string
loadingStrategy?: "fetch" | "script"
}
baseURL?: string
signature?: {
status: "internal" | "valid" | "invalid" | "modified" | "unsigned"
type?: "grafana" | "commercial" | "community" | "private" | "private-glob"
org?: string
}
angular?: {
detected: bool
}
translations?: [string]: string
// +listType=atomic
children?: [...string]
}
}
}

View File

@@ -208,13 +208,20 @@ func NewMetaExtensions() *MetaExtensions {
// +k8s:openapi-gen=true
type MetaSpec struct {
PluginJSON MetaJSONData `json:"pluginJSON"`
PluginJson MetaJSONData `json:"pluginJson"`
Module *MetaV0alpha1SpecModule `json:"module,omitempty"`
BaseURL *string `json:"baseURL,omitempty"`
Signature *MetaV0alpha1SpecSignature `json:"signature,omitempty"`
Angular *MetaV0alpha1SpecAngular `json:"angular,omitempty"`
Translations map[string]string `json:"translations,omitempty"`
// +listType=atomic
Children []string `json:"children,omitempty"`
}
// NewMetaSpec creates a new MetaSpec object.
func NewMetaSpec() *MetaSpec {
return &MetaSpec{
PluginJSON: *NewMetaJSONData(),
PluginJson: *NewMetaJSONData(),
}
}
@@ -412,6 +419,40 @@ func NewMetaV0alpha1ExtensionsExtensionPoints() *MetaV0alpha1ExtensionsExtension
return &MetaV0alpha1ExtensionsExtensionPoints{}
}
// +k8s:openapi-gen=true
// MetaV0alpha1SpecModule describes a plugin's frontend module: where it lives,
// an optional integrity hash, and an optional loading strategy
// (see MetaV0alpha1SpecModuleLoadingStrategy for the allowed values).
type MetaV0alpha1SpecModule struct {
	// Path is the module location for the plugin's frontend code.
	Path string `json:"path"`
	// Hash is an optional integrity hash for the module file.
	Hash *string `json:"hash,omitempty"`
	// LoadingStrategy optionally selects how the module is loaded ("fetch" or "script").
	LoadingStrategy *MetaV0alpha1SpecModuleLoadingStrategy `json:"loadingStrategy,omitempty"`
}

// NewMetaV0alpha1SpecModule creates a new MetaV0alpha1SpecModule object.
func NewMetaV0alpha1SpecModule() *MetaV0alpha1SpecModule {
	return &MetaV0alpha1SpecModule{}
}
// +k8s:openapi-gen=true
// MetaV0alpha1SpecSignature records the plugin's signature verification result:
// the mandatory status plus an optional signature type and signing organization.
type MetaV0alpha1SpecSignature struct {
	// Status is the signature verification outcome (internal/valid/invalid/modified/unsigned).
	Status MetaV0alpha1SpecSignatureStatus `json:"status"`
	// Type is the kind of signature, when one is present.
	Type *MetaV0alpha1SpecSignatureType `json:"type,omitempty"`
	// Org is the organization that signed the plugin, when known.
	Org *string `json:"org,omitempty"`
}

// NewMetaV0alpha1SpecSignature creates a new MetaV0alpha1SpecSignature object.
func NewMetaV0alpha1SpecSignature() *MetaV0alpha1SpecSignature {
	return &MetaV0alpha1SpecSignature{}
}
// +k8s:openapi-gen=true
// MetaV0alpha1SpecAngular reports whether Angular usage was detected in the plugin.
type MetaV0alpha1SpecAngular struct {
	// Detected is true when the plugin was flagged as using Angular.
	Detected bool `json:"detected"`
}

// NewMetaV0alpha1SpecAngular creates a new MetaV0alpha1SpecAngular object.
func NewMetaV0alpha1SpecAngular() *MetaV0alpha1SpecAngular {
	return &MetaV0alpha1SpecAngular{}
}
// +k8s:openapi-gen=true
type MetaJSONDataType string
@@ -472,3 +513,33 @@ const (
MetaV0alpha1DependenciesPluginsTypeDatasource MetaV0alpha1DependenciesPluginsType = "datasource"
MetaV0alpha1DependenciesPluginsTypePanel MetaV0alpha1DependenciesPluginsType = "panel"
)
// +k8s:openapi-gen=true
// MetaV0alpha1SpecModuleLoadingStrategy enumerates the supported module
// loading strategies for a plugin's frontend code.
type MetaV0alpha1SpecModuleLoadingStrategy string

const (
	// MetaV0alpha1SpecModuleLoadingStrategyFetch is the "fetch" loading strategy.
	MetaV0alpha1SpecModuleLoadingStrategyFetch MetaV0alpha1SpecModuleLoadingStrategy = "fetch"
	// MetaV0alpha1SpecModuleLoadingStrategyScript is the "script" loading strategy.
	MetaV0alpha1SpecModuleLoadingStrategyScript MetaV0alpha1SpecModuleLoadingStrategy = "script"
)
// +k8s:openapi-gen=true
// MetaV0alpha1SpecSignatureStatus enumerates the possible plugin signature
// verification outcomes.
type MetaV0alpha1SpecSignatureStatus string

const (
	// MetaV0alpha1SpecSignatureStatusInternal marks a plugin distributed as part of Grafana itself.
	MetaV0alpha1SpecSignatureStatusInternal MetaV0alpha1SpecSignatureStatus = "internal"
	// MetaV0alpha1SpecSignatureStatusValid marks a successfully verified signature.
	MetaV0alpha1SpecSignatureStatusValid MetaV0alpha1SpecSignatureStatus = "valid"
	// MetaV0alpha1SpecSignatureStatusInvalid marks a signature that failed verification.
	MetaV0alpha1SpecSignatureStatusInvalid MetaV0alpha1SpecSignatureStatus = "invalid"
	// MetaV0alpha1SpecSignatureStatusModified marks plugin files changed after signing.
	MetaV0alpha1SpecSignatureStatusModified MetaV0alpha1SpecSignatureStatus = "modified"
	// MetaV0alpha1SpecSignatureStatusUnsigned marks a plugin with no signature.
	MetaV0alpha1SpecSignatureStatusUnsigned MetaV0alpha1SpecSignatureStatus = "unsigned"
)
// +k8s:openapi-gen=true
// MetaV0alpha1SpecSignatureType enumerates the kinds of plugin signatures.
type MetaV0alpha1SpecSignatureType string

const (
	MetaV0alpha1SpecSignatureTypeGrafana     MetaV0alpha1SpecSignatureType = "grafana"
	MetaV0alpha1SpecSignatureTypeCommercial  MetaV0alpha1SpecSignatureType = "commercial"
	MetaV0alpha1SpecSignatureTypeCommunity   MetaV0alpha1SpecSignatureType = "community"
	MetaV0alpha1SpecSignatureTypePrivate     MetaV0alpha1SpecSignatureType = "private"
	MetaV0alpha1SpecSignatureTypePrivateGlob MetaV0alpha1SpecSignatureType = "private-glob"
)

File diff suppressed because one or more lines are too long

View File

@@ -10,8 +10,6 @@ import (
"time"
"github.com/grafana/grafana-app-sdk/logging"
pluginsv0alpha1 "github.com/grafana/grafana/apps/plugins/pkg/apis/plugins/v0alpha1"
)
const (
@@ -87,45 +85,9 @@ func (p *CatalogProvider) GetMeta(ctx context.Context, pluginID, version string)
return nil, fmt.Errorf("failed to decode response: %w", err)
}
metaSpec := grafanaComPluginVersionMetaToMetaSpec(gcomMeta)
return &Result{
Meta: gcomMeta.JSON,
Meta: metaSpec,
TTL: p.ttl,
}, nil
}
// grafanaComPluginVersionMeta represents the response from grafana.com API
// GET /api/plugins/{pluginId}/versions/{version}
type grafanaComPluginVersionMeta struct {
PluginID string `json:"pluginSlug"`
Version string `json:"version"`
URL string `json:"url"`
Commit string `json:"commit"`
Description string `json:"description"`
Keywords []string `json:"keywords"`
CreatedAt time.Time `json:"createdAt"`
UpdatedAt time.Time `json:"updatedAt"`
JSON pluginsv0alpha1.MetaJSONData `json:"json"`
Readme string `json:"readme"`
Downloads int `json:"downloads"`
Verified bool `json:"verified"`
Status string `json:"status"`
StatusContext string `json:"statusContext"`
DownloadSlug string `json:"downloadSlug"`
SignatureType string `json:"signatureType"`
SignedByOrg string `json:"signedByOrg"`
SignedByOrgName string `json:"signedByOrgName"`
Packages struct {
Any struct {
Md5 string `json:"md5"`
Sha256 string `json:"sha256"`
PackageName string `json:"packageName"`
DownloadURL string `json:"downloadUrl"`
} `json:"any"`
} `json:"packages"`
Links []struct {
Rel string `json:"rel"`
Href string `json:"href"`
} `json:"links"`
AngularDetected bool `json:"angularDetected"`
Scopes []string `json:"scopes"`
}

View File

@@ -49,7 +49,7 @@ func TestCatalogProvider_GetMeta(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, result)
assert.Equal(t, expectedMeta, result.Meta)
assert.Equal(t, expectedMeta, result.Meta.PluginJson)
assert.Equal(t, defaultCatalogTTL, result.TTL)
})

View File

@@ -0,0 +1,725 @@
package meta
import (
"encoding/json"
"time"
pluginsv0alpha1 "github.com/grafana/grafana/apps/plugins/pkg/apis/plugins/v0alpha1"
"github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore"
)
// jsonDataToMetaJSONData converts a plugins.JSONData to a pluginsv0alpha1.MetaJSONData.
//
// Conversion convention used throughout: optional target fields are populated
// only when the source value is non-zero (non-empty string, true bool,
// non-empty slice/map); otherwise they are left nil so omitempty JSON output
// stays absent. Unknown enum-like source values fall through their switch and
// leave the target at its zero value.
// nolint:gocyclo
func jsonDataToMetaJSONData(jsonData plugins.JSONData) pluginsv0alpha1.MetaJSONData {
	meta := pluginsv0alpha1.MetaJSONData{
		Id:   jsonData.ID,
		Name: jsonData.Name,
	}

	// Map plugin type. An unrecognized type leaves meta.Type at its zero value.
	switch jsonData.Type {
	case plugins.TypeApp:
		meta.Type = pluginsv0alpha1.MetaJSONDataTypeApp
	case plugins.TypeDataSource:
		meta.Type = pluginsv0alpha1.MetaJSONDataTypeDatasource
	case plugins.TypePanel:
		meta.Type = pluginsv0alpha1.MetaJSONDataTypePanel
	case plugins.TypeRenderer:
		meta.Type = pluginsv0alpha1.MetaJSONDataTypeRenderer
	}

	// Map Info
	meta.Info = pluginsv0alpha1.MetaInfo{
		Keywords: jsonData.Info.Keywords,
		Logos: pluginsv0alpha1.MetaV0alpha1InfoLogos{
			Small: jsonData.Info.Logos.Small,
			Large: jsonData.Info.Logos.Large,
		},
		Updated: jsonData.Info.Updated,
		Version: jsonData.Info.Version,
	}
	if jsonData.Info.Description != "" {
		meta.Info.Description = &jsonData.Info.Description
	}
	// Author is attached only when at least one of its fields is set.
	if jsonData.Info.Author.Name != "" || jsonData.Info.Author.URL != "" {
		author := &pluginsv0alpha1.MetaV0alpha1InfoAuthor{}
		if jsonData.Info.Author.Name != "" {
			author.Name = &jsonData.Info.Author.Name
		}
		if jsonData.Info.Author.URL != "" {
			author.Url = &jsonData.Info.Author.URL
		}
		meta.Info.Author = author
	}
	if len(jsonData.Info.Links) > 0 {
		meta.Info.Links = make([]pluginsv0alpha1.MetaV0alpha1InfoLinks, 0, len(jsonData.Info.Links))
		for _, link := range jsonData.Info.Links {
			v0Link := pluginsv0alpha1.MetaV0alpha1InfoLinks{}
			if link.Name != "" {
				v0Link.Name = &link.Name
			}
			if link.URL != "" {
				v0Link.Url = &link.URL
			}
			meta.Info.Links = append(meta.Info.Links, v0Link)
		}
	}
	if len(jsonData.Info.Screenshots) > 0 {
		meta.Info.Screenshots = make([]pluginsv0alpha1.MetaV0alpha1InfoScreenshots, 0, len(jsonData.Info.Screenshots))
		for _, screenshot := range jsonData.Info.Screenshots {
			v0Screenshot := pluginsv0alpha1.MetaV0alpha1InfoScreenshots{}
			if screenshot.Name != "" {
				v0Screenshot.Name = &screenshot.Name
			}
			if screenshot.Path != "" {
				v0Screenshot.Path = &screenshot.Path
			}
			meta.Info.Screenshots = append(meta.Info.Screenshots, v0Screenshot)
		}
	}

	// Map Dependencies
	meta.Dependencies = pluginsv0alpha1.MetaDependencies{
		GrafanaDependency: jsonData.Dependencies.GrafanaDependency,
	}
	if jsonData.Dependencies.GrafanaVersion != "" {
		meta.Dependencies.GrafanaVersion = &jsonData.Dependencies.GrafanaVersion
	}
	if len(jsonData.Dependencies.Plugins) > 0 {
		meta.Dependencies.Plugins = make([]pluginsv0alpha1.MetaV0alpha1DependenciesPlugins, 0, len(jsonData.Dependencies.Plugins))
		for _, dep := range jsonData.Dependencies.Plugins {
			// NOTE(review): an unknown dep.Type leaves depType at its zero
			// value ("") — confirm that is acceptable downstream.
			var depType pluginsv0alpha1.MetaV0alpha1DependenciesPluginsType
			switch dep.Type {
			case "app":
				depType = pluginsv0alpha1.MetaV0alpha1DependenciesPluginsTypeApp
			case "datasource":
				depType = pluginsv0alpha1.MetaV0alpha1DependenciesPluginsTypeDatasource
			case "panel":
				depType = pluginsv0alpha1.MetaV0alpha1DependenciesPluginsTypePanel
			}
			meta.Dependencies.Plugins = append(meta.Dependencies.Plugins, pluginsv0alpha1.MetaV0alpha1DependenciesPlugins{
				Id:   dep.ID,
				Type: depType,
				Name: dep.Name,
			})
		}
	}
	if len(jsonData.Dependencies.Extensions.ExposedComponents) > 0 {
		meta.Dependencies.Extensions = &pluginsv0alpha1.MetaV0alpha1DependenciesExtensions{
			ExposedComponents: jsonData.Dependencies.Extensions.ExposedComponents,
		}
	}

	// Map optional boolean fields: only true values produce a pointer;
	// false is represented by a nil (omitted) field.
	if jsonData.Alerting {
		meta.Alerting = &jsonData.Alerting
	}
	if jsonData.Annotations {
		meta.Annotations = &jsonData.Annotations
	}
	if jsonData.AutoEnabled {
		meta.AutoEnabled = &jsonData.AutoEnabled
	}
	if jsonData.Backend {
		meta.Backend = &jsonData.Backend
	}
	if jsonData.BuiltIn {
		meta.BuiltIn = &jsonData.BuiltIn
	}
	if jsonData.HideFromList {
		meta.HideFromList = &jsonData.HideFromList
	}
	if jsonData.Logs {
		meta.Logs = &jsonData.Logs
	}
	if jsonData.Metrics {
		meta.Metrics = &jsonData.Metrics
	}
	if jsonData.MultiValueFilterOperators {
		meta.MultiValueFilterOperators = &jsonData.MultiValueFilterOperators
	}
	if jsonData.Preload {
		meta.Preload = &jsonData.Preload
	}
	if jsonData.SkipDataQuery {
		meta.SkipDataQuery = &jsonData.SkipDataQuery
	}
	if jsonData.Streaming {
		meta.Streaming = &jsonData.Streaming
	}
	if jsonData.Tracing {
		meta.Tracing = &jsonData.Tracing
	}

	// Map category. Unknown non-empty categories are coerced to "other".
	if jsonData.Category != "" {
		var category pluginsv0alpha1.MetaJSONDataCategory
		switch jsonData.Category {
		case "tsdb":
			category = pluginsv0alpha1.MetaJSONDataCategoryTsdb
		case "logging":
			category = pluginsv0alpha1.MetaJSONDataCategoryLogging
		case "cloud":
			category = pluginsv0alpha1.MetaJSONDataCategoryCloud
		case "tracing":
			category = pluginsv0alpha1.MetaJSONDataCategoryTracing
		case "profiling":
			category = pluginsv0alpha1.MetaJSONDataCategoryProfiling
		case "sql":
			category = pluginsv0alpha1.MetaJSONDataCategorySql
		case "enterprise":
			category = pluginsv0alpha1.MetaJSONDataCategoryEnterprise
		case "iot":
			category = pluginsv0alpha1.MetaJSONDataCategoryIot
		case "other":
			category = pluginsv0alpha1.MetaJSONDataCategoryOther
		default:
			category = pluginsv0alpha1.MetaJSONDataCategoryOther
		}
		meta.Category = &category
	}

	// Map state. Only alpha/beta are representable; any other state is dropped
	// (state stays "" and the pointer is never set).
	if jsonData.State != "" {
		var state pluginsv0alpha1.MetaJSONDataState
		switch jsonData.State {
		case plugins.ReleaseStateAlpha:
			state = pluginsv0alpha1.MetaJSONDataStateAlpha
		case plugins.ReleaseStateBeta:
			state = pluginsv0alpha1.MetaJSONDataStateBeta
		default:
		}
		if state != "" {
			meta.State = &state
		}
	}

	// Map executable
	if jsonData.Executable != "" {
		meta.Executable = &jsonData.Executable
	}

	// Map QueryOptions: only the three known keys are carried over.
	if len(jsonData.QueryOptions) > 0 {
		queryOptions := &pluginsv0alpha1.MetaQueryOptions{}
		if val, ok := jsonData.QueryOptions["maxDataPoints"]; ok {
			queryOptions.MaxDataPoints = &val
		}
		if val, ok := jsonData.QueryOptions["minInterval"]; ok {
			queryOptions.MinInterval = &val
		}
		if val, ok := jsonData.QueryOptions["cacheTimeout"]; ok {
			queryOptions.CacheTimeout = &val
		}
		meta.QueryOptions = queryOptions
	}

	// Map Includes
	if len(jsonData.Includes) > 0 {
		meta.Includes = make([]pluginsv0alpha1.MetaInclude, 0, len(jsonData.Includes))
		for _, include := range jsonData.Includes {
			v0Include := pluginsv0alpha1.MetaInclude{}
			if include.UID != "" {
				v0Include.Uid = &include.UID
			}
			if include.Type != "" {
				// NOTE(review): an unrecognized include.Type yields a pointer
				// to the zero value "" rather than nil — confirm intended.
				var includeType pluginsv0alpha1.MetaIncludeType
				switch include.Type {
				case "dashboard":
					includeType = pluginsv0alpha1.MetaIncludeTypeDashboard
				case "page":
					includeType = pluginsv0alpha1.MetaIncludeTypePage
				case "panel":
					includeType = pluginsv0alpha1.MetaIncludeTypePanel
				case "datasource":
					includeType = pluginsv0alpha1.MetaIncludeTypeDatasource
				}
				v0Include.Type = &includeType
			}
			if include.Name != "" {
				v0Include.Name = &include.Name
			}
			if include.Component != "" {
				v0Include.Component = &include.Component
			}
			if include.Role != "" {
				// NOTE(review): same zero-value-pointer caveat as include.Type
				// for roles other than Admin/Editor/Viewer.
				var role pluginsv0alpha1.MetaIncludeRole
				switch include.Role {
				case "Admin":
					role = pluginsv0alpha1.MetaIncludeRoleAdmin
				case "Editor":
					role = pluginsv0alpha1.MetaIncludeRoleEditor
				case "Viewer":
					role = pluginsv0alpha1.MetaIncludeRoleViewer
				}
				v0Include.Role = &role
			}
			if include.Action != "" {
				v0Include.Action = &include.Action
			}
			if include.Path != "" {
				v0Include.Path = &include.Path
			}
			if include.AddToNav {
				v0Include.AddToNav = &include.AddToNav
			}
			if include.DefaultNav {
				v0Include.DefaultNav = &include.DefaultNav
			}
			if include.Icon != "" {
				v0Include.Icon = &include.Icon
			}
			meta.Includes = append(meta.Includes, v0Include)
		}
	}

	// Map Routes
	if len(jsonData.Routes) > 0 {
		meta.Routes = make([]pluginsv0alpha1.MetaRoute, 0, len(jsonData.Routes))
		for _, route := range jsonData.Routes {
			v0Route := pluginsv0alpha1.MetaRoute{}
			if route.Path != "" {
				v0Route.Path = &route.Path
			}
			if route.Method != "" {
				v0Route.Method = &route.Method
			}
			if route.URL != "" {
				v0Route.Url = &route.URL
			}
			if route.ReqRole != "" {
				reqRole := string(route.ReqRole)
				v0Route.ReqRole = &reqRole
			}
			if route.ReqAction != "" {
				v0Route.ReqAction = &route.ReqAction
			}
			// Headers are flattened to "Name: Content" strings.
			if len(route.Headers) > 0 {
				headers := make([]string, 0, len(route.Headers))
				for _, header := range route.Headers {
					headers = append(headers, header.Name+": "+header.Content)
				}
				v0Route.Headers = headers
			}
			if len(route.URLParams) > 0 {
				v0Route.UrlParams = make([]pluginsv0alpha1.MetaV0alpha1RouteUrlParams, 0, len(route.URLParams))
				for _, param := range route.URLParams {
					v0Param := pluginsv0alpha1.MetaV0alpha1RouteUrlParams{}
					if param.Name != "" {
						v0Param.Name = &param.Name
					}
					if param.Content != "" {
						v0Param.Content = &param.Content
					}
					v0Route.UrlParams = append(v0Route.UrlParams, v0Param)
				}
			}
			if route.TokenAuth != nil {
				v0Route.TokenAuth = &pluginsv0alpha1.MetaV0alpha1RouteTokenAuth{}
				if route.TokenAuth.Url != "" {
					v0Route.TokenAuth.Url = &route.TokenAuth.Url
				}
				if len(route.TokenAuth.Scopes) > 0 {
					v0Route.TokenAuth.Scopes = route.TokenAuth.Scopes
				}
				if len(route.TokenAuth.Params) > 0 {
					v0Route.TokenAuth.Params = make(map[string]interface{})
					for k, v := range route.TokenAuth.Params {
						v0Route.TokenAuth.Params[k] = v
					}
				}
			}
			if route.JwtTokenAuth != nil {
				v0Route.JwtTokenAuth = &pluginsv0alpha1.MetaV0alpha1RouteJwtTokenAuth{}
				if route.JwtTokenAuth.Url != "" {
					v0Route.JwtTokenAuth.Url = &route.JwtTokenAuth.Url
				}
				if len(route.JwtTokenAuth.Scopes) > 0 {
					v0Route.JwtTokenAuth.Scopes = route.JwtTokenAuth.Scopes
				}
				if len(route.JwtTokenAuth.Params) > 0 {
					v0Route.JwtTokenAuth.Params = make(map[string]interface{})
					for k, v := range route.JwtTokenAuth.Params {
						v0Route.JwtTokenAuth.Params[k] = v
					}
				}
			}
			// Route bodies that are not valid JSON objects are silently dropped.
			if len(route.Body) > 0 {
				var bodyMap map[string]interface{}
				if err := json.Unmarshal(route.Body, &bodyMap); err == nil {
					v0Route.Body = bodyMap
				}
			}
			meta.Routes = append(meta.Routes, v0Route)
		}
	}

	// Map Extensions: the container is created only when at least one of the
	// four extension lists is non-empty.
	if len(jsonData.Extensions.AddedLinks) > 0 || len(jsonData.Extensions.AddedComponents) > 0 ||
		len(jsonData.Extensions.ExposedComponents) > 0 || len(jsonData.Extensions.ExtensionPoints) > 0 {
		extensions := &pluginsv0alpha1.MetaExtensions{}
		if len(jsonData.Extensions.AddedLinks) > 0 {
			extensions.AddedLinks = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsAddedLinks, 0, len(jsonData.Extensions.AddedLinks))
			for _, link := range jsonData.Extensions.AddedLinks {
				v0Link := pluginsv0alpha1.MetaV0alpha1ExtensionsAddedLinks{
					Targets: link.Targets,
					Title:   link.Title,
				}
				if link.Description != "" {
					v0Link.Description = &link.Description
				}
				extensions.AddedLinks = append(extensions.AddedLinks, v0Link)
			}
		}
		if len(jsonData.Extensions.AddedComponents) > 0 {
			extensions.AddedComponents = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsAddedComponents, 0, len(jsonData.Extensions.AddedComponents))
			for _, comp := range jsonData.Extensions.AddedComponents {
				v0Comp := pluginsv0alpha1.MetaV0alpha1ExtensionsAddedComponents{
					Targets: comp.Targets,
					Title:   comp.Title,
				}
				if comp.Description != "" {
					v0Comp.Description = &comp.Description
				}
				extensions.AddedComponents = append(extensions.AddedComponents, v0Comp)
			}
		}
		if len(jsonData.Extensions.ExposedComponents) > 0 {
			extensions.ExposedComponents = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsExposedComponents, 0, len(jsonData.Extensions.ExposedComponents))
			for _, comp := range jsonData.Extensions.ExposedComponents {
				v0Comp := pluginsv0alpha1.MetaV0alpha1ExtensionsExposedComponents{
					Id: comp.Id,
				}
				if comp.Title != "" {
					v0Comp.Title = &comp.Title
				}
				if comp.Description != "" {
					v0Comp.Description = &comp.Description
				}
				extensions.ExposedComponents = append(extensions.ExposedComponents, v0Comp)
			}
		}
		if len(jsonData.Extensions.ExtensionPoints) > 0 {
			extensions.ExtensionPoints = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsExtensionPoints, 0, len(jsonData.Extensions.ExtensionPoints))
			for _, point := range jsonData.Extensions.ExtensionPoints {
				v0Point := pluginsv0alpha1.MetaV0alpha1ExtensionsExtensionPoints{
					Id: point.Id,
				}
				if point.Title != "" {
					v0Point.Title = &point.Title
				}
				if point.Description != "" {
					v0Point.Description = &point.Description
				}
				extensions.ExtensionPoints = append(extensions.ExtensionPoints, v0Point)
			}
		}
		meta.Extensions = extensions
	}

	// Map Roles
	if len(jsonData.Roles) > 0 {
		meta.Roles = make([]pluginsv0alpha1.MetaRole, 0, len(jsonData.Roles))
		for _, role := range jsonData.Roles {
			v0Role := pluginsv0alpha1.MetaRole{
				Grants: role.Grants,
			}
			// Nested role definition is attached only when it carries data.
			if role.Role.Name != "" || role.Role.Description != "" || len(role.Role.Permissions) > 0 {
				v0RoleRole := &pluginsv0alpha1.MetaV0alpha1RoleRole{}
				if role.Role.Name != "" {
					v0RoleRole.Name = &role.Role.Name
				}
				if role.Role.Description != "" {
					v0RoleRole.Description = &role.Role.Description
				}
				if len(role.Role.Permissions) > 0 {
					v0RoleRole.Permissions = make([]pluginsv0alpha1.MetaV0alpha1RoleRolePermissions, 0, len(role.Role.Permissions))
					for _, perm := range role.Role.Permissions {
						v0Perm := pluginsv0alpha1.MetaV0alpha1RoleRolePermissions{}
						if perm.Action != "" {
							v0Perm.Action = &perm.Action
						}
						if perm.Scope != "" {
							v0Perm.Scope = &perm.Scope
						}
						v0RoleRole.Permissions = append(v0RoleRole.Permissions, v0Perm)
					}
				}
				v0Role.Role = v0RoleRole
			}
			meta.Roles = append(meta.Roles, v0Role)
		}
	}

	// Map IAM
	if jsonData.IAM != nil && len(jsonData.IAM.Permissions) > 0 {
		iam := &pluginsv0alpha1.MetaIAM{
			Permissions: make([]pluginsv0alpha1.MetaV0alpha1IAMPermissions, 0, len(jsonData.IAM.Permissions)),
		}
		for _, perm := range jsonData.IAM.Permissions {
			v0Perm := pluginsv0alpha1.MetaV0alpha1IAMPermissions{}
			if perm.Action != "" {
				v0Perm.Action = &perm.Action
			}
			if perm.Scope != "" {
				v0Perm.Scope = &perm.Scope
			}
			iam.Permissions = append(iam.Permissions, v0Perm)
		}
		meta.Iam = iam
	}

	return meta
}
// pluginStorePluginToMeta converts a pluginstore.Plugin to a pluginsv0alpha1.MetaSpec.
// It mirrors pluginToMetaSpec but operates on the plugin store DTO, so the
// loadingStrategy and moduleHash must be supplied by the caller (either may be
// empty, in which case the corresponding field is omitted).
func pluginStorePluginToMeta(plugin pluginstore.Plugin, loadingStrategy plugins.LoadingStrategy, moduleHash string) pluginsv0alpha1.MetaSpec {
	spec := pluginsv0alpha1.MetaSpec{
		PluginJson: jsonDataToMetaJSONData(plugin.JSONData),
	}

	// Module details are recorded only when the plugin exposes a module path.
	if plugin.Module != "" {
		mod := &pluginsv0alpha1.MetaV0alpha1SpecModule{Path: plugin.Module}
		if moduleHash != "" {
			mod.Hash = &moduleHash
		}
		if loadingStrategy != "" {
			var strategy pluginsv0alpha1.MetaV0alpha1SpecModuleLoadingStrategy
			switch loadingStrategy {
			case plugins.LoadingStrategyFetch:
				strategy = pluginsv0alpha1.MetaV0alpha1SpecModuleLoadingStrategyFetch
			case plugins.LoadingStrategyScript:
				strategy = pluginsv0alpha1.MetaV0alpha1SpecModuleLoadingStrategyScript
			}
			mod.LoadingStrategy = &strategy
		}
		spec.Module = mod
	}

	if plugin.BaseURL != "" {
		spec.BaseURL = &plugin.BaseURL
	}

	// Signature info is attached only when a signature status is present.
	if plugin.Signature != "" {
		sig := &pluginsv0alpha1.MetaV0alpha1SpecSignature{
			Status: convertSignatureStatus(plugin.Signature),
		}
		if plugin.SignatureType != "" {
			st := convertSignatureType(plugin.SignatureType)
			sig.Type = &st
		}
		if plugin.SignatureOrg != "" {
			sig.Org = &plugin.SignatureOrg
		}
		spec.Signature = sig
	}

	if len(plugin.Children) > 0 {
		spec.Children = plugin.Children
	}

	// Angular detection is always populated, even when not detected.
	spec.Angular = &pluginsv0alpha1.MetaV0alpha1SpecAngular{
		Detected: plugin.Angular.Detected,
	}

	if len(plugin.Translations) > 0 {
		spec.Translations = plugin.Translations
	}

	return spec
}
// convertSignatureStatus converts plugins.SignatureStatus to
// pluginsv0alpha1.MetaV0alpha1SpecSignatureStatus. Any status without a known
// mapping is reported as unsigned.
func convertSignatureStatus(status plugins.SignatureStatus) pluginsv0alpha1.MetaV0alpha1SpecSignatureStatus {
	mapping := map[plugins.SignatureStatus]pluginsv0alpha1.MetaV0alpha1SpecSignatureStatus{
		plugins.SignatureStatusInternal: pluginsv0alpha1.MetaV0alpha1SpecSignatureStatusInternal,
		plugins.SignatureStatusValid:    pluginsv0alpha1.MetaV0alpha1SpecSignatureStatusValid,
		plugins.SignatureStatusInvalid:  pluginsv0alpha1.MetaV0alpha1SpecSignatureStatusInvalid,
		plugins.SignatureStatusModified: pluginsv0alpha1.MetaV0alpha1SpecSignatureStatusModified,
		plugins.SignatureStatusUnsigned: pluginsv0alpha1.MetaV0alpha1SpecSignatureStatusUnsigned,
	}
	if converted, ok := mapping[status]; ok {
		return converted
	}
	return pluginsv0alpha1.MetaV0alpha1SpecSignatureStatusUnsigned
}
// convertSignatureType converts plugins.SignatureType to
// pluginsv0alpha1.MetaV0alpha1SpecSignatureType. Any type without a known
// mapping falls back to the grafana signature type (matching the original
// switch default).
func convertSignatureType(sigType plugins.SignatureType) pluginsv0alpha1.MetaV0alpha1SpecSignatureType {
	mapping := map[plugins.SignatureType]pluginsv0alpha1.MetaV0alpha1SpecSignatureType{
		plugins.SignatureTypeGrafana:     pluginsv0alpha1.MetaV0alpha1SpecSignatureTypeGrafana,
		plugins.SignatureTypeCommercial:  pluginsv0alpha1.MetaV0alpha1SpecSignatureTypeCommercial,
		plugins.SignatureTypeCommunity:   pluginsv0alpha1.MetaV0alpha1SpecSignatureTypeCommunity,
		plugins.SignatureTypePrivate:     pluginsv0alpha1.MetaV0alpha1SpecSignatureTypePrivate,
		plugins.SignatureTypePrivateGlob: pluginsv0alpha1.MetaV0alpha1SpecSignatureTypePrivateGlob,
	}
	if converted, ok := mapping[sigType]; ok {
		return converted
	}
	return pluginsv0alpha1.MetaV0alpha1SpecSignatureTypeGrafana
}
// pluginToMetaSpec converts a fully loaded *plugins.Plugin to a
// pluginsv0alpha1.MetaSpec, carrying over the plugin.json data, module
// information, base URL, signature details, child plugin IDs, angular
// detection and translations.
func pluginToMetaSpec(plugin *plugins.Plugin) pluginsv0alpha1.MetaSpec {
	spec := pluginsv0alpha1.MetaSpec{
		PluginJson: jsonDataToMetaJSONData(plugin.JSONData),
	}

	// Module path; the loading strategy is fixed to "script" here.
	if plugin.Module != "" {
		strategy := pluginsv0alpha1.MetaV0alpha1SpecModuleLoadingStrategyScript
		spec.Module = &pluginsv0alpha1.MetaV0alpha1SpecModule{
			Path:            plugin.Module,
			LoadingStrategy: &strategy,
		}
	}

	if plugin.BaseURL != "" {
		spec.BaseURL = &plugin.BaseURL
	}

	// Signature is always populated; type and org only when present.
	sig := &pluginsv0alpha1.MetaV0alpha1SpecSignature{
		Status: convertSignatureStatus(plugin.Signature),
	}
	if plugin.SignatureType != "" {
		t := convertSignatureType(plugin.SignatureType)
		sig.Type = &t
	}
	if plugin.SignatureOrg != "" {
		sig.Org = &plugin.SignatureOrg
	}
	spec.Signature = sig

	// Children are referenced by their plugin IDs only.
	if len(plugin.Children) > 0 {
		ids := make([]string, 0, len(plugin.Children))
		for _, child := range plugin.Children {
			ids = append(ids, child.ID)
		}
		spec.Children = ids
	}

	spec.Angular = &pluginsv0alpha1.MetaV0alpha1SpecAngular{
		Detected: plugin.Angular.Detected,
	}

	if len(plugin.Translations) > 0 {
		spec.Translations = plugin.Translations
	}

	return spec
}
// grafanaComPluginVersionMeta represents the response from grafana.com API
// GET /api/plugins/{pluginId}/versions/{version}
type grafanaComPluginVersionMeta struct {
	// PluginID is the plugin slug as returned by grafana.com ("pluginSlug").
	PluginID string `json:"pluginSlug"`
	// Version is the plugin version this metadata describes.
	Version string `json:"version"`
	URL     string `json:"url"`
	Commit  string `json:"commit"`
	// Description and Keywords come from the published plugin.json.
	Description string    `json:"description"`
	Keywords    []string  `json:"keywords"`
	CreatedAt   time.Time `json:"createdAt"`
	UpdatedAt   time.Time `json:"updatedAt"`
	// JSON is the full plugin.json payload for this version.
	JSON   pluginsv0alpha1.MetaJSONData `json:"json"`
	Readme string                       `json:"readme"`
	// Downloads is the cumulative download count reported by grafana.com.
	Downloads     int    `json:"downloads"`
	Verified      bool   `json:"verified"`
	Status        string `json:"status"`
	StatusContext string `json:"statusContext"`
	// DownloadSlug — presumably the slug used to build download URLs;
	// TODO confirm against the grafana.com API.
	DownloadSlug string `json:"downloadSlug"`
	// SignatureType is one of "grafana", "commercial", "community",
	// "private" or "private-glob" (see grafanaComPluginVersionMetaToMetaSpec).
	SignatureType   string `json:"signatureType"`
	SignedByOrg     string `json:"signedByOrg"`
	SignedByOrgName string `json:"signedByOrgName"`
	// Packages holds checksums and download info for the "any"
	// (platform-independent) package variant.
	Packages struct {
		Any struct {
			Md5         string `json:"md5"`
			Sha256      string `json:"sha256"`
			PackageName string `json:"packageName"`
			DownloadURL string `json:"downloadUrl"`
		} `json:"any"`
	} `json:"packages"`
	// Links are HATEOAS-style rel/href pairs returned by the API.
	Links []struct {
		Rel  string `json:"rel"`
		Href string `json:"href"`
	} `json:"links"`
	// AngularDetected indicates whether grafana.com flagged this version
	// as using Angular.
	AngularDetected bool     `json:"angularDetected"`
	Scopes          []string `json:"scopes"`
}
// grafanaComPluginVersionMetaToMetaSpec converts a grafanaComPluginVersionMeta
// to a pluginsv0alpha1.MetaSpec. A plugin that reports any signature type is
// recorded with a valid signature status; unrecognized signature type strings
// leave the signature type unset.
func grafanaComPluginVersionMetaToMetaSpec(gcomMeta grafanaComPluginVersionMeta) pluginsv0alpha1.MetaSpec {
	spec := pluginsv0alpha1.MetaSpec{
		PluginJson: gcomMeta.JSON,
	}

	if gcomMeta.SignatureType != "" {
		sig := &pluginsv0alpha1.MetaV0alpha1SpecSignature{
			Status: pluginsv0alpha1.MetaV0alpha1SpecSignatureStatusValid,
		}

		// Resolve the signature type once, then assign a single pointer.
		var sigType pluginsv0alpha1.MetaV0alpha1SpecSignatureType
		recognized := true
		switch gcomMeta.SignatureType {
		case "grafana":
			sigType = pluginsv0alpha1.MetaV0alpha1SpecSignatureTypeGrafana
		case "commercial":
			sigType = pluginsv0alpha1.MetaV0alpha1SpecSignatureTypeCommercial
		case "community":
			sigType = pluginsv0alpha1.MetaV0alpha1SpecSignatureTypeCommunity
		case "private":
			sigType = pluginsv0alpha1.MetaV0alpha1SpecSignatureTypePrivate
		case "private-glob":
			sigType = pluginsv0alpha1.MetaV0alpha1SpecSignatureTypePrivateGlob
		default:
			recognized = false
		}
		if recognized {
			sig.Type = &sigType
		}

		if gcomMeta.SignedByOrg != "" {
			sig.Org = &gcomMeta.SignedByOrg
		}
		spec.Signature = sig
	}

	// Angular info is always set, even when not detected.
	spec.Angular = &pluginsv0alpha1.MetaV0alpha1SpecAngular{
		Detected: gcomMeta.AngularDetected,
	}
	return spec
}

View File

@@ -2,7 +2,6 @@ package meta
import (
"context"
"encoding/json"
"errors"
"os"
"path/filepath"
@@ -13,7 +12,15 @@ import (
pluginsv0alpha1 "github.com/grafana/grafana/apps/plugins/pkg/apis/plugins/v0alpha1"
"github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/plugins/config"
pluginsLoader "github.com/grafana/grafana/pkg/plugins/manager/loader"
"github.com/grafana/grafana/pkg/plugins/manager/pipeline/bootstrap"
"github.com/grafana/grafana/pkg/plugins/manager/pipeline/discovery"
"github.com/grafana/grafana/pkg/plugins/manager/pipeline/initialization"
"github.com/grafana/grafana/pkg/plugins/manager/pipeline/termination"
"github.com/grafana/grafana/pkg/plugins/manager/pipeline/validation"
"github.com/grafana/grafana/pkg/plugins/manager/sources"
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginerrs"
)
const (
@@ -23,9 +30,10 @@ const (
// CoreProvider retrieves plugin metadata for core plugins.
type CoreProvider struct {
mu sync.RWMutex
loadedPlugins map[string]pluginsv0alpha1.MetaJSONData
loadedPlugins map[string]pluginsv0alpha1.MetaSpec
initialized bool
ttl time.Duration
loader pluginsLoader.Service
}
// NewCoreProvider creates a new CoreProvider for core plugins.
@@ -35,9 +43,13 @@ func NewCoreProvider() *CoreProvider {
// NewCoreProviderWithTTL creates a new CoreProvider with a custom TTL.
func NewCoreProviderWithTTL(ttl time.Duration) *CoreProvider {
cfg := &config.PluginManagementCfg{
Features: config.Features{},
}
return &CoreProvider{
loadedPlugins: make(map[string]pluginsv0alpha1.MetaJSONData),
loadedPlugins: make(map[string]pluginsv0alpha1.MetaSpec),
ttl: ttl,
loader: createLoader(cfg),
}
}
@@ -76,9 +88,9 @@ func (p *CoreProvider) GetMeta(ctx context.Context, pluginID, _ string) (*Result
p.initialized = true
}
if meta, found := p.loadedPlugins[pluginID]; found {
if spec, found := p.loadedPlugins[pluginID]; found {
return &Result{
Meta: meta,
Meta: spec,
TTL: p.ttl,
}, nil
}
@@ -86,8 +98,8 @@ func (p *CoreProvider) GetMeta(ctx context.Context, pluginID, _ string) (*Result
return nil, ErrMetaNotFound
}
// loadPlugins discovers and caches all core plugins.
// Returns an error if the static root path cannot be found or if plugin discovery fails.
// loadPlugins discovers and caches all core plugins by fully loading them.
// Returns an error if the static root path cannot be found or if plugin loading fails.
// This error will be handled gracefully by GetMeta, which will return ErrMetaNotFound
// to allow other providers to handle the request.
func (p *CoreProvider) loadPlugins(ctx context.Context) error {
@@ -108,496 +120,51 @@ func (p *CoreProvider) loadPlugins(ctx context.Context) error {
panelPath := filepath.Join(staticRootPath, "app", "plugins", "panel")
src := sources.NewLocalSource(plugins.ClassCore, []string{datasourcePath, panelPath})
ps, err := src.Discover(ctx)
loadedPlugins, err := p.loader.Load(ctx, src)
if err != nil {
return err
}
if len(ps) == 0 {
logging.DefaultLogger.Warn("CoreProvider: no core plugins found during discovery")
if len(loadedPlugins) == 0 {
logging.DefaultLogger.Warn("CoreProvider: no core plugins found during loading")
return nil
}
for _, bundle := range ps {
meta := jsonDataToMetaJSONData(bundle.Primary.JSONData)
p.loadedPlugins[bundle.Primary.JSONData.ID] = meta
for _, plugin := range loadedPlugins {
metaSpec := pluginToMetaSpec(plugin)
p.loadedPlugins[plugin.ID] = metaSpec
}
return nil
}
// jsonDataToMetaJSONData converts a plugins.JSONData to a pluginsv0alpha1.MetaJSONData.
// nolint:gocyclo
func jsonDataToMetaJSONData(jsonData plugins.JSONData) pluginsv0alpha1.MetaJSONData {
meta := pluginsv0alpha1.MetaJSONData{
Id: jsonData.ID,
Name: jsonData.Name,
}
// Map plugin type
switch jsonData.Type {
case plugins.TypeApp:
meta.Type = pluginsv0alpha1.MetaJSONDataTypeApp
case plugins.TypeDataSource:
meta.Type = pluginsv0alpha1.MetaJSONDataTypeDatasource
case plugins.TypePanel:
meta.Type = pluginsv0alpha1.MetaJSONDataTypePanel
case plugins.TypeRenderer:
meta.Type = pluginsv0alpha1.MetaJSONDataTypeRenderer
}
// Map Info
meta.Info = pluginsv0alpha1.MetaInfo{
Keywords: jsonData.Info.Keywords,
Logos: pluginsv0alpha1.MetaV0alpha1InfoLogos{
Small: jsonData.Info.Logos.Small,
Large: jsonData.Info.Logos.Large,
// createLoader creates a loader service configured for core plugins.
func createLoader(cfg *config.PluginManagementCfg) pluginsLoader.Service {
d := discovery.New(cfg, discovery.Opts{
FilterFuncs: []discovery.FilterFunc{
// Allow all plugin types for core plugins
},
Updated: jsonData.Info.Updated,
Version: jsonData.Info.Version,
}
})
b := bootstrap.New(cfg, bootstrap.Opts{
DecorateFuncs: []bootstrap.DecorateFunc{}, // no decoration required for metadata
})
v := validation.New(cfg, validation.Opts{
ValidateFuncs: []validation.ValidateFunc{
// Skip validation for core plugins - they're trusted
},
})
i := initialization.New(cfg, initialization.Opts{
InitializeFuncs: []initialization.InitializeFunc{
// Skip initialization - we only need metadata, not running plugins
},
})
t, _ := termination.New(cfg, termination.Opts{
TerminateFuncs: []termination.TerminateFunc{
// No termination needed for metadata-only loading
},
})
if jsonData.Info.Description != "" {
meta.Info.Description = &jsonData.Info.Description
}
et := pluginerrs.ProvideErrorTracker()
if jsonData.Info.Author.Name != "" || jsonData.Info.Author.URL != "" {
author := &pluginsv0alpha1.MetaV0alpha1InfoAuthor{}
if jsonData.Info.Author.Name != "" {
author.Name = &jsonData.Info.Author.Name
}
if jsonData.Info.Author.URL != "" {
author.Url = &jsonData.Info.Author.URL
}
meta.Info.Author = author
}
if len(jsonData.Info.Links) > 0 {
meta.Info.Links = make([]pluginsv0alpha1.MetaV0alpha1InfoLinks, 0, len(jsonData.Info.Links))
for _, link := range jsonData.Info.Links {
v0Link := pluginsv0alpha1.MetaV0alpha1InfoLinks{}
if link.Name != "" {
v0Link.Name = &link.Name
}
if link.URL != "" {
v0Link.Url = &link.URL
}
meta.Info.Links = append(meta.Info.Links, v0Link)
}
}
if len(jsonData.Info.Screenshots) > 0 {
meta.Info.Screenshots = make([]pluginsv0alpha1.MetaV0alpha1InfoScreenshots, 0, len(jsonData.Info.Screenshots))
for _, screenshot := range jsonData.Info.Screenshots {
v0Screenshot := pluginsv0alpha1.MetaV0alpha1InfoScreenshots{}
if screenshot.Name != "" {
v0Screenshot.Name = &screenshot.Name
}
if screenshot.Path != "" {
v0Screenshot.Path = &screenshot.Path
}
meta.Info.Screenshots = append(meta.Info.Screenshots, v0Screenshot)
}
}
// Map Dependencies
meta.Dependencies = pluginsv0alpha1.MetaDependencies{
GrafanaDependency: jsonData.Dependencies.GrafanaDependency,
}
if jsonData.Dependencies.GrafanaVersion != "" {
meta.Dependencies.GrafanaVersion = &jsonData.Dependencies.GrafanaVersion
}
if len(jsonData.Dependencies.Plugins) > 0 {
meta.Dependencies.Plugins = make([]pluginsv0alpha1.MetaV0alpha1DependenciesPlugins, 0, len(jsonData.Dependencies.Plugins))
for _, dep := range jsonData.Dependencies.Plugins {
var depType pluginsv0alpha1.MetaV0alpha1DependenciesPluginsType
switch dep.Type {
case "app":
depType = pluginsv0alpha1.MetaV0alpha1DependenciesPluginsTypeApp
case "datasource":
depType = pluginsv0alpha1.MetaV0alpha1DependenciesPluginsTypeDatasource
case "panel":
depType = pluginsv0alpha1.MetaV0alpha1DependenciesPluginsTypePanel
}
meta.Dependencies.Plugins = append(meta.Dependencies.Plugins, pluginsv0alpha1.MetaV0alpha1DependenciesPlugins{
Id: dep.ID,
Type: depType,
Name: dep.Name,
})
}
}
if len(jsonData.Dependencies.Extensions.ExposedComponents) > 0 {
meta.Dependencies.Extensions = &pluginsv0alpha1.MetaV0alpha1DependenciesExtensions{
ExposedComponents: jsonData.Dependencies.Extensions.ExposedComponents,
}
}
// Map optional boolean fields
if jsonData.Alerting {
meta.Alerting = &jsonData.Alerting
}
if jsonData.Annotations {
meta.Annotations = &jsonData.Annotations
}
if jsonData.AutoEnabled {
meta.AutoEnabled = &jsonData.AutoEnabled
}
if jsonData.Backend {
meta.Backend = &jsonData.Backend
}
if jsonData.BuiltIn {
meta.BuiltIn = &jsonData.BuiltIn
}
if jsonData.HideFromList {
meta.HideFromList = &jsonData.HideFromList
}
if jsonData.Logs {
meta.Logs = &jsonData.Logs
}
if jsonData.Metrics {
meta.Metrics = &jsonData.Metrics
}
if jsonData.MultiValueFilterOperators {
meta.MultiValueFilterOperators = &jsonData.MultiValueFilterOperators
}
if jsonData.Preload {
meta.Preload = &jsonData.Preload
}
if jsonData.SkipDataQuery {
meta.SkipDataQuery = &jsonData.SkipDataQuery
}
if jsonData.Streaming {
meta.Streaming = &jsonData.Streaming
}
if jsonData.Tracing {
meta.Tracing = &jsonData.Tracing
}
// Map category
if jsonData.Category != "" {
var category pluginsv0alpha1.MetaJSONDataCategory
switch jsonData.Category {
case "tsdb":
category = pluginsv0alpha1.MetaJSONDataCategoryTsdb
case "logging":
category = pluginsv0alpha1.MetaJSONDataCategoryLogging
case "cloud":
category = pluginsv0alpha1.MetaJSONDataCategoryCloud
case "tracing":
category = pluginsv0alpha1.MetaJSONDataCategoryTracing
case "profiling":
category = pluginsv0alpha1.MetaJSONDataCategoryProfiling
case "sql":
category = pluginsv0alpha1.MetaJSONDataCategorySql
case "enterprise":
category = pluginsv0alpha1.MetaJSONDataCategoryEnterprise
case "iot":
category = pluginsv0alpha1.MetaJSONDataCategoryIot
case "other":
category = pluginsv0alpha1.MetaJSONDataCategoryOther
default:
category = pluginsv0alpha1.MetaJSONDataCategoryOther
}
meta.Category = &category
}
// Map state
if jsonData.State != "" {
var state pluginsv0alpha1.MetaJSONDataState
switch jsonData.State {
case plugins.ReleaseStateAlpha:
state = pluginsv0alpha1.MetaJSONDataStateAlpha
case plugins.ReleaseStateBeta:
state = pluginsv0alpha1.MetaJSONDataStateBeta
default:
}
if state != "" {
meta.State = &state
}
}
// Map executable
if jsonData.Executable != "" {
meta.Executable = &jsonData.Executable
}
// Map QueryOptions
if len(jsonData.QueryOptions) > 0 {
queryOptions := &pluginsv0alpha1.MetaQueryOptions{}
if val, ok := jsonData.QueryOptions["maxDataPoints"]; ok {
queryOptions.MaxDataPoints = &val
}
if val, ok := jsonData.QueryOptions["minInterval"]; ok {
queryOptions.MinInterval = &val
}
if val, ok := jsonData.QueryOptions["cacheTimeout"]; ok {
queryOptions.CacheTimeout = &val
}
meta.QueryOptions = queryOptions
}
// Map Includes
if len(jsonData.Includes) > 0 {
meta.Includes = make([]pluginsv0alpha1.MetaInclude, 0, len(jsonData.Includes))
for _, include := range jsonData.Includes {
v0Include := pluginsv0alpha1.MetaInclude{}
if include.UID != "" {
v0Include.Uid = &include.UID
}
if include.Type != "" {
var includeType pluginsv0alpha1.MetaIncludeType
switch include.Type {
case "dashboard":
includeType = pluginsv0alpha1.MetaIncludeTypeDashboard
case "page":
includeType = pluginsv0alpha1.MetaIncludeTypePage
case "panel":
includeType = pluginsv0alpha1.MetaIncludeTypePanel
case "datasource":
includeType = pluginsv0alpha1.MetaIncludeTypeDatasource
}
v0Include.Type = &includeType
}
if include.Name != "" {
v0Include.Name = &include.Name
}
if include.Component != "" {
v0Include.Component = &include.Component
}
if include.Role != "" {
var role pluginsv0alpha1.MetaIncludeRole
switch include.Role {
case "Admin":
role = pluginsv0alpha1.MetaIncludeRoleAdmin
case "Editor":
role = pluginsv0alpha1.MetaIncludeRoleEditor
case "Viewer":
role = pluginsv0alpha1.MetaIncludeRoleViewer
}
v0Include.Role = &role
}
if include.Action != "" {
v0Include.Action = &include.Action
}
if include.Path != "" {
v0Include.Path = &include.Path
}
if include.AddToNav {
v0Include.AddToNav = &include.AddToNav
}
if include.DefaultNav {
v0Include.DefaultNav = &include.DefaultNav
}
if include.Icon != "" {
v0Include.Icon = &include.Icon
}
meta.Includes = append(meta.Includes, v0Include)
}
}
// Map Routes
if len(jsonData.Routes) > 0 {
meta.Routes = make([]pluginsv0alpha1.MetaRoute, 0, len(jsonData.Routes))
for _, route := range jsonData.Routes {
v0Route := pluginsv0alpha1.MetaRoute{}
if route.Path != "" {
v0Route.Path = &route.Path
}
if route.Method != "" {
v0Route.Method = &route.Method
}
if route.URL != "" {
v0Route.Url = &route.URL
}
if route.ReqRole != "" {
reqRole := string(route.ReqRole)
v0Route.ReqRole = &reqRole
}
if route.ReqAction != "" {
v0Route.ReqAction = &route.ReqAction
}
if len(route.Headers) > 0 {
headers := make([]string, 0, len(route.Headers))
for _, header := range route.Headers {
headers = append(headers, header.Name+": "+header.Content)
}
v0Route.Headers = headers
}
if len(route.URLParams) > 0 {
v0Route.UrlParams = make([]pluginsv0alpha1.MetaV0alpha1RouteUrlParams, 0, len(route.URLParams))
for _, param := range route.URLParams {
v0Param := pluginsv0alpha1.MetaV0alpha1RouteUrlParams{}
if param.Name != "" {
v0Param.Name = &param.Name
}
if param.Content != "" {
v0Param.Content = &param.Content
}
v0Route.UrlParams = append(v0Route.UrlParams, v0Param)
}
}
if route.TokenAuth != nil {
v0Route.TokenAuth = &pluginsv0alpha1.MetaV0alpha1RouteTokenAuth{}
if route.TokenAuth.Url != "" {
v0Route.TokenAuth.Url = &route.TokenAuth.Url
}
if len(route.TokenAuth.Scopes) > 0 {
v0Route.TokenAuth.Scopes = route.TokenAuth.Scopes
}
if len(route.TokenAuth.Params) > 0 {
v0Route.TokenAuth.Params = make(map[string]interface{})
for k, v := range route.TokenAuth.Params {
v0Route.TokenAuth.Params[k] = v
}
}
}
if route.JwtTokenAuth != nil {
v0Route.JwtTokenAuth = &pluginsv0alpha1.MetaV0alpha1RouteJwtTokenAuth{}
if route.JwtTokenAuth.Url != "" {
v0Route.JwtTokenAuth.Url = &route.JwtTokenAuth.Url
}
if len(route.JwtTokenAuth.Scopes) > 0 {
v0Route.JwtTokenAuth.Scopes = route.JwtTokenAuth.Scopes
}
if len(route.JwtTokenAuth.Params) > 0 {
v0Route.JwtTokenAuth.Params = make(map[string]interface{})
for k, v := range route.JwtTokenAuth.Params {
v0Route.JwtTokenAuth.Params[k] = v
}
}
}
if len(route.Body) > 0 {
var bodyMap map[string]interface{}
if err := json.Unmarshal(route.Body, &bodyMap); err == nil {
v0Route.Body = bodyMap
}
}
meta.Routes = append(meta.Routes, v0Route)
}
}
// Map Extensions
if len(jsonData.Extensions.AddedLinks) > 0 || len(jsonData.Extensions.AddedComponents) > 0 ||
len(jsonData.Extensions.ExposedComponents) > 0 || len(jsonData.Extensions.ExtensionPoints) > 0 {
extensions := &pluginsv0alpha1.MetaExtensions{}
if len(jsonData.Extensions.AddedLinks) > 0 {
extensions.AddedLinks = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsAddedLinks, 0, len(jsonData.Extensions.AddedLinks))
for _, link := range jsonData.Extensions.AddedLinks {
v0Link := pluginsv0alpha1.MetaV0alpha1ExtensionsAddedLinks{
Targets: link.Targets,
Title: link.Title,
}
if link.Description != "" {
v0Link.Description = &link.Description
}
extensions.AddedLinks = append(extensions.AddedLinks, v0Link)
}
}
if len(jsonData.Extensions.AddedComponents) > 0 {
extensions.AddedComponents = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsAddedComponents, 0, len(jsonData.Extensions.AddedComponents))
for _, comp := range jsonData.Extensions.AddedComponents {
v0Comp := pluginsv0alpha1.MetaV0alpha1ExtensionsAddedComponents{
Targets: comp.Targets,
Title: comp.Title,
}
if comp.Description != "" {
v0Comp.Description = &comp.Description
}
extensions.AddedComponents = append(extensions.AddedComponents, v0Comp)
}
}
if len(jsonData.Extensions.ExposedComponents) > 0 {
extensions.ExposedComponents = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsExposedComponents, 0, len(jsonData.Extensions.ExposedComponents))
for _, comp := range jsonData.Extensions.ExposedComponents {
v0Comp := pluginsv0alpha1.MetaV0alpha1ExtensionsExposedComponents{
Id: comp.Id,
}
if comp.Title != "" {
v0Comp.Title = &comp.Title
}
if comp.Description != "" {
v0Comp.Description = &comp.Description
}
extensions.ExposedComponents = append(extensions.ExposedComponents, v0Comp)
}
}
if len(jsonData.Extensions.ExtensionPoints) > 0 {
extensions.ExtensionPoints = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsExtensionPoints, 0, len(jsonData.Extensions.ExtensionPoints))
for _, point := range jsonData.Extensions.ExtensionPoints {
v0Point := pluginsv0alpha1.MetaV0alpha1ExtensionsExtensionPoints{
Id: point.Id,
}
if point.Title != "" {
v0Point.Title = &point.Title
}
if point.Description != "" {
v0Point.Description = &point.Description
}
extensions.ExtensionPoints = append(extensions.ExtensionPoints, v0Point)
}
}
meta.Extensions = extensions
}
// Map Roles
if len(jsonData.Roles) > 0 {
meta.Roles = make([]pluginsv0alpha1.MetaRole, 0, len(jsonData.Roles))
for _, role := range jsonData.Roles {
v0Role := pluginsv0alpha1.MetaRole{
Grants: role.Grants,
}
if role.Role.Name != "" || role.Role.Description != "" || len(role.Role.Permissions) > 0 {
v0RoleRole := &pluginsv0alpha1.MetaV0alpha1RoleRole{}
if role.Role.Name != "" {
v0RoleRole.Name = &role.Role.Name
}
if role.Role.Description != "" {
v0RoleRole.Description = &role.Role.Description
}
if len(role.Role.Permissions) > 0 {
v0RoleRole.Permissions = make([]pluginsv0alpha1.MetaV0alpha1RoleRolePermissions, 0, len(role.Role.Permissions))
for _, perm := range role.Role.Permissions {
v0Perm := pluginsv0alpha1.MetaV0alpha1RoleRolePermissions{}
if perm.Action != "" {
v0Perm.Action = &perm.Action
}
if perm.Scope != "" {
v0Perm.Scope = &perm.Scope
}
v0RoleRole.Permissions = append(v0RoleRole.Permissions, v0Perm)
}
}
v0Role.Role = v0RoleRole
}
meta.Roles = append(meta.Roles, v0Role)
}
}
// Map IAM
if jsonData.IAM != nil && len(jsonData.IAM.Permissions) > 0 {
iam := &pluginsv0alpha1.MetaIAM{
Permissions: make([]pluginsv0alpha1.MetaV0alpha1IAMPermissions, 0, len(jsonData.IAM.Permissions)),
}
for _, perm := range jsonData.IAM.Permissions {
v0Perm := pluginsv0alpha1.MetaV0alpha1IAMPermissions{}
if perm.Action != "" {
v0Perm.Action = &perm.Action
}
if perm.Scope != "" {
v0Perm.Scope = &perm.Scope
}
iam.Permissions = append(iam.Permissions, v0Perm)
}
meta.Iam = iam
}
return meta
return pluginsLoader.New(cfg, d, b, v, i, t, et)
}

View File

@@ -22,10 +22,12 @@ func TestCoreProvider_GetMeta(t *testing.T) {
t.Run("returns cached plugin when available", func(t *testing.T) {
provider := NewCoreProvider()
expectedMeta := pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Test Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
expectedMeta := pluginsv0alpha1.MetaSpec{
PluginJson: pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Test Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
},
}
provider.mu.Lock()
@@ -58,10 +60,12 @@ func TestCoreProvider_GetMeta(t *testing.T) {
t.Run("ignores version parameter", func(t *testing.T) {
provider := NewCoreProvider()
expectedMeta := pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Test Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
expectedMeta := pluginsv0alpha1.MetaSpec{
PluginJson: pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Test Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
},
}
provider.mu.Lock()
@@ -81,10 +85,12 @@ func TestCoreProvider_GetMeta(t *testing.T) {
customTTL := 2 * time.Hour
provider := NewCoreProviderWithTTL(customTTL)
expectedMeta := pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Test Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
expectedMeta := pluginsv0alpha1.MetaSpec{
PluginJson: pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Test Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
},
}
provider.mu.Lock()
@@ -226,8 +232,8 @@ func TestCoreProvider_loadPlugins(t *testing.T) {
if loaded {
result, err := provider.GetMeta(ctx, "test-datasource", "1.0.0")
require.NoError(t, err)
assert.Equal(t, "test-datasource", result.Meta.Id)
assert.Equal(t, "Test Datasource", result.Meta.Name)
assert.Equal(t, "test-datasource", result.Meta.PluginJson.Id)
assert.Equal(t, "Test Datasource", result.Meta.PluginJson.Name)
}
})
}

View File

@@ -0,0 +1,53 @@
package meta
import (
"context"
"time"
"github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore"
)
const (
defaultLocalTTL = 1 * time.Hour
)
// PluginAssetsCalculator is an interface for calculating plugin asset information.
// LocalProvider requires this to calculate loading strategy and module hash.
type PluginAssetsCalculator interface {
LoadingStrategy(ctx context.Context, p pluginstore.Plugin) plugins.LoadingStrategy
ModuleHash(ctx context.Context, p pluginstore.Plugin) string
}
// LocalProvider retrieves plugin metadata for locally installed plugins.
// It uses the plugin store to access plugins that have already been loaded.
type LocalProvider struct {
store pluginstore.Store
pluginAssets PluginAssetsCalculator
}
// NewLocalProvider creates a new LocalProvider for locally installed plugins.
// pluginAssets is required for calculating loading strategy and module hash.
func NewLocalProvider(pluginStore pluginstore.Store, pluginAssets PluginAssetsCalculator) *LocalProvider {
return &LocalProvider{
store: pluginStore,
pluginAssets: pluginAssets,
}
}
// GetMeta retrieves plugin metadata for locally installed plugins.
func (p *LocalProvider) GetMeta(ctx context.Context, pluginID, version string) (*Result, error) {
plugin, exists := p.store.Plugin(ctx, pluginID)
if !exists {
return nil, ErrMetaNotFound
}
loadingStrategy := p.pluginAssets.LoadingStrategy(ctx, plugin)
moduleHash := p.pluginAssets.ModuleHash(ctx, plugin)
spec := pluginStorePluginToMeta(plugin, loadingStrategy, moduleHash)
return &Result{
Meta: spec,
TTL: defaultLocalTTL,
}, nil
}

View File

@@ -16,7 +16,7 @@ const (
// cachedMeta represents a cached metadata entry with expiration time
type cachedMeta struct {
meta pluginsv0alpha1.MetaJSONData
meta pluginsv0alpha1.MetaSpec
ttl time.Duration
expiresAt time.Time
}
@@ -84,7 +84,7 @@ func (pm *ProviderManager) GetMeta(ctx context.Context, pluginID, version string
if err == nil {
// Don't cache results with a zero TTL
if result.TTL == 0 {
continue
return result, nil
}
pm.cacheMu.Lock()

View File

@@ -35,10 +35,12 @@ func TestProviderManager_GetMeta(t *testing.T) {
ctx := context.Background()
t.Run("returns cached result when available and not expired", func(t *testing.T) {
cachedMeta := pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Test Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
cachedMeta := pluginsv0alpha1.MetaSpec{
PluginJson: pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Test Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
},
}
provider := &mockProvider{
@@ -60,8 +62,10 @@ func TestProviderManager_GetMeta(t *testing.T) {
provider.getMetaFunc = func(ctx context.Context, pluginID, version string) (*Result, error) {
return &Result{
Meta: pluginsv0alpha1.MetaJSONData{Id: "different"},
TTL: time.Hour,
Meta: pluginsv0alpha1.MetaSpec{
PluginJson: pluginsv0alpha1.MetaJSONData{Id: "different"},
},
TTL: time.Hour,
}, nil
}
@@ -73,10 +77,12 @@ func TestProviderManager_GetMeta(t *testing.T) {
})
t.Run("fetches from provider when not cached", func(t *testing.T) {
expectedMeta := pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Test Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
expectedMeta := pluginsv0alpha1.MetaSpec{
PluginJson: pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Test Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
},
}
expectedTTL := 2 * time.Hour
@@ -107,19 +113,16 @@ func TestProviderManager_GetMeta(t *testing.T) {
assert.Equal(t, expectedTTL, cached.ttl)
})
t.Run("does not cache result with zero TTL and tries next provider", func(t *testing.T) {
zeroTTLMeta := pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Zero TTL Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
}
expectedMeta := pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Test Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
t.Run("does not cache result with zero TTL", func(t *testing.T) {
zeroTTLMeta := pluginsv0alpha1.MetaSpec{
PluginJson: pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Zero TTL Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
},
}
provider1 := &mockProvider{
provider := &mockProvider{
getMetaFunc: func(ctx context.Context, pluginID, version string) (*Result, error) {
return &Result{
Meta: zeroTTLMeta,
@@ -127,37 +130,30 @@ func TestProviderManager_GetMeta(t *testing.T) {
}, nil
},
}
provider2 := &mockProvider{
getMetaFunc: func(ctx context.Context, pluginID, version string) (*Result, error) {
return &Result{
Meta: expectedMeta,
TTL: time.Hour,
}, nil
},
}
pm := NewProviderManager(provider1, provider2)
pm := NewProviderManager(provider)
result, err := pm.GetMeta(ctx, "test-plugin", "1.0.0")
require.NoError(t, err)
require.NotNil(t, result)
assert.Equal(t, expectedMeta, result.Meta)
assert.Equal(t, zeroTTLMeta, result.Meta)
assert.Equal(t, time.Duration(0), result.TTL)
pm.cacheMu.RLock()
cached, exists := pm.cache["test-plugin:1.0.0"]
_, exists := pm.cache["test-plugin:1.0.0"]
pm.cacheMu.RUnlock()
assert.True(t, exists)
assert.Equal(t, expectedMeta, cached.meta)
assert.Equal(t, time.Hour, cached.ttl)
assert.False(t, exists, "zero TTL results should not be cached")
})
t.Run("tries next provider when first returns ErrMetaNotFound", func(t *testing.T) {
expectedMeta := pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Test Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
expectedMeta := pluginsv0alpha1.MetaSpec{
PluginJson: pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Test Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
},
}
provider1 := &mockProvider{
@@ -229,15 +225,19 @@ func TestProviderManager_GetMeta(t *testing.T) {
})
t.Run("skips expired cache entries", func(t *testing.T) {
expiredMeta := pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Expired Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
expiredMeta := pluginsv0alpha1.MetaSpec{
PluginJson: pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Expired Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
},
}
expectedMeta := pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Test Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
expectedMeta := pluginsv0alpha1.MetaSpec{
PluginJson: pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Test Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
},
}
callCount := 0
@@ -272,15 +272,19 @@ func TestProviderManager_GetMeta(t *testing.T) {
})
t.Run("uses first successful provider", func(t *testing.T) {
expectedMeta1 := pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Provider 1 Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
expectedMeta1 := pluginsv0alpha1.MetaSpec{
PluginJson: pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Provider 1 Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
},
}
expectedMeta2 := pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Provider 2 Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
expectedMeta2 := pluginsv0alpha1.MetaSpec{
PluginJson: pluginsv0alpha1.MetaJSONData{
Id: "test-plugin",
Name: "Provider 2 Plugin",
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
},
}
provider1 := &mockProvider{
@@ -331,9 +335,9 @@ func TestProviderManager_Run(t *testing.T) {
func TestProviderManager_cleanupExpired(t *testing.T) {
t.Run("removes expired entries", func(t *testing.T) {
validMeta := pluginsv0alpha1.MetaJSONData{Id: "valid"}
expiredMeta1 := pluginsv0alpha1.MetaJSONData{Id: "expired1"}
expiredMeta2 := pluginsv0alpha1.MetaJSONData{Id: "expired2"}
validMeta := pluginsv0alpha1.MetaSpec{PluginJson: pluginsv0alpha1.MetaJSONData{Id: "valid"}}
expiredMeta1 := pluginsv0alpha1.MetaSpec{PluginJson: pluginsv0alpha1.MetaJSONData{Id: "expired1"}}
expiredMeta2 := pluginsv0alpha1.MetaSpec{PluginJson: pluginsv0alpha1.MetaJSONData{Id: "expired2"}}
provider := &mockProvider{
getMetaFunc: func(ctx context.Context, pluginID, version string) (*Result, error) {

View File

@@ -14,7 +14,7 @@ var (
// Result contains plugin metadata along with its recommended TTL.
type Result struct {
Meta pluginsv0alpha1.MetaJSONData
Meta pluginsv0alpha1.MetaSpec
TTL time.Duration
}

View File

@@ -121,8 +121,19 @@ func (s *MetaStorage) List(ctx context.Context, options *internalversion.ListOpt
continue
}
pluginMeta := createMetaFromMetaJSONData(result.Meta, plugin.Name, plugin.Namespace)
metaItems = append(metaItems, *pluginMeta)
pluginMeta := pluginsv0alpha1.Meta{
ObjectMeta: metav1.ObjectMeta{
Name: plugin.Name,
Namespace: plugin.Namespace,
},
Spec: result.Meta,
}
pluginMeta.SetGroupVersionKind(schema.GroupVersionKind{
Group: pluginsv0alpha1.APIGroup,
Version: pluginsv0alpha1.APIVersion,
Kind: pluginsv0alpha1.MetaKind().Kind(),
})
metaItems = append(metaItems, pluginMeta)
}
list := &pluginsv0alpha1.MetaList{
@@ -169,27 +180,18 @@ func (s *MetaStorage) Get(ctx context.Context, name string, options *metav1.GetO
return nil, apierrors.NewInternalError(fmt.Errorf("failed to fetch plugin metadata: %w", err))
}
return createMetaFromMetaJSONData(result.Meta, name, ns.Value), nil
}
// createMetaFromMetaJSONData creates a Meta k8s object from MetaJSONData and plugin metadata.
func createMetaFromMetaJSONData(pluginJSON pluginsv0alpha1.MetaJSONData, name, namespace string) *pluginsv0alpha1.Meta {
pluginMeta := &pluginsv0alpha1.Meta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: pluginsv0alpha1.MetaSpec{
PluginJSON: pluginJSON,
Name: plugin.Name,
Namespace: plugin.Namespace,
},
Spec: result.Meta,
}
// Set the GroupVersionKind
pluginMeta.SetGroupVersionKind(schema.GroupVersionKind{
Group: pluginsv0alpha1.APIGroup,
Version: pluginsv0alpha1.APIVersion,
Kind: pluginsv0alpha1.MetaKind().Kind(),
})
return pluginMeta
return pluginMeta, nil
}

View File

@@ -0,0 +1,249 @@
package app
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"slices"
"strings"
"testing"
"github.com/grafana/grafana-app-sdk/resource"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/endpoints/request"
pluginsv0alpha1 "github.com/grafana/grafana/apps/plugins/pkg/apis/plugins/v0alpha1"
"github.com/grafana/grafana/apps/plugins/pkg/app/meta"
"github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore"
)
// TestMetaStorageListPreload verifies that MetaStorage.List resolves the
// Preload flag for every plugin in the list: the two test plugins are served
// by local providers (one with Preload=true, one with Preload=false), while
// the plugins app itself falls through to the catalog provider, whose fake
// server always reports preload=true. List is called twice so that both the
// cold path and the provider manager's cached path are exercised.
func TestMetaStorageListPreload(t *testing.T) {
	ctx := request.WithNamespace(context.Background(), "default")
	// Local plugin whose plugin.json preloads — resolved by a local provider.
	preloadPlugin := pluginstore.Plugin{
		JSONData: plugins.JSONData{
			ID:      "test-plugin",
			Name:    "Test Plugin",
			Type:    plugins.TypeDataSource,
			Info:    plugins.Info{Version: "1.0.0"},
			Preload: true,
		},
	}
	// Local plugin that does not preload — resolved by the second local provider.
	nonPreloadPlugin := pluginstore.Plugin{
		JSONData: plugins.JSONData{
			ID:      "test-plugin-2",
			Name:    "Test Plugin 2",
			Type:    plugins.TypeDataSource,
			Info:    plugins.Info{Version: "1.0.0"},
			Preload: false,
		},
	}
	// Each mock store knows exactly one plugin, so each local provider can
	// only answer for its own plugin and everything else must fall through.
	store := &mockPluginStore{plugins: map[string]pluginstore.Plugin{
		"test-plugin": preloadPlugin,
	}}
	store2 := &mockPluginStore{plugins: map[string]pluginstore.Plugin{
		"test-plugin-2": nonPreloadPlugin,
	}}
	// Fake catalog API. It validates the request shape
	// (GET /api/plugins/{id}/versions/{version} with JSON Accept header and
	// the plugins-app User-Agent) and always answers with preload=true.
	catalogServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, http.MethodGet, r.Method)
		require.Equal(t, "application/json", r.Header.Get("Accept"))
		require.Equal(t, "grafana-plugins-app", r.Header.Get("User-Agent"))
		segments := strings.Split(strings.Trim(r.URL.Path, "/"), "/")
		require.Len(t, segments, 5)
		require.Equal(t, "api", segments[0])
		require.Equal(t, "plugins", segments[1])
		require.Equal(t, "versions", segments[3])
		preload := true
		response := struct {
			PluginID string                       `json:"pluginSlug"`
			Version  string                       `json:"version"`
			JSON     pluginsv0alpha1.MetaJSONData `json:"json"`
		}{
			PluginID: segments[2],
			Version:  segments[4],
			JSON: pluginsv0alpha1.MetaJSONData{
				Id:      segments[2],
				Name:    segments[2],
				Type:    pluginsv0alpha1.MetaJSONDataTypeDatasource,
				Preload: &preload,
			},
		}
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusOK)
		require.NoError(t, json.NewEncoder(w).Encode(response))
	}))
	defer catalogServer.Close()
	provider := meta.NewLocalProvider(store, mockPluginAssets{})
	provider2 := meta.NewLocalProvider(store2, mockPluginAssets{})
	catalogProvider := meta.NewCatalogProvider(catalogServer.URL + "/api/plugins")
	// Provider order matters: the manager tries providers in order, so the
	// catalog is the last resort after both local providers.
	metaManager := meta.NewProviderManager(provider2, provider, catalogProvider)
	// The plugin client always lists the same three plugins (see newPluginList).
	pluginClient := pluginsv0alpha1.NewPluginClient(&mockResourceClient{
		listFunc: func(ctx context.Context, namespace string, opts resource.ListOptions) (resource.ListObject, error) {
			return newPluginList(), nil
		},
	})
	storage := NewMetaStorage(metaManager, func(ctx context.Context) (*pluginsv0alpha1.PluginClient, error) {
		return pluginClient, nil
	})
	// First List: cold path. Item order follows newPluginList:
	// [0] grafana-plugins-app (catalog, preload=true),
	// [1] test-plugin (local, preload=true),
	// [2] test-plugin-2 (local, Preload pointer left nil).
	obj, err := storage.List(ctx, nil)
	require.NoError(t, err)
	metaList, ok := obj.(*pluginsv0alpha1.MetaList)
	require.True(t, ok)
	require.Len(t, metaList.Items, 3)
	require.NotNil(t, metaList.Items[0].Spec.PluginJson.Preload)
	require.True(t, *metaList.Items[0].Spec.PluginJson.Preload)
	require.NotNil(t, metaList.Items[1].Spec.PluginJson.Preload)
	require.True(t, *metaList.Items[1].Spec.PluginJson.Preload)
	require.Nil(t, metaList.Items[2].Spec.PluginJson.Preload)
	// Second List: same expectations must hold when the provider manager
	// serves from its cache.
	obj, err = storage.List(ctx, nil)
	require.NoError(t, err)
	metaList, ok = obj.(*pluginsv0alpha1.MetaList)
	require.True(t, ok)
	require.Len(t, metaList.Items, 3)
	require.NotNil(t, metaList.Items[0].Spec.PluginJson.Preload)
	require.True(t, *metaList.Items[0].Spec.PluginJson.Preload)
	require.NotNil(t, metaList.Items[1].Spec.PluginJson.Preload)
	require.True(t, *metaList.Items[1].Spec.PluginJson.Preload)
	require.Nil(t, metaList.Items[2].Spec.PluginJson.Preload)
}
// mockPluginAssets is a stub asset resolver that answers every query with a
// fixed value, regardless of the plugin.
type mockPluginAssets struct{}

// LoadingStrategy always reports the fetch strategy.
func (mockPluginAssets) LoadingStrategy(_ context.Context, _ pluginstore.Plugin) plugins.LoadingStrategy {
	return plugins.LoadingStrategyFetch
}

// ModuleHash always returns the fixed module hash "hash".
func (mockPluginAssets) ModuleHash(_ context.Context, _ pluginstore.Plugin) string {
	return "hash"
}
// mockPluginStore is an in-memory plugin store backed by a map keyed by
// plugin ID.
type mockPluginStore struct {
	plugins map[string]pluginstore.Plugin
}

// Plugin returns the plugin registered under pluginID and whether it exists.
func (m *mockPluginStore) Plugin(ctx context.Context, pluginID string) (pluginstore.Plugin, bool) {
	// Comma-ok lookup: a single map access, and no reliance on the zero
	// value's ID happening to differ from pluginID (as the previous
	// double-lookup form did).
	p, ok := m.plugins[pluginID]
	return p, ok
}

// Plugins returns every registered plugin, optionally filtered to the given
// plugin types. Map iteration order is random, so the result order is not
// deterministic.
func (m *mockPluginStore) Plugins(ctx context.Context, pluginTypes ...plugins.Type) []pluginstore.Plugin {
	// Pre-size to the map length; the filter can only shrink the result.
	result := make([]pluginstore.Plugin, 0, len(m.plugins))
	for _, plugin := range m.plugins {
		if len(pluginTypes) == 0 || slices.Contains(pluginTypes, plugin.Type) {
			result = append(result, plugin)
		}
	}
	return result
}
// newPluginList builds the fixed three-item PluginList fixture: the plugins
// app itself plus the two test plugins, all in namespace "org-1" at version
// "1.0.0".
func newPluginList() *pluginsv0alpha1.PluginList {
	// Every item differs only by its ID, so build them with a small helper.
	item := func(id string) pluginsv0alpha1.Plugin {
		return pluginsv0alpha1.Plugin{
			ObjectMeta: metav1.ObjectMeta{Name: id, Namespace: "org-1"},
			Spec:       pluginsv0alpha1.PluginSpec{Id: id, Version: "1.0.0"},
		}
	}
	return &pluginsv0alpha1.PluginList{
		Items: []pluginsv0alpha1.Plugin{
			item("grafana-plugins-app"),
			item("test-plugin"),
			item("test-plugin-2"),
		},
	}
}
// mockResourceClient is a resource client stub. Only List/ListInto (and
// Watch, which yields an already-finished response) are functional; every
// other method is a no-op that satisfies the interface.
type mockResourceClient struct {
	// listFunc, when set, supplies the List result; otherwise an empty
	// PluginList is returned.
	listFunc func(ctx context.Context, namespace string, opts resource.ListOptions) (resource.ListObject, error)
}

// List delegates to listFunc when configured, falling back to an empty list.
func (m *mockResourceClient) List(ctx context.Context, namespace string, opts resource.ListOptions) (resource.ListObject, error) {
	if m.listFunc == nil {
		return &pluginsv0alpha1.PluginList{}, nil
	}
	return m.listFunc(ctx, namespace, opts)
}

// ListInto runs List and copies the result into `into` when both sides are
// *pluginsv0alpha1.PluginList; other list types are silently left untouched.
func (m *mockResourceClient) ListInto(ctx context.Context, namespace string, opts resource.ListOptions, into resource.ListObject) error {
	obj, err := m.List(ctx, namespace, opts)
	if err != nil {
		return err
	}
	source, srcOK := obj.(*pluginsv0alpha1.PluginList)
	target, dstOK := into.(*pluginsv0alpha1.PluginList)
	if srcOK && dstOK {
		*target = *source
	}
	return nil
}

// Get is a no-op stub.
func (m *mockResourceClient) Get(_ context.Context, _ resource.Identifier) (resource.Object, error) {
	return nil, nil
}

// GetInto is a no-op stub.
func (m *mockResourceClient) GetInto(_ context.Context, _ resource.Identifier, _ resource.Object) error {
	return nil
}

// Create is a no-op stub.
func (m *mockResourceClient) Create(_ context.Context, _ resource.Identifier, _ resource.Object, _ resource.CreateOptions) (resource.Object, error) {
	return nil, nil
}

// CreateInto is a no-op stub.
func (m *mockResourceClient) CreateInto(_ context.Context, _ resource.Identifier, _ resource.Object, _ resource.CreateOptions, _ resource.Object) error {
	return nil
}

// Update is a no-op stub.
func (m *mockResourceClient) Update(_ context.Context, _ resource.Identifier, _ resource.Object, _ resource.UpdateOptions) (resource.Object, error) {
	return nil, nil
}

// UpdateInto is a no-op stub.
func (m *mockResourceClient) UpdateInto(_ context.Context, _ resource.Identifier, _ resource.Object, _ resource.UpdateOptions, _ resource.Object) error {
	return nil
}

// Patch is a no-op stub.
func (m *mockResourceClient) Patch(_ context.Context, _ resource.Identifier, _ resource.PatchRequest, _ resource.PatchOptions) (resource.Object, error) {
	return nil, nil
}

// PatchInto is a no-op stub.
func (m *mockResourceClient) PatchInto(_ context.Context, _ resource.Identifier, _ resource.PatchRequest, _ resource.PatchOptions, _ resource.Object) error {
	return nil
}

// Delete is a no-op stub.
func (m *mockResourceClient) Delete(_ context.Context, _ resource.Identifier, _ resource.DeleteOptions) error {
	return nil
}

// SubresourceRequest is a no-op stub.
func (m *mockResourceClient) SubresourceRequest(_ context.Context, _ resource.Identifier, _ resource.CustomRouteRequestOptions) ([]byte, error) {
	return nil, nil
}

// Watch returns a watch whose event channel is already closed.
func (m *mockResourceClient) Watch(_ context.Context, _ string, _ resource.WatchOptions) (resource.WatchResponse, error) {
	return &mockWatchResponse{}, nil
}
// mockWatchResponse is a watch response that never produces events: its
// channel is handed out already closed, so receivers terminate immediately.
type mockWatchResponse struct{}

// Stop is a no-op.
func (m *mockWatchResponse) Stop() {}

// WatchEvents returns a freshly created, already-closed event channel.
func (m *mockWatchResponse) WatchEvents() <-chan resource.WatchEvent {
	events := make(chan resource.WatchEvent)
	close(events)
	return events
}

View File

@@ -198,7 +198,6 @@ type JobStatus struct {
Finished int64 `json:"finished,omitempty"`
Message string `json:"message,omitempty"`
Errors []string `json:"errors,omitempty"`
Warnings []string `json:"warnings,omitempty"`
// Optional value 0-100 that can be set while running
Progress float64 `json:"progress,omitempty"`
@@ -226,20 +225,18 @@ type JobResourceSummary struct {
Kind string `json:"kind,omitempty"`
Total int64 `json:"total,omitempty"` // the count (if known)
Create int64 `json:"create,omitempty"`
Update int64 `json:"update,omitempty"`
Delete int64 `json:"delete,omitempty"`
Write int64 `json:"write,omitempty"` // Create or update (export)
Error int64 `json:"error,omitempty"` // The error count
Warning int64 `json:"warning,omitempty"` // The warning count
Create int64 `json:"create,omitempty"`
Update int64 `json:"update,omitempty"`
Delete int64 `json:"delete,omitempty"`
Write int64 `json:"write,omitempty"` // Create or update (export)
Error int64 `json:"error,omitempty"` // The error count
// No action required (useful for sync)
Noop int64 `json:"noop,omitempty"`
// Report errors/warnings for this resource type
// Report errors for this resource type
// This may not be an exhaustive list and recommend looking at the logs for more info
Errors []string `json:"errors,omitempty"`
Warnings []string `json:"warnings,omitempty"`
Errors []string `json:"errors,omitempty"`
}
// HistoricJob is an append only log, saving all jobs that have been processed.

View File

@@ -401,11 +401,6 @@ func (in *JobResourceSummary) DeepCopyInto(out *JobResourceSummary) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Warnings != nil {
in, out := &in.Warnings, &out.Warnings
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
@@ -473,11 +468,6 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Warnings != nil {
in, out := &in.Warnings, &out.Warnings
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Summary != nil {
in, out := &in.Summary, &out.Summary
*out = make([]*JobResourceSummary, len(*in))

View File

@@ -889,13 +889,6 @@ func schema_pkg_apis_provisioning_v0alpha1_JobResourceSummary(ref common.Referen
Format: "int64",
},
},
"warning": {
SchemaProps: spec.SchemaProps{
Description: "The error count",
Type: []string{"integer"},
Format: "int64",
},
},
"noop": {
SchemaProps: spec.SchemaProps{
Description: "No action required (useful for sync)",
@@ -905,7 +898,7 @@ func schema_pkg_apis_provisioning_v0alpha1_JobResourceSummary(ref common.Referen
},
"errors": {
SchemaProps: spec.SchemaProps{
Description: "Report errors/warnings for this resource type This may not be an exhaustive list and recommend looking at the logs for more info",
Description: "Report errors for this resource type This may not be an exhaustive list and recommend looking at the logs for more info",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
@@ -918,20 +911,6 @@ func schema_pkg_apis_provisioning_v0alpha1_JobResourceSummary(ref common.Referen
},
},
},
"warnings": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
},
},
},
@@ -1050,20 +1029,6 @@ func schema_pkg_apis_provisioning_v0alpha1_JobStatus(ref common.ReferenceCallbac
},
},
},
"warnings": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
},
},
},
"progress": {
SchemaProps: spec.SchemaProps{
Description: "Optional value 0-100 that can be set while running",

View File

@@ -3,10 +3,8 @@ API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioni
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,FileList,Items
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,HistoryList,Items
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobResourceSummary,Errors
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobResourceSummary,Warnings
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobStatus,Errors
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobStatus,Summary
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobStatus,Warnings
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,ManagerStats,Stats
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,MoveJobOptions,Paths
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,MoveJobOptions,Resources

View File

@@ -7,18 +7,16 @@ package v0alpha1
// JobResourceSummaryApplyConfiguration represents a declarative configuration of the JobResourceSummary type for use
// with apply.
type JobResourceSummaryApplyConfiguration struct {
Group *string `json:"group,omitempty"`
Kind *string `json:"kind,omitempty"`
Total *int64 `json:"total,omitempty"`
Create *int64 `json:"create,omitempty"`
Update *int64 `json:"update,omitempty"`
Delete *int64 `json:"delete,omitempty"`
Write *int64 `json:"write,omitempty"`
Error *int64 `json:"error,omitempty"`
Warning *int64 `json:"warning,omitempty"`
Noop *int64 `json:"noop,omitempty"`
Errors []string `json:"errors,omitempty"`
Warnings []string `json:"warnings,omitempty"`
Group *string `json:"group,omitempty"`
Kind *string `json:"kind,omitempty"`
Total *int64 `json:"total,omitempty"`
Create *int64 `json:"create,omitempty"`
Update *int64 `json:"update,omitempty"`
Delete *int64 `json:"delete,omitempty"`
Write *int64 `json:"write,omitempty"`
Error *int64 `json:"error,omitempty"`
Noop *int64 `json:"noop,omitempty"`
Errors []string `json:"errors,omitempty"`
}
// JobResourceSummaryApplyConfiguration constructs a declarative configuration of the JobResourceSummary type for use with
@@ -91,14 +89,6 @@ func (b *JobResourceSummaryApplyConfiguration) WithError(value int64) *JobResour
return b
}
// WithWarning sets the Warning field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Warning field is set to the value of the last call.
func (b *JobResourceSummaryApplyConfiguration) WithWarning(value int64) *JobResourceSummaryApplyConfiguration {
b.Warning = &value
return b
}
// WithNoop sets the Noop field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Noop field is set to the value of the last call.
@@ -116,13 +106,3 @@ func (b *JobResourceSummaryApplyConfiguration) WithErrors(values ...string) *Job
}
return b
}
// WithWarnings adds the given value to the Warnings field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Warnings field.
func (b *JobResourceSummaryApplyConfiguration) WithWarnings(values ...string) *JobResourceSummaryApplyConfiguration {
for i := range values {
b.Warnings = append(b.Warnings, values[i])
}
return b
}

View File

@@ -16,7 +16,6 @@ type JobStatusApplyConfiguration struct {
Finished *int64 `json:"finished,omitempty"`
Message *string `json:"message,omitempty"`
Errors []string `json:"errors,omitempty"`
Warnings []string `json:"warnings,omitempty"`
Progress *float64 `json:"progress,omitempty"`
Summary []*provisioningv0alpha1.JobResourceSummary `json:"summary,omitempty"`
URLs *RepositoryURLsApplyConfiguration `json:"url,omitempty"`
@@ -70,16 +69,6 @@ func (b *JobStatusApplyConfiguration) WithErrors(values ...string) *JobStatusApp
return b
}
// WithWarnings adds the given value to the Warnings field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Warnings field.
func (b *JobStatusApplyConfiguration) WithWarnings(values ...string) *JobStatusApplyConfiguration {
for i := range values {
b.Warnings = append(b.Warnings, values[i])
}
return b
}
// WithProgress sets the Progress field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Progress field is set to the value of the last call.

View File

@@ -75,9 +75,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false,
"rounded": true,
"spotlight": false
"spotlight": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -152,9 +152,9 @@
"effects": {
"barGlow": false,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": false
"spotlight": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -229,9 +229,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": false
"spotlight": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -306,9 +306,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -383,9 +383,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -460,9 +460,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": false,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -537,9 +537,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": false,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -627,9 +627,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -704,9 +704,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -781,9 +781,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -858,9 +858,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": false,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -952,9 +952,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -1029,9 +1029,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -1106,9 +1106,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": true
},
"orientation": "auto",
"reduceOptions": {
@@ -1183,9 +1183,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -1260,9 +1260,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": false,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": false
},
"orientation": "auto",
"reduceOptions": {
@@ -1354,9 +1354,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": true
},
"orientation": "auto",
"reduceOptions": {
@@ -1435,9 +1435,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": true
},
"orientation": "auto",
"reduceOptions": {
@@ -1516,9 +1516,9 @@
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true,
"rounded": false,
"spotlight": false
"spotlight": false,
"gradient": true
},
"orientation": "auto",
"reduceOptions": {
@@ -1565,6 +1565,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1605,9 +1606,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
@@ -1630,6 +1631,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"hide": false,
"max": 98,
"min": 5,
"noise": 22,
@@ -1647,6 +1649,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1687,9 +1690,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
@@ -1712,6 +1715,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"hide": false,
"max": 98,
"min": 5,
"noise": 22,
@@ -1742,6 +1746,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1783,9 +1788,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
@@ -1808,6 +1813,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"hide": false,
"max": 8,
"min": 1,
"noise": 2,
@@ -1825,6 +1831,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1866,9 +1873,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
@@ -1891,6 +1898,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"hide": false,
"max": 12,
"min": 1,
"noise": 2,
@@ -1908,6 +1916,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -1948,9 +1957,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
@@ -1973,6 +1982,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"hide": false,
"max": 100,
"min": 10,
"noise": 22,
@@ -1990,6 +2000,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -2030,9 +2041,9 @@
"effects": {
"barGlow": true,
"centerGlow": true,
"gradient": true,
"rounded": true,
"spotlight": true
"spotlight": true,
"gradient": true
},
"glow": "both",
"orientation": "auto",
@@ -2055,6 +2066,7 @@
"datasource": {
"type": "grafana-testdata-datasource"
},
"hide": false,
"max": 100,
"min": 10,
"noise": 22,
@@ -2067,147 +2079,6 @@
],
"title": "Backend",
"type": "radialbar"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 66
},
"id": 35,
"panels": [],
"title": "Empty data",
"type": "row"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 0,
"y": 67
},
"id": 36,
"options": {
"barWidthFactor": 0.5,
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true,
"rounded": false,
"spotlight": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "gauge",
"showThresholdLabels": false,
"showThresholdMarkers": true,
"sparkline": true
},
"pluginVersion": "13.0.0-pre",
"targets": [
{
"refId": "A",
"scenarioId": "random_walk",
"seriesCount": 0
}
],
"title": "Numeric, no series",
"type": "gauge"
},
{
"datasource": {
"type": "grafana-testdata-datasource"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 6,
"y": 67
},
"id": 37,
"options": {
"barWidthFactor": 0.5,
"effects": {
"barGlow": false,
"centerGlow": false,
"gradient": true,
"rounded": false,
"spotlight": false
},
"orientation": "auto",
"reduceOptions": {
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
"segmentCount": 1,
"segmentSpacing": 0.3,
"shape": "gauge",
"showThresholdLabels": false,
"showThresholdMarkers": true,
"sparkline": true
},
"pluginVersion": "13.0.0-pre",
"targets": [
{
"refId": "A",
"scenarioId": "logs"
}
],
"title": "Non-numeric",
"type": "gauge"
}
],
"preload": false,
@@ -2224,5 +2095,5 @@
"timezone": "browser",
"title": "Panel tests - Gauge (new)",
"uid": "panel-tests-gauge-new",
"version": 9
"version": 6
}

View File

@@ -7,8 +7,8 @@ MAKEFLAGS += --no-builtin-rule
include docs.mk
.PHONY: sources/visualizations/panels-visualizations/query-transform-data/transform-data/index.md
sources/visualizations/panels-visualizations/query-transform-data/transform-data/index.md: ## Generate the Transform Data page source.
.PHONY: sources/panels-visualizations/query-transform-data/transform-data/index.md
sources/panels-visualizations/query-transform-data/transform-data/index.md: ## Generate the Transform Data page source.
cd $(CURDIR)/.. && \
npx tsx ./scripts/docs/generate-transformations.ts && \
npx prettier -w $(CURDIR)/$@

View File

@@ -59,9 +59,9 @@ For more details on contact points, including how to test them and enable notifi
## Alertmanager settings
| Option | Description |
| ------ | ----------------------------------------------------------------------------------------------------------------- |
| URL | The Alertmanager URL. This field is [protected](ref:configure-contact-points) from modification in Grafana Cloud. |
| Option | Description |
| ------ | ---------------------------------------------------------------------------------------------------------------------------------- |
| URL | The Alertmanager URL. This field is [protected](ref:configure-contact-points#protected-fields) from modification in Grafana Cloud. |
#### Optional settings

View File

@@ -49,14 +49,14 @@ For more details on contact points, including how to test them and enable notifi
### Required Settings
| Key | Description |
| ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| URL | The URL of the REST API of your Jira instance. Supported versions: `2` and `3` (e.g., `https://your-domain.atlassian.net/rest/api/3`). This field is [protected](ref:configure-contact-points) from modification in Grafana Cloud. |
| Basic Auth User | Username for authentication. For Jira Cloud, use your email address. |
| Basic Auth Password | Password or personal token. For Jira Cloud, you need to obtain a personal token [here](https://id.atlassian.com/manage-profile/security/api-tokens) and use it as the password. |
| API Token | An alternative to basic authentication, a bearer token is used to authorize the API requests. See [Jira documentation](https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html) for more information. |
| Project Key | The project key identifying the project where issues will be created. Project keys are unique identifiers for a project. |
| Issue Type | The type of issue to create (e.g., `Task`, `Bug`, `Incident`). Make sure that you specify a type that is available in your project. |
| Key | Description |
| ------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| URL | The URL of the REST API of your Jira instance. Supported versions: `2` and `3` (e.g., `https://your-domain.atlassian.net/rest/api/3`). This field is [protected](ref:configure-contact-points#protected-fields) from modification in Grafana Cloud. |
| Basic Auth User | Username for authentication. For Jira Cloud, use your email address. |
| Basic Auth Password | Password or personal token. For Jira Cloud, you need to obtain a personal token [here](https://id.atlassian.com/manage-profile/security/api-tokens) and use it as the password. |
| API Token | An alternative to basic authentication, a bearer token is used to authorize the API requests. See [Jira documentation](https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html) for more information. |
| Project Key | The project key identifying the project where issues will be created. Project keys are unique identifiers for a project. |
| Issue Type | The type of issue to create (e.g., `Task`, `Bug`, `Incident`). Make sure that you specify a type that is available in your project. |
### Optional Settings

View File

@@ -54,10 +54,10 @@ For more details on contact points, including how to test them and enable notifi
### Required Settings
| Option | Description |
| ---------- | ----------------------------------------------------------------------------------------------------------------------- |
| Broker URL | The URL of the MQTT broker. This field is [protected](ref:configure-contact-points) from modification in Grafana Cloud. |
| Topic | The topic to which the message will be sent. |
| Option | Description |
| ---------- | ---------------------------------------------------------------------------------------------------------------------------------------- |
| Broker URL | The URL of the MQTT broker. This field is [protected](ref:configure-contact-points#protected-fields) from modification in Grafana Cloud. |
| Topic | The topic to which the message will be sent. |
### Optional Settings

View File

@@ -51,8 +51,8 @@ You can customize the `title` and `body` of the Slack message using [notificatio
If you are using a Slack API Token, complete the following steps.
1. Follow step 1 of the [Slack API Quickstart](https://docs.slack.dev/app-management/quickstart-app-settings/#creating) to create the app.
1. Continue onto the second step of the [Slack API Quickstart](https://docs.slack.dev/app-management/quickstart-app-settings/#scopes) and add the [chat:write.public](https://api.slack.com/scopes/chat:write.public) scope as described to give your app the ability to post in all public channels without joining.
1. Follow steps 1 and 2 of the [Slack API Quickstart](https://api.slack.com/start/quickstart).
1. Add the [chat:write.public](https://api.slack.com/scopes/chat:write.public) scope to give your app the ability to post in all public channels without joining.
1. In OAuth Tokens for Your Workspace, copy the Bot User OAuth Token.
1. Open your Slack workplace.
1. Right click the channel you want to receive notifications in.

View File

@@ -62,9 +62,9 @@ For more details on contact points, including how to test them and enable notifi
## Webhook settings
| Option | Description |
| ------ | ------------------------------------------------------------------------------------------------------------ |
| URL | The Webhook URL. This field is [protected](ref:configure-contact-points) from modification in Grafana Cloud. |
| Option | Description |
| ------ | ----------------------------------------------------------------------------------------------------------------------------- |
| URL | The Webhook URL. This field is [protected](ref:configure-contact-points#protected-fields) from modification in Grafana Cloud. |
#### Optional settings

View File

@@ -81,7 +81,7 @@ Replace the placeholders with your values:
In your `grafana` directory, create a sub-folder called `dashboards`.
This guide shows you how to create three separate dashboards. For all dashboard configurations, replace the placeholders with your values:
This guide shows you how to create three separate dashboards. For all dashboard configurations, replace the placeholders with your values:
- _`<GRAFANA_CLOUD_STACK_NAME>`_: Name of your Grafana Cloud Stack
- _`<GRAFANA_OPERATOR_NAMESPACE>`_: Namespace where the `grafana-operator` is deployed in your Kubernetes cluster

View File

@@ -1,147 +0,0 @@
---
title: Git Sync deployment scenarios
menuTitle: Deployment scenarios
description: Learn about common Git Sync deployment patterns and configurations for different organizational needs
weight: 450
keywords:
- git sync
- deployment patterns
- scenarios
- multi-environment
- teams
---
# Git Sync deployment scenarios
This guide shows practical deployment scenarios for Grafana's Git Sync. Learn how to configure bidirectional synchronization between Grafana and Git repositories for teams, environments, and regions.
{{< admonition type="caution" >}}
Git Sync is an experimental feature. It reflects Grafana's approach to Observability as Code and might include limitations or breaking changes. For current status and known limitations, refer to the [Git Sync introduction](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/intro-git-sync/).
{{< /admonition >}}
## Understand the relationship between key Git Sync components
Before you explore the scenarios, understand how the key Git Sync components relate:
- [Grafana instance](#grafana-instance)
- [Git repository structure](#git-repository-structure)
- [Git Sync repository resource](#git-sync-repository-resource)
### Grafana instance
A Grafana instance is a running Grafana server. Multiple instances can:
- Connect to the same Git repository using different Repository configurations.
- Sync from different branches of the same repository.
- Sync from different paths within the same repository.
- Sync from different repositories.
### Git repository structure
You can organize your Git repository in several ways:
- Single branch, multiple paths: Use different directories for different purposes (for example, `dev/`, `prod/`, `team-a/`).
- Multiple branches: Use different branches for different environments or teams (for example, `main`, `develop`, `team-a`).
- Multiple repositories: Use separate repositories for different teams or environments.
### Git Sync repository resource
A repository resource is a Grafana configuration object that defines:
- Which Git repository to sync with.
- Which branch to use.
- Which directory path to synchronize.
- Sync behavior and workflows.
Each repository resource creates bidirectional synchronization between a Grafana instance and a specific location in Git.
## How does repository sync behave?
With Git Sync you configure a repository resource to sync with your Grafana instance:
1. Grafana monitors the specified Git location (repository, branch, and path).
2. Grafana creates a folder in Dashboards (typically named after the repository).
3. Grafana creates dashboards from dashboard JSON files in Git within this folder.
4. Grafana commits dashboard changes made in the UI back to Git.
5. Grafana pulls dashboard changes made in Git and updates dashboards in the UI.
6. Synchronization occurs at regular intervals (configurable), or instantly if you use webhooks.
You can find the provisioned dashboards organized in folders under **Dashboards**.
## Example: Relationship between repository, branch, and path
Here's a concrete example showing how the three parameters work together:
**Configuration:**
- **Repository**: `your-org/grafana-manifests`
- **Branch**: `main`
- **Path**: `team-platform/grafana/`
**In Git (on branch `main`):**
```
your-org/grafana-manifests/
├── .git/
├── README.md
├── team-platform/
│ └── grafana/
│ ├── cpu-metrics.json ← Synced
│ ├── memory-usage.json ← Synced
│ └── disk-io.json ← Synced
├── team-data/
│ └── grafana/
│ └── pipeline-stats.json ← Not synced (different path)
└── other-files.txt ← Not synced (outside path)
```
**In Grafana Dashboards view:**
```
Dashboards
└── 📁 grafana-manifests/
├── CPU Metrics Dashboard
├── Memory Usage Dashboard
└── Disk I/O Dashboard
```
**Key points:**
- Grafana only synchronizes files within the specified path (`team-platform/grafana/`).
- Grafana ignores files in other paths or at the repository root.
- The folder name in Grafana comes from the repository name.
- Dashboard titles come from the JSON file content, not the filename.
## Repository configuration flexibility
Git Sync repositories support different combinations of repository URL, branch, and path:
- Different Git repositories: Each environment or team can use its own repository.
- Instance A: `repository: your-org/grafana-prod`.
- Instance B: `repository: your-org/grafana-dev`.
- Different branches: Use separate branches within the same repository.
- Instance A: `repository: your-org/grafana-manifests, branch: main`.
- Instance B: `repository: your-org/grafana-manifests, branch: develop`.
- Different paths: Use different directory paths within the same repository.
- Instance A: `repository: your-org/grafana-manifests, branch: main, path: production/`.
- Instance B: `repository: your-org/grafana-manifests, branch: main, path: development/`.
- Any combination: Mix and match based on your workflow requirements.
## Scenarios
Use these deployment scenarios to plan your Git Sync setup:
- [Single instance](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios/single-instance/)
- [Git Sync for development and production environments](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios/dev-prod/)
- [Git Sync with regional replication](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios/multi-region/)
- [High availability](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios/high-availability/)
- [Git Sync in a shared Grafana instance](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios/multi-team/)
## Learn more
Refer to the following documents to learn more:
- [Git Sync introduction](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/intro-git-sync/)
- [Git Sync setup guide](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-setup/)
- [Dashboard provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/administration/provisioning/)
- [Observability as Code](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/)

View File

@@ -1,147 +0,0 @@
---
title: Git Sync for development and production environments
menuTitle: Across environments
description: Use separate Grafana instances for development and production with Git-controlled promotion
weight: 20
---
# Git Sync for development and production environments
Use separate Grafana instances for development and production. Each syncs with different Git locations to test dashboards before production.
## Use it for
- **Staged deployments**: You need to test dashboard changes before production deployment.
- **Change control**: You require approvals before dashboards reach production.
- **Quality assurance**: You verify dashboard functionality in a non-production environment.
- **Risk mitigation**: You minimize the risk of breaking production dashboards.
## Architecture
```
┌────────────────────────────────────────────────────────────┐
│ GitHub Repository │
│ Repository: your-org/grafana-manifests │
│ Branch: main │
│ │
│ grafana-manifests/ │
│ ├── dev/ │
│ │ ├── dashboard-new.json ← Development dashboards │
│ │ └── dashboard-test.json │
│ │ │
│ └── prod/ │
│ ├── dashboard-stable.json ← Production dashboards │
│ └── dashboard-approved.json │
└────────────────────────────────────────────────────────────┘
↕ ↕
Git Sync (dev/) Git Sync (prod/)
↕ ↕
┌─────────────────────┐ ┌─────────────────────┐
│ Dev Grafana │ │ Prod Grafana │
│ │ │ │
│ Repository: │ │ Repository: │
│ - path: dev/ │ │ - path: prod/ │
│ │ │ │
│ Creates folder: │ │ Creates folder: │
│ "grafana-manifests"│ │ "grafana-manifests"│
└─────────────────────┘ └─────────────────────┘
```
## Repository structure
**In Git:**
```
your-org/grafana-manifests
├── dev/
│ ├── dashboard-new.json
│ └── dashboard-test.json
└── prod/
├── dashboard-stable.json
└── dashboard-approved.json
```
**In Grafana Dashboards view:**
**Dev instance:**
```
Dashboards
└── 📁 grafana-manifests/
├── New Dashboard
└── Test Dashboard
```
**Prod instance:**
```
Dashboards
└── 📁 grafana-manifests/
├── Stable Dashboard
└── Approved Dashboard
```
- Both instances create a folder named "grafana-manifests" (from repository name)
- Each instance only shows dashboards from its configured path (`dev/` or `prod/`)
- Dashboards appear with their titles from the JSON files
## Configuration parameters
Development:
- Repository: `your-org/grafana-manifests`
- Branch: `main`
- Path: `dev/`
Production:
- Repository: `your-org/grafana-manifests`
- Branch: `main`
- Path: `prod/`
## How it works
1. Developers create and modify dashboards in development.
2. Git Sync commits changes to `dev/`.
3. You review changes in Git.
4. You promote approved dashboards from `dev/` to `prod/`.
5. Production syncs from `prod/`.
6. Production dashboards update.
## Alternative: Use branches
Instead of using different paths, you can configure instances to use different branches:
**Development instance:**
- **Repository**: `your-org/grafana-manifests`
- **Branch**: `develop`
- **Path**: `grafana/`
**Production instance:**
- **Repository**: `your-org/grafana-manifests`
- **Branch**: `main`
- **Path**: `grafana/`
With this approach:
- Development changes go to the `develop` branch
- Use Git merge or pull request workflows to promote changes from `develop` to `main`
- Production automatically syncs from the `main` branch
## Alternative: Use separate repositories for stricter isolation
For stricter isolation, use completely separate repositories:
**Development instance:**
- **Repository**: `your-org/grafana-manifests-dev`
- **Branch**: `main`
- **Path**: `grafana/`
**Production instance:**
- **Repository**: `your-org/grafana-manifests-prod`
- **Branch**: `main`
- **Path**: `grafana/`

View File

@@ -1,217 +0,0 @@
---
title: Git Sync for high availability environments
menuTitle: High availability
description: Run multiple Grafana instances serving traffic simultaneously, synchronized via Git Sync
weight: 50
---
# Git Sync for high availability environments
## Primary–replica scenario
Use a primary Grafana instance and one or more replicas synchronized with the same Git location to enable failover.
### Use it for
- **Automatic failover**: You need service continuity when the primary instance fails.
- **High availability**: Your organization requires guaranteed dashboard availability.
- **Simple HA setup**: You want high availability without the complexity of active–active.
- **Maintenance windows**: You perform updates while another instance serves traffic.
- **Business continuity**: Dashboard access can't tolerate downtime.
### Architecture
```
┌─────────────────────────────────────────────────────┐
│ GitHub Repository │
│ Repository: your-org/grafana-manifests │
│ Branch: main │
│ │
│ grafana-manifests/ │
│ └── shared/ │
│ ├── dashboard-metrics.json │
│ ├── dashboard-alerts.json │
│ └── dashboard-logs.json │
└─────────────────────────────────────────────────────┘
↕ ↕
Git Sync (shared/) Git Sync (shared/)
↕ ↕
┌────────────────────┐ ┌────────────────────┐
│ Master Grafana │ │ Replica Grafana │
│ (Active) │ │ (Standby) │
│ │ │ │
│ Repository: │ │ Repository: │
│ - path: shared/ │ │ - path: shared/ │
└────────────────────┘ └────────────────────┘
│ │
└───────────┬───────────────────┘
┌──────────────────────┐
│ Reverse Proxy │
│ (Failover) │
└──────────────────────┘
```
### Repository structure
**In Git:**
```
your-org/grafana-manifests
└── shared/
├── dashboard-metrics.json
├── dashboard-alerts.json
└── dashboard-logs.json
```
**In Grafana Dashboards view (both instances):**
```
Dashboards
└── 📁 grafana-manifests/
├── Metrics Dashboard
├── Alerts Dashboard
└── Logs Dashboard
```
- Master and replica instances show identical folder structure.
- Both sync from the same `shared/` path.
- Reverse proxy routes traffic to master (active) instance.
- If master fails, proxy automatically fails over to replica (standby).
- Users see the same dashboards regardless of which instance is serving traffic.
### Configuration parameters
Both master and replica instances use identical parameters:
**Master instance:**
- **Repository**: `your-org/grafana-manifests`
- **Branch**: `main`
- **Path**: `shared/`
**Replica instance:**
- **Repository**: `your-org/grafana-manifests`
- **Branch**: `main`
- **Path**: `shared/`
### How it works
1. Both instances stay synchronized through Git.
2. Reverse proxy routes traffic to primary.
3. Users edit on primary. Git Sync commits changes.
4. Both instances pull latest changes to keep replica in sync.
5. On primary failure, proxy fails over to replica.
### Failover considerations
- Health checks and monitoring.
- Continuous syncing to minimize data loss.
- Plan failback (automatic or manual).
## Load balancer scenario
Run multiple active Grafana instances behind a load balancer. All instances sync from the same Git location.
### Use it for
- **High traffic**: Your deployment needs to handle significant user load.
- **Load distribution**: You want to distribute user requests across instances.
- **Maximum availability**: You need service continuity during maintenance or failures.
- **Scalability**: You want to add instances as load increases.
- **Performance**: Users need fast response times under heavy load.
### Architecture
```
┌─────────────────────────────────────────────────────┐
│ GitHub Repository │
│ Repository: your-org/grafana-manifests │
│ Branch: main │
│ │
│ grafana-manifests/ │
│ └── shared/ │
│ ├── dashboard-metrics.json │
│ ├── dashboard-alerts.json │
│ └── dashboard-logs.json │
└─────────────────────────────────────────────────────┘
↕ ↕
Git Sync (shared/) Git Sync (shared/)
↕ ↕
┌────────────────────┐ ┌────────────────────┐
│ Grafana Instance 1│ │ Grafana Instance 2│
│ (Active) │ │ (Active) │
│ │ │ │
│ Repository: │ │ Repository: │
│ - path: shared/ │ │ - path: shared/ │
└────────────────────┘ └────────────────────┘
│ │
└───────────┬───────────────────┘
┌──────────────────────┐
│ Load Balancer │
│ (Round Robin) │
└──────────────────────┘
```
### Repository structure
**In Git:**
```
your-org/grafana-manifests
└── shared/
├── dashboard-metrics.json
├── dashboard-alerts.json
└── dashboard-logs.json
```
**In Grafana Dashboards view (all instances):**
```
Dashboards
└── 📁 grafana-manifests/
├── Metrics Dashboard
├── Alerts Dashboard
└── Logs Dashboard
```
- All instances show identical folder structure.
- All instances sync from the same `shared/` path.
- Load balancer distributes requests across all active instances.
- Any instance can serve read requests.
- Any instance can accept dashboard modifications.
- Changes propagate to all instances through Git.
### Configuration parameters
All instances use identical parameters:
**Instance 1:**
- **Repository**: `your-org/grafana-manifests`
- **Branch**: `main`
- **Path**: `shared/`
**Instance 2:**
- **Repository**: `your-org/grafana-manifests`
- **Branch**: `main`
- **Path**: `shared/`
### How it works
1. All instances stay synchronized through Git.
2. Load balancer distributes incoming traffic across all active instances.
3. Users can view dashboards from any instance.
4. When a user modifies a dashboard on any instance, Git Sync commits the change.
5. All other instances pull the updated dashboard during their next sync cycle, or instantly if webhooks are configured.
6. If one instance fails, load balancer stops routing traffic to it and remaining instances continue serving.
### Important considerations
- **Eventually consistent**: Due to sync intervals, instances may briefly have different dashboard versions.
- **Concurrent edits**: Multiple users editing the same dashboard on different instances can cause conflicts.
- **Database sharing**: Instances should share the same backend database for user sessions, preferences, and annotations.
- **Stateless design**: Design for stateless operation where possible to maximize load balancing effectiveness.

View File

@@ -1,93 +0,0 @@
---
title: Git Sync with regional replication
menuTitle: Regional replication
description: Synchronize multiple regional Grafana instances from a shared Git location
weight: 30
---
# Git Sync with regional replication
Deploy multiple Grafana instances across regions. Synchronize them with the same Git location to ensure consistent dashboards everywhere.
## Use it for
- **Geographic distribution**: You deploy Grafana close to users in different regions.
- **Latency reduction**: Users need fast dashboard access from their location.
- **Data sovereignty**: You keep dashboard data in specific regions.
- **High availability**: You need dashboard availability across regions.
- **Consistent experience**: All users see the same dashboards regardless of region.
## Architecture
```
┌─────────────────────────────────────────────────────┐
│ GitHub Repository │
│ Repository: your-org/grafana-manifests │
│ Branch: main │
│ │
│ grafana-manifests/ │
│ └── shared/ │
│ ├── dashboard-global.json │
│ ├── dashboard-metrics.json │
│ └── dashboard-logs.json │
└─────────────────────────────────────────────────────┘
↕ ↕
Git Sync (shared/) Git Sync (shared/)
↕ ↕
┌────────────────────┐ ┌────────────────────┐
│ US Region │ │ EU Region │
│ Grafana │ │ Grafana │
│ │ │ │
│ Repository: │ │ Repository: │
│ - path: shared/ │ │ - path: shared/ │
└────────────────────┘ └────────────────────┘
```
## Repository structure
**In Git:**
```
your-org/grafana-manifests
└── shared/
├── dashboard-global.json
├── dashboard-metrics.json
└── dashboard-logs.json
```
**In Grafana Dashboards view (all regions):**
```
Dashboards
└── 📁 grafana-manifests/
├── Global Dashboard
├── Metrics Dashboard
└── Logs Dashboard
```
- All regional instances (US, EU, etc.) show identical folder structure
- Same folder name "grafana-manifests" in every region
- Same dashboards synced from the `shared/` path appear everywhere
- Users in any region see the exact same dashboards with the same titles
## Configuration parameters
All regions:
- Repository: `your-org/grafana-manifests`
- Branch: `main`
- Path: `shared/`
## How it works
1. All regional instances pull dashboards from `shared/`.
2. Any region's change commits to Git.
3. Other regions pull updates during the next sync (or via webhooks).
4. Changes propagate across regions per sync interval.
## Considerations
- **Write conflicts**: If users in different regions modify the same dashboard simultaneously, Git uses last-write-wins.
- **Primary region**: Consider designating one region as the primary location for making dashboard changes.
- **Propagation time**: Changes propagate to all regions within the configured sync interval, or instantly if webhooks are configured.
- **Network reliability**: Ensure all regions have reliable connectivity to the Git repository.

View File

@@ -1,169 +0,0 @@
---
title: Multiple team Git Sync
menuTitle: Shared instance
description: Use multiple Git repositories with one Grafana instance, one repository per team
weight: 60
---
# Git Sync in a Grafana instance shared by multiple teams
Use a single Grafana instance with multiple Repository resources, one per team. Each team manages its own dashboards while sharing Grafana.
## Use it for
- **Team autonomy**: Different teams manage their own dashboards independently.
- **Organizational structure**: Dashboard organization aligns with team structure.
- **Resource efficiency**: Multiple teams share Grafana infrastructure.
- **Cost optimization**: You reduce infrastructure costs while maintaining team separation.
- **Collaboration**: Teams can view each other's dashboards while managing their own.
## Architecture
```
┌─────────────────────────┐ ┌─────────────────────────┐
│ Platform Team Repo │ │ Data Team Repo │
│ platform-dashboards │ │ data-dashboards │
│ │ │ │
│ platform-dashboards/ │ │ data-dashboards/ │
│ └── grafana/ │ │ └── grafana/ │
│ ├── k8s.json │ │ ├── pipeline.json │
│ └── infra.json │ │ └── analytics.json │
└─────────────────────────┘ └─────────────────────────┘
↕ ↕
Git Sync (grafana/) Git Sync (grafana/)
↕ ↕
┌──────────────────────────────────────┐
│ Grafana Instance │
│ │
│ Repository 1: │
│ - repo: platform-dashboards │
│ → Creates "platform-dashboards" │
│ │
│ Repository 2: │
│ - repo: data-dashboards │
│ → Creates "data-dashboards" │
└──────────────────────────────────────┘
```
## Repository structure
**In Git (separate repositories):**
**Platform team repository:**
```
your-org/platform-dashboards
└── grafana/
├── dashboard-k8s.json
└── dashboard-infra.json
```
**Data team repository:**
```
your-org/data-dashboards
└── grafana/
├── dashboard-pipeline.json
└── dashboard-analytics.json
```
**In Grafana Dashboards view:**
```
Dashboards
├── 📁 platform-dashboards/
│ ├── Kubernetes Dashboard
│ └── Infrastructure Dashboard
└── 📁 data-dashboards/
├── Pipeline Dashboard
└── Analytics Dashboard
```
- Two separate folders created (one per Repository resource).
- Folder names derived from repository names.
- Each team has complete control over their own repository.
- Teams can independently manage permissions, branches, and workflows in their repos.
- All teams can view each other's dashboards in Grafana but manage only their own.
## Configuration parameters
**Platform team repository:**
- **Repository**: `your-org/platform-dashboards`
- **Branch**: `main`
- **Path**: `grafana/`
**Data team repository:**
- **Repository**: `your-org/data-dashboards`
- **Branch**: `main`
- **Path**: `grafana/`
## How it works
1. Each team has their own Git repository for complete autonomy.
2. Each repository resource in Grafana creates a separate folder.
3. Platform team dashboards sync from `your-org/platform-dashboards` repository.
4. Data team dashboards sync from `your-org/data-dashboards` repository.
5. Teams can independently manage their repository settings, access controls, and workflows.
6. All teams can view each other's dashboards in Grafana but edit only their own.
## Scale to more teams
Adding additional teams is straightforward. For a third team, create a new repository and configure:
- **Repository**: `your-org/security-dashboards`
- **Branch**: `main`
- **Path**: `grafana/`
This creates a new "security-dashboards" folder in the same Grafana instance.
## Alternative: Shared repository with different paths
For teams that prefer sharing a single repository, use different paths to separate team dashboards:
**In Git:**
```
your-org/grafana-manifests
├── team-platform/
│ ├── dashboard-k8s.json
│ └── dashboard-infra.json
└── team-data/
├── dashboard-pipeline.json
└── dashboard-analytics.json
```
**Configuration:**
**Platform team:**
- **Repository**: `your-org/grafana-manifests`
- **Branch**: `main`
- **Path**: `team-platform/`
**Data team:**
- **Repository**: `your-org/grafana-manifests`
- **Branch**: `main`
- **Path**: `team-data/`
This approach provides simpler repository management but less isolation between teams.
## Alternative: Different branches per team
For teams wanting their own branch in a shared repository:
**Platform team:**
- **Repository**: `your-org/grafana-manifests`
- **Branch**: `team-platform`
- **Path**: `grafana/`
**Data team:**
- **Repository**: `your-org/grafana-manifests`
- **Branch**: `team-data`
- **Path**: `grafana/`
This allows teams to use Git branch workflows for collaboration while sharing the same repository.

View File

@@ -1,86 +0,0 @@
---
title: Single instance Git Sync
menuTitle: Single instance
description: Synchronize a single Grafana instance with a Git repository
weight: 10
---
# Single instance Git Sync
Use a single Grafana instance synchronized with a Git repository. This is the foundation for Git Sync and helps you understand bidirectional synchronization.
## Use it for
- **Getting started**: You want to learn how Git Sync works before implementing complex scenarios.
- **Personal projects**: Individual developers manage their own dashboards.
- **Small teams**: You have a simple setup without multiple environments or complex workflows.
- **Development environments**: You need quick prototyping and testing.
## Architecture
```
┌─────────────────────────────────────────────────────┐
│ GitHub Repository │
│ Repository: your-org/grafana-manifests │
│ Branch: main │
│ │
│ grafana-manifests/ │
│ └── grafana/ │
│ ├── dashboard-1.json │
│ ├── dashboard-2.json │
│ └── dashboard-3.json │
└─────────────────────────────────────────────────────┘
Git Sync (bidirectional)
┌─────────────────────────────┐
│ Grafana Instance │
│ │
│ Repository Resource: │
│ - url: grafana-manifests │
│ - branch: main │
│ - path: grafana/ │
│ │
│ Creates folder: │
│ "grafana-manifests" │
└─────────────────────────────┘
```
## Repository structure
**In Git:**
```
your-org/grafana-manifests
└── grafana/
├── dashboard-1.json
├── dashboard-2.json
└── dashboard-3.json
```
**In Grafana Dashboards view:**
```
Dashboards
└── 📁 grafana-manifests/
├── Dashboard 1
├── Dashboard 2
└── Dashboard 3
```
- A folder named "grafana-manifests" (from repository name) contains all synced dashboards.
- Each JSON file becomes a dashboard with its title displayed in the folder.
- Users browse dashboards organized under this folder structure.
## Configuration parameters
Configure your Grafana instance to synchronize with:
- **Repository**: `your-org/grafana-manifests`
- **Branch**: `main`
- **Path**: `grafana/`
## How it works
1. **From Grafana to Git**: When users create or modify dashboards in Grafana, Git Sync commits changes to the `grafana/` directory on the `main` branch.
2. **From Git to Grafana**: When dashboard JSON files are added or modified in the `grafana/` directory, Git Sync pulls these changes into Grafana.

View File

@@ -367,6 +367,5 @@ To learn more about using Git Sync:
- [Work with provisioned dashboards](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/provisioned-dashboards/)
- [Manage provisioned repositories with Git Sync](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/use-git-sync/)
- [Git Sync deployment scenarios](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios)
- [Export resources](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/export-resources/)
- [grafanactl documentation](https://grafana.github.io/grafanactl/)

View File

@@ -127,13 +127,7 @@ An instance can be in one of the following Git Sync states:
## Common use cases
{{< admonition type="note" >}}
Refer to [Git Sync deployment scenarios](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios) for sample scenarios, including architecture and configuration details.
{{< /admonition >}}
You can use Git Sync for the following use cases:
You can use Git Sync in the following scenarios.
### Version control and auditing

View File

@@ -14,7 +14,7 @@ labels:
- cloud
title: Manage provisioned repositories with Git Sync
menuTitle: Manage repositories with Git Sync
weight: 400
weight: 120
canonical: https://grafana.com/docs/grafana/latest/as-code/observability-as-code/provision-resources/use-git-sync/
aliases:
- ../../../observability-as-code/provision-resources/use-git-sync/ # /docs/grafana/next/observability-as-code/provision-resources/use-git-sync/

View File

@@ -62,6 +62,5 @@ The table includes default and other fields:
| targetBlank | bool. If true, the link will be opened in a new tab. Default is `false`. |
| includeVars | bool. If true, includes current template variables values in the link as query params. Default is `false`. |
| keepTime | bool. If true, includes current time range in the link as query params. Default is `false`. |
| placement? | string. Use placement to display the link somewhere else on the dashboard other than above the visualizations. Use the `inControlsMenu` parameter to render the link in the dashboard controls dropdown menu. |
<!-- prettier-ignore-end -->

View File

@@ -17,6 +17,16 @@ menuTitle: Elasticsearch
title: Elasticsearch data source
weight: 325
refs:
configuration:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#sigv4_auth_enabled
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#sigv4_auth_enabled
provisioning-grafana:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
explore:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
@@ -34,36 +44,12 @@ refs:
Elasticsearch is a search and analytics engine used for a variety of use cases.
You can create many types of queries to visualize logs or metrics stored in Elasticsearch, and annotate graphs with log events stored in Elasticsearch.
The following resources will help you get started with Elasticsearch and Grafana:
The following will help you get started working with Elasticsearch and Grafana:
- [What is Elasticsearch?](https://www.elastic.co/guide/en/elasticsearch/reference/current/elasticsearch-intro.html)
- [Configure the Elasticsearch data source](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/configure/)
- [Elasticsearch query editor](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/query-editor/)
- [Elasticsearch template variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/template-variables/)
- [Elasticsearch annotations](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/annotations/)
- [Elasticsearch alerting](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/alerting/)
- [Troubleshooting issues with the Elasticsearch data source](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/troubleshooting/)
## Key capabilities
The Elasticsearch data source supports:
- **Metrics queries:** Aggregate and visualize numeric data using bucket and metric aggregations.
- **Log queries:** Search, filter, and explore log data with Lucene query syntax.
- **Annotations:** Overlay Elasticsearch events on your dashboard graphs.
- **Alerting:** Create alerts based on Elasticsearch query results.
## Before you begin
Before you configure the Elasticsearch data source, you need:
- An Elasticsearch instance (v7.17+, v8.x, or v9.x)
- Network access from Grafana to your Elasticsearch server
- Appropriate user credentials or API keys with read access
{{< admonition type="note" >}}
If you use Amazon OpenSearch Service (the successor to Amazon Elasticsearch Service), use the [OpenSearch data source](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/opensearch/) instead.
{{< /admonition >}}
- [Configure the Elasticsearch data source](/docs/grafana/latest/datasources/elasticsearch/configure-elasticsearch-data-source/)
- [Elasticsearch query editor](query-editor/)
- [Elasticsearch template variables](template-variables/)
## Supported Elasticsearch versions
@@ -77,18 +63,86 @@ This data source supports these versions of Elasticsearch:
- v8.x
- v9.x
The Grafana maintenance policy for the Elasticsearch data source aligns with [Elastic Product End of Life Dates](https://www.elastic.co/support/eol). Grafana ensures proper functionality for supported versions only. If you use an EOL version of Elasticsearch, you can still run queries, but the query builder displays a warning. Grafana doesn't guarantee functionality or provide fixes for EOL versions.
Our maintenance policy for Elasticsearch data source is aligned with the [Elastic Product End of Life Dates](https://www.elastic.co/support/eol) and we ensure proper functionality for supported versions. If you are using an Elasticsearch with version that is past its end-of-life (EOL), you can still execute queries, but you will receive a notification in the query builder indicating that the version of Elasticsearch you are using is no longer supported. It's important to note that in such cases, we do not guarantee the correctness of the functionality, and we will not be addressing any related issues.
## Additional resources
## Provision the data source
Once you have configured the Elasticsearch data source, you can:
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-grafana).
- Use [Explore](ref:explore) to run ad-hoc queries against your Elasticsearch data.
- Configure and use [template variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/template-variables/) for dynamic dashboards.
- Add [Transformations](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/) to process query results.
- [Build dashboards](ref:build-dashboards) to visualize your Elasticsearch data.
{{< admonition type="note" >}}
The previously used `database` field has now been [deprecated](https://github.com/grafana/grafana/pull/58647).
You should now use the `index` field in `jsonData` to store the index name.
Please see the examples below.
{{< /admonition >}}
## Related data sources
### Provisioning examples
- [OpenSearch](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/opensearch/) - For Amazon OpenSearch Service.
- [Loki](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/loki/) - Grafana's log aggregation system.
**Basic provisioning**
```yaml
apiVersion: 1
datasources:
- name: Elastic
type: elasticsearch
access: proxy
url: http://localhost:9200
jsonData:
index: '[metrics-]YYYY.MM.DD'
interval: Daily
timeField: '@timestamp'
```
**Provision for logs**
```yaml
apiVersion: 1
datasources:
- name: elasticsearch-v7-filebeat
type: elasticsearch
access: proxy
url: http://localhost:9200
jsonData:
index: '[filebeat-]YYYY.MM.DD'
interval: Daily
timeField: '@timestamp'
logMessageField: message
logLevelField: fields.level
dataLinks:
- datasourceUid: my_jaeger_uid # Target UID needs to be known
field: traceID
url: '$${__value.raw}' # Careful about the double "$$" because of env var expansion
```
## Configure Amazon Elasticsearch Service
If you use Amazon Elasticsearch Service, you can use Grafana's Elasticsearch data source to visualize data from it.
If you use an AWS Identity and Access Management (IAM) policy to control access to your Amazon Elasticsearch Service domain, you must use AWS Signature Version 4 (AWS SigV4) to sign all requests to that domain.
For details on AWS SigV4, refer to the [AWS documentation](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
### AWS Signature Version 4 authentication
To sign requests to your Amazon Elasticsearch Service domain, you can enable SigV4 in Grafana's [configuration](ref:configuration).
Once AWS SigV4 is enabled, you can configure it on the Elasticsearch data source configuration page.
For more information about AWS authentication options, refer to [AWS authentication](../aws-cloudwatch/aws-authentication/).
{{< figure src="/static/img/docs/v73/elasticsearch-sigv4-config-editor.png" max-width="500px" class="docs-image--no-shadow" caption="SigV4 configuration for AWS Elasticsearch Service" >}}
## Query the data source
You can select multiple metrics and group by multiple terms or filters when using the Elasticsearch query editor.
For details, see the [query editor documentation](query-editor/).
## Use template variables
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
Grafana refers to such variables as template variables.
For details, see the [template variables documentation](template-variables/).

View File

@@ -1,144 +0,0 @@
---
aliases:
- ../../data-sources/elasticsearch/alerting/
description: Using Grafana Alerting with the Elasticsearch data source
keywords:
- grafana
- elasticsearch
- alerting
- alerts
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Alerting
title: Elasticsearch alerting
weight: 550
refs:
alerting:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/alerting-and-irm/alerting/
create-alert-rule:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-grafana-managed-rule/
---
# Elasticsearch alerting
You can use Grafana Alerting with Elasticsearch to create alerts based on your Elasticsearch data. This allows you to monitor metrics, detect anomalies, and receive notifications when specific conditions are met.
For general information about Grafana Alerting, refer to [Grafana Alerting](ref:alerting).
## Before you begin
Before creating alerts with Elasticsearch, ensure you have:
- An Elasticsearch data source configured in Grafana
- Appropriate permissions to create alert rules
- Understanding of the metrics you want to monitor
## Supported query types
Elasticsearch alerting works best with **metrics queries** that return time series data. To create a valid alert query:
- Use a **Date histogram** as the last bucket aggregation (under **Group by**)
- Select appropriate metric aggregations (Count, Average, Sum, Min, Max, etc.)
Queries that return time series data allow Grafana to evaluate values over time and trigger alerts when thresholds are crossed.
### Query types and alerting compatibility
| Query type | Alerting support | Notes |
| ------------------------------ | ---------------- | ----------------------------------------------------------- |
| Metrics with Date histogram | ✅ Full support | Recommended for alerting |
| Metrics without Date histogram | ⚠️ Limited | May not evaluate correctly over time |
| Logs | ❌ Not supported | Use metrics queries instead |
| Raw data | ❌ Not supported | Use metrics queries instead |
| Raw document (deprecated) | ❌ Not supported | Deprecated since Grafana v10.1. Use metrics queries instead |
## Create an alert rule
To create an alert rule using Elasticsearch:
1. Navigate to **Alerting** > **Alert rules**.
1. Click **New alert rule**.
1. Enter a name for the alert rule.
1. Select your **Elasticsearch** data source.
1. Build your query using the query editor:
- Add metric aggregations (for example, Average, Count, Sum)
- Add a Date histogram under **Group by**
- Optionally add filters using Lucene query syntax
1. Configure the alert condition (for example, when the average is above a threshold).
1. Set the evaluation interval and pending period.
1. Configure notifications and labels.
1. Click **Save rule**.
For detailed instructions, refer to [Create a Grafana-managed alert rule](ref:create-alert-rule).
## Example alert queries
The following examples show common alerting scenarios with Elasticsearch.
### Alert on high error count
Monitor the number of error-level log entries:
1. **Query:** `level:error`
1. **Metric:** Count
1. **Group by:** Date histogram (interval: 1m)
1. **Condition:** When count is above 100
### Alert on average response time
Monitor API response times:
1. **Query:** `type:api_request`
1. **Metric:** Average on field `response_time`
1. **Group by:** Date histogram (interval: 5m)
1. **Condition:** When average is above 500 (milliseconds)
### Alert on unique user count drop
Detect drops in active users:
1. **Query:** `*` (all documents)
1. **Metric:** Unique count on field `user_id`
1. **Group by:** Date histogram (interval: 1h)
1. **Condition:** When unique count is below 100
## Limitations
When using Elasticsearch with Grafana Alerting, be aware of the following limitations:
### Template variables not supported
Alert queries cannot contain template variables. Grafana evaluates alert rules on the backend without dashboard context, so variables like `$hostname` or `$environment` won't be resolved.
If your dashboard query uses template variables, create a separate query for alerting with hard-coded values.
### Logs queries not supported
Queries using the **Logs** metric type cannot be used for alerting. Convert your query to use metric aggregations with a Date histogram instead.
### Query complexity
Complex queries with many nested aggregations may timeout or fail to evaluate. Simplify queries for alerting by:
- Reducing the number of bucket aggregations
- Using appropriate time intervals
- Adding filters to limit the data scanned
## Best practices
Follow these best practices when creating Elasticsearch alerts:
- **Use specific filters:** Add Lucene query filters to focus on relevant data and improve query performance.
- **Choose appropriate intervals:** Match the Date histogram interval to your evaluation frequency.
- **Test queries first:** Verify your query returns expected results in Explore before creating an alert.
- **Set realistic thresholds:** Base alert thresholds on historical data patterns.
- **Use meaningful names:** Give alert rules descriptive names that indicate what they monitor.

View File

@@ -1,124 +0,0 @@
---
aliases:
- ../../data-sources/elasticsearch/annotations/
description: Using annotations with Elasticsearch in Grafana
keywords:
- grafana
- elasticsearch
- annotations
- events
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Annotations
title: Elasticsearch annotations
weight: 500
refs:
annotate-visualizations:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
---
# Elasticsearch annotations
Annotations overlay event data on your dashboard graphs, helping you correlate log events with metrics.
You can use Elasticsearch as a data source for annotations to display events such as deployments, alerts, or other significant occurrences on your visualizations.
For general information about annotations, refer to [Annotate visualizations](ref:annotate-visualizations).
## Before you begin
Before creating Elasticsearch annotations, ensure you have:
- An Elasticsearch data source configured in Grafana
- Documents in Elasticsearch containing event data with timestamp fields
- Read access to the Elasticsearch index containing your events
## Create an annotation query
To add an Elasticsearch annotation to your dashboard:
1. Navigate to your dashboard and click **Dashboard settings** (gear icon).
1. Select **Annotations** in the left menu.
1. Click **Add annotation query**.
1. Enter a **Name** for the annotation.
1. Select your **Elasticsearch** data source from the **Data source** drop-down.
1. Configure the annotation query and field mappings.
1. Click **Save dashboard**.
## Query
Use the query field to filter which Elasticsearch documents appear as annotations. The query uses [Lucene query syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#query-string-syntax).
**Examples:**
| Query | Description |
| ---------------------------------------- | ---------------------------------------------------- |
| `*` | Matches all documents. |
| `type:deployment` | Shows only deployment events. |
| `level:error OR level:critical` | Shows error and critical events. |
| `service:api AND environment:production` | Shows events for a specific service and environment. |
| `tags:release` | Shows events tagged as releases. |
You can use template variables in your annotation queries. For example, `service:$service` filters annotations based on the selected service variable.
## Field mappings
Field mappings tell Grafana which Elasticsearch fields contain the annotation data.
### Time
The **Time** field specifies which field contains the annotation timestamp.
- **Default:** `@timestamp`
- **Format:** The field must contain a date value that Elasticsearch recognizes.
### Time End
The **Time End** field specifies a field containing the end time for range annotations. Range annotations display as a shaded region on the graph instead of a single vertical line.
- **Default:** Empty (single-point annotations)
- **Use case:** Display maintenance windows, incidents, or any event with a duration.
### Text
The **Text** field specifies which field contains the annotation description displayed when you hover over the annotation.
- **Default:** `tags`
- **Tip:** Use a descriptive field like `message`, `description`, or `summary`.
### Tags
The **Tags** field specifies which field contains tags for the annotation. Tags help categorize and filter annotations.
- **Default:** Empty
- **Format:** The field can contain either a comma-separated string or an array of strings.
## Example: Deployment annotations
To display deployment events as annotations:
1. Create an annotation query with the following settings:
- **Query:** `type:deployment`
- **Time:** `@timestamp`
- **Text:** `message`
- **Tags:** `environment`
This configuration displays deployment events with their messages as the annotation text and environments as tags.
## Example: Range annotations for incidents
To display incidents with duration:
1. Create an annotation query with the following settings:
- **Query:** `type:incident`
- **Time:** `start_time`
- **Time End:** `end_time`
- **Text:** `description`
- **Tags:** `severity`
This configuration displays incidents as shaded regions from their start time to end time.

View File

@@ -0,0 +1,209 @@
---
aliases:
- ../data-sources/elasticsearch/
- ../features/datasources/elasticsearch/
description: Guide for configuring the Elasticsearch data source in Grafana
keywords:
- grafana
- elasticsearch
- guide
- data source
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Configure Elasticsearch
title: Configure the Elasticsearch data source
weight: 200
refs:
administration-documentation:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
supported-expressions:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#log-level
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#log-level
query-and-transform-data:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/visualizations/panels-visualizations/query-transform-data/
provisioning-data-source:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/#provision-the-data-source
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/connect-externally-hosted/data-sources/elasticsearch/#provision-the-data-source
---
# Configure the Elasticsearch data source
Grafana ships with built-in support for Elasticsearch.
You can create a variety of queries to visualize logs or metrics stored in Elasticsearch, and annotate graphs with log events stored in Elasticsearch.
For instructions on how to add a data source to Grafana, refer to the [administration documentation](ref:administration-documentation).
Only users with the organization `administrator` role can add data sources.
Administrators can also [configure the data source via YAML](ref:provisioning-data-source) with Grafana's provisioning system.
## Configuring permissions
When Elasticsearch security features are enabled, it is essential to configure the necessary cluster privileges to ensure seamless operation. Below is a list of the required privileges along with their purposes:
- **monitor** - Necessary to retrieve the version information of the connected Elasticsearch instance.
- **view_index_metadata** - Required for accessing mapping definitions of indices.
- **read** - Grants the ability to perform search and retrieval operations on indices. This is essential for querying and extracting data from the cluster.
## Add the data source
To add the Elasticsearch data source, complete the following steps:
1. Click **Connections** in the left-side menu.
1. Under **Connections**, click **Add new connection**.
1. Enter `Elasticsearch` in the search bar.
1. Click **Elasticsearch** under the **Data source** section.
1. Click **Add new data source** in the upper right.
You will be taken to the **Settings** tab where you will set up your Elasticsearch configuration.
## Configuration options
The following is a list of configuration options for Elasticsearch.
The first option to configure is the name of your connection:
- **Name** - The data source name. This is how you refer to the data source in panels and queries. Examples: elastic-1, elasticsearch_metrics.
- **Default** - Toggle to select as the default data source option. When you go to a dashboard panel or Explore, this will be the default selected data source.
## Connection
Connect the Elasticsearch data source by specifying a URL.
- **URL** - The URL of your Elasticsearch server. If your Elasticsearch server is local, use `http://localhost:9200`. If it is on a server within a network, this is the URL with the port where you are running Elasticsearch. Example: `http://elasticsearch.example.orgname:9200`.
## Authentication
There are several authentication methods you can choose in the Authentication section.
Select one of the following authentication methods from the dropdown menu.
- **Basic authentication** - The most common authentication method. Use your `data source` user name and `data source` password to connect.
- **Forward OAuth identity** - Forward the OAuth access token (and the OIDC ID token if available) of the user querying the data source.
- **No authentication** - Make the data source available without authentication. Grafana recommends using some type of authentication method.
<!-- - **With credentials** - Toggle to enable credentials such as cookies or auth headers to be sent with cross-site requests. -->
### TLS settings
{{< admonition type="note" >}}
Use TLS (Transport Layer Security) for an additional layer of security when working with Elasticsearch. For information on setting up TLS encryption with Elasticsearch see [Configure TLS](https://www.elastic.co/guide/en/elasticsearch/reference/8.8/configuring-tls.html#configuring-tls). You must add TLS settings to your Elasticsearch configuration file **prior** to setting these options in Grafana.
{{< /admonition >}}
- **Add self-signed certificate** - Check the box to authenticate with a CA certificate. Follow the instructions of the CA (Certificate Authority) to download the certificate file. Required for verifying self-signed TLS certificates.
- **TLS client authentication** - Check the box to authenticate with the TLS client, where the server authenticates the client. Add the `Server name`, `Client certificate` and `Client key`. The **ServerName** is used to verify the hostname on the returned certificate. The **Client certificate** can be generated from a Certificate Authority (CA) or be self-signed. The **Client key** can also be generated from a Certificate Authority (CA) or be self-signed. The client key encrypts the data between client and server.
- **Skip TLS certificate validation** - Check the box to bypass TLS certificate validation. Skipping TLS certificate validation is not recommended unless absolutely necessary or for testing purposes.
### HTTP headers
Click **+ Add header** to add one or more HTTP headers. HTTP headers pass additional context and metadata about the request/response.
- **Header** - Add a custom header. This allows custom headers to be passed based on the needs of your Elasticsearch instance.
- **Value** - The value of the header.
## Additional settings
Additional settings are optional settings that can be configured for more control over your data source.
### Advanced HTTP settings
- **Allowed cookies** - Specify cookies by name that should be forwarded to the data source. The Grafana proxy deletes all forwarded cookies by default.
- **Timeout** - The HTTP request timeout. This must be in seconds. There is no default, so this setting is up to you.
### Elasticsearch details
The following settings are specific to the Elasticsearch data source.
- **Index name** - Use the index settings to specify a default for the `time field` and your Elasticsearch index's name. You can use a time pattern, for example `[logstash-]YYYY.MM.DD`, or a wildcard for the index name. When specifying a time pattern, the fixed part(s) of the pattern should be wrapped in square brackets.
- **Pattern** - Select the matching pattern if using one in your index name. Options include:
- no pattern
- hourly
- daily
- weekly
- monthly
- yearly
Only select a pattern option if you have specified a time pattern in the Index name field.
- **Time field name** - Name of the time field. The default value is @timestamp. You can enter a different name.
- **Max concurrent shard requests** - Sets the number of shards being queried at the same time. The default is `5`. For more information on shards see [Elasticsearch's documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/scalability.html#scalability).
- **Min time interval** - Defines a lower limit for the auto group-by time interval. This value **must** be formatted as a number followed by a valid time identifier:
| Identifier | Description |
| ---------- | ----------- |
| `y` | year |
| `M` | month |
| `w` | week |
| `d` | day |
| `h` | hour |
| `m` | minute |
| `s` | second |
| `ms` | millisecond |
We recommend setting this value to match your Elasticsearch write frequency.
For example, set this to `1m` if Elasticsearch writes data every minute.
You can also override this setting in a dashboard panel under its data source options. The default is `10s`.
- **X-Pack enabled** - Toggle to enable `X-Pack`-specific features and options, which provide the [query editor](../query-editor/) with additional aggregations, such as `Rate` and `Top Metrics`.
- **Include frozen indices** - Toggle on when the `X-Pack enabled` setting is active. Includes frozen indices in searches. You can configure Grafana to include [frozen indices](https://www.elastic.co/guide/en/elasticsearch/reference/7.13/frozen-indices.html) when performing search requests.
{{< admonition type="note" >}}
Frozen indices are [deprecated in Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/frozen-indices.html) since v7.14.
{{< /admonition >}}
- **Default query mode** - Specifies which query mode the data source uses by default. Options are `Metrics`, `Logs`, `Raw data`, and `Raw document`. The default is `Metrics`.
### Logs
In this section you can configure which fields the data source uses for log messages and log levels.
- **Message field name:** - Grabs the actual log message from the default source.
- **Level field name:** - Name of the field with log level/severity information. When a level label is specified, the value of this label is used to determine the log level and update the color of each log line accordingly. If the log doesn't have a specified level label, we try to determine if its content matches any of the [supported expressions](ref:supported-expressions). The first match always determines the log level. If Grafana cannot infer a log-level field, it will be visualized with an unknown log level.
### Data links
Data links create a link from a specified field that can be accessed in Explore's logs view. You can add multiple data links by clicking **+ Add**.
Each data link configuration consists of:
- **Field** - Sets the name of the field used by the data link.
- **URL/query** - Sets the full link URL if the link is external. If the link is internal, this input serves as a query for the target data source.<br/>In both cases, you can interpolate the value from the field with the `${__value.raw }` macro.
- **URL Label** (Optional) - Sets a custom display label for the link. The link label defaults to the full external URL or name of the linked internal data source and is overridden by this setting.
- **Internal link** - Toggle on to set an internal link. For an internal link, you can select the target data source with a data source selector. This supports only tracing data sources.
## Private data source connect (PDC) and Elasticsearch
Use private data source connect (PDC) to connect to and query data within a secure network without opening that network to inbound traffic from Grafana Cloud. See [Private data source connect](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) for more information on how PDC works and [Configure Grafana private data source connect (PDC)](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc) for steps on setting up a PDC connection.
If you use PDC with SIGv4 (AWS Signature Version 4 Authentication), the PDC agent must allow internet egress to `sts.<region>.amazonaws.com:443`.
- **Private data source connect** - Click in the box to set the default PDC connection from the dropdown menu or create a new connection.
Once you have configured your Elasticsearch data source options, click **Save & test** at the bottom to test out your data source connection. You can also remove a connection by clicking **Delete**.

View File

@@ -1,377 +0,0 @@
---
aliases:
- ../configure-elasticsearch-data-source/
description: Guide for configuring the Elasticsearch data source in Grafana
keywords:
- grafana
- elasticsearch
- guide
- data source
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Configure
title: Configure the Elasticsearch data source
weight: 200
refs:
administration-documentation:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
supported-expressions:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#log-level
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#log-level
query-and-transform-data:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/visualizations/panels-visualizations/query-transform-data/
provisioning-data-source:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/configure/#provision-the-data-source
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/connect-externally-hosted/data-sources/elasticsearch/configure/#provision-the-data-source
configuration:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#sigv4_auth_enabled
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#sigv4_auth_enabled
provisioning-grafana:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/
---
# Configure the Elasticsearch data source
Grafana ships with built-in support for Elasticsearch.
You can create a variety of queries to visualize logs or metrics stored in Elasticsearch, and annotate graphs with log events stored in Elasticsearch.
For instructions on how to add a data source to Grafana, refer to the [administration documentation](ref:administration-documentation).
Administrators can also [configure the data source via YAML](ref:provisioning-data-source) with Grafana's provisioning system.
## Before you begin
To configure the Elasticsearch data source, you need:
- **Grafana administrator permissions:** Only users with the organization `administrator` role can add data sources.
- **A supported Elasticsearch version:** v7.17 or later, v8.x, or v9.x. Elastic Cloud Serverless isn't supported.
- **Elasticsearch server URL:** The HTTP or HTTPS endpoint for your Elasticsearch instance, including the port (default: `9200`).
- **Authentication credentials:** Depending on your Elasticsearch security configuration, you need one of the following:
- Username and password for basic authentication
- API key
- No credentials (if Elasticsearch security is disabled)
- **Network access:** Grafana must be able to reach your Elasticsearch server. For Grafana Cloud, consider using [Private data source connect (PDC)](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) if your Elasticsearch instance is in a private network.
## Elasticsearch permissions
When Elasticsearch security features are enabled, you must configure the following cluster privileges for the user or API key that Grafana uses to connect:
- **monitor** - Necessary to retrieve the version information of the connected Elasticsearch instance.
- **view_index_metadata** - Required for accessing mapping definitions of indices.
- **read** - Grants the ability to perform search and retrieval operations on indices. This is essential for querying and extracting data from the cluster.
## Add the data source
To add the Elasticsearch data source, complete the following steps:
1. Click **Connections** in the left-side menu.
1. Under **Connections**, click **Add new connection**.
1. Enter `Elasticsearch` in the search bar.
1. Click **Elasticsearch** under the **Data source** section.
1. Click **Add new data source** in the upper right.
You will be taken to the **Settings** tab where you will set up your Elasticsearch configuration.
## Configuration options
Configure the following basic settings for the Elasticsearch data source:
- **Name** - The data source name. This is how you refer to the data source in panels and queries. Examples: `elastic-1`, `elasticsearch_metrics`.
- **Default** - Toggle on to make this the default data source. New panels and Explore queries use the default data source.
## Connection
- **URL** - The URL of your Elasticsearch server, including the port. Examples: `http://localhost:9200`, `http://elasticsearch.example.com:9200`.
## Authentication
Select an authentication method from the drop-down menu:
- **Basic authentication** - Enter the username and password for your Elasticsearch user.
- **Forward OAuth identity** - Forward the OAuth access token (and the OIDC ID token if available) of the user querying the data source.
- **No authentication** - Connect without credentials. Only use this option if your Elasticsearch instance doesn't require authentication.
### API key authentication
To authenticate using an Elasticsearch API key, select **No authentication** and configure the API key using HTTP headers:
1. In the **HTTP headers** section, click **+ Add header**.
1. Set **Header** to `Authorization`.
1. Set **Value** to `ApiKey <your-api-key>`, replacing `<your-api-key>` with your base64-encoded Elasticsearch API key.
For information about creating API keys, refer to the [Elasticsearch API keys documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html).
### Amazon Elasticsearch Service
If you use Amazon Elasticsearch Service, you can use Grafana's Elasticsearch data source to visualize data from it.
If you use an AWS Identity and Access Management (IAM) policy to control access to your Amazon Elasticsearch Service domain, you must use AWS Signature Version 4 (AWS SigV4) to sign all requests to that domain.
For details on AWS SigV4, refer to the [AWS documentation](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
To sign requests to your Amazon Elasticsearch Service domain, you can enable SigV4 in Grafana's [configuration](ref:configuration).
Once AWS SigV4 is enabled, you can configure it on the Elasticsearch data source configuration page.
For more information about AWS authentication options, refer to [AWS authentication](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/).
{{< figure src="/static/img/docs/v73/elasticsearch-sigv4-config-editor.png" max-width="500px" class="docs-image--no-shadow" caption="SigV4 configuration for AWS Elasticsearch Service" >}}
### TLS settings
{{< admonition type="note" >}}
Use TLS (Transport Layer Security) for an additional layer of security when working with Elasticsearch. For information on setting up TLS encryption with Elasticsearch, refer to [Configure TLS](https://www.elastic.co/guide/en/elasticsearch/reference/8.8/configuring-tls.html#configuring-tls). You must add TLS settings to your Elasticsearch configuration file **prior** to setting these options in Grafana.
{{< /admonition >}}
- **Add self-signed certificate** - Check the box to authenticate with a CA certificate. Follow the instructions of the CA (Certificate Authority) to download the certificate file. Required for verifying self-signed TLS certificates.
- **TLS client authentication** - Check the box to authenticate with the TLS client, where the server authenticates the client. Add the `Server name`, `Client certificate` and `Client key`. The **ServerName** is used to verify the hostname on the returned certificate. The **Client certificate** can be generated from a Certificate Authority (CA) or be self-signed. The **Client key** can also be generated from a Certificate Authority (CA) or be self-signed. The client key encrypts the data between client and server.
- **Skip TLS certificate validation** - Check the box to bypass TLS certificate validation. Skipping TLS certificate validation is not recommended unless absolutely necessary or for testing purposes.
### HTTP headers
Click **+ Add header** to add one or more HTTP headers. HTTP headers pass additional context and metadata about the request/response.
- **Header** - Add a custom header. This allows custom headers to be passed based on the needs of your Elasticsearch instance.
- **Value** - The value of the header.
## Additional settings
Additional settings are optional settings that can be configured for more control over your data source.
### Advanced HTTP settings
- **Allowed cookies** - Specify cookies by name that should be forwarded to the data source. The Grafana proxy deletes all forwarded cookies by default.
- **Timeout** - The HTTP request timeout. This must be in seconds. There is no default, so this setting is up to you.
### Elasticsearch details
The following settings are specific to the Elasticsearch data source.
- **Index name** - The name of your Elasticsearch index. You can use the following formats:
- **Wildcard patterns** - Use `*` to match multiple indices. Examples: `logs-*`, `metrics-*`, `filebeat-*`.
- **Time patterns** - Use date placeholders for time-based indices. Wrap the fixed portion in square brackets. Examples: `[logstash-]YYYY.MM.DD`, `[metrics-]YYYY.MM`.
- **Specific index** - Enter the exact index name. Example: `application-logs`.
- **Pattern** - Select the matching pattern if you use a time pattern in your index name. Options include:
- no pattern
- hourly
- daily
- weekly
- monthly
- yearly
Only select a pattern option if you have specified a time pattern in the Index name field.
- **Time field name** - Name of the time field. The default value is `@timestamp`. You can enter a different name.
- **Max concurrent shard requests** - Sets the number of shards being queried at the same time. The default is `5`. For more information on shards, refer to the [Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/scalability.html#scalability).
- **Min time interval** - Defines a lower limit for the auto group-by time interval. This value **must** be formatted as a number followed by a valid time identifier:
| Identifier | Description |
| ---------- | ----------- |
| `y` | year |
| `M` | month |
| `w` | week |
| `d` | day |
| `h` | hour |
| `m` | minute |
| `s` | second |
| `ms` | millisecond |
We recommend setting this value to match your Elasticsearch write frequency.
For example, set this to `1m` if Elasticsearch writes data every minute.
You can also override this setting in a dashboard panel under its data source options. The default is `10s`.
- **X-Pack enabled** - Toggle to enable `X-Pack`-specific features and options, which provide the [query editor](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/query-editor/) with additional aggregations, such as `Rate` and `Top Metrics`.
- **Include frozen indices** - Toggle on when the `X-Pack enabled` setting is active. Includes frozen indices in searches. You can configure Grafana to include [frozen indices](https://www.elastic.co/guide/en/elasticsearch/reference/7.13/frozen-indices.html) when performing search requests.
{{< admonition type="note" >}}
Frozen indices are [deprecated in Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/frozen-indices.html) since v7.14.
{{< /admonition >}}
### Logs
Configure which fields the data source uses for log messages and log levels.
- **Message field name** - The field that contains the log message content.
- **Level field name** - The field that contains log level or severity information. When specified, Grafana uses this field to determine the log level and color-code each log line. If the log doesn't have a level field, Grafana tries to match the content against [supported expressions](ref:supported-expressions). If Grafana can't determine the log level, it displays as unknown.
### Data links
Data links create a link from a specified field that can be accessed in Explore's logs view. You can add multiple data links by clicking **+ Add**.
Each data link configuration consists of:
- **Field** - Sets the name of the field used by the data link.
- **URL/query** - Sets the full link URL if the link is external. If the link is internal, this input serves as a query for the target data source.<br/>In both cases, you can interpolate the value from the field with the `${__value.raw}` macro.
- **URL Label** (Optional) - Sets a custom display label for the link. The link label defaults to the full external URL or name of the linked internal data source and is overridden by this setting.
- **Internal link** - Toggle on to set an internal link. For an internal link, you can select the target data source with a data source selector. This supports only tracing data sources.
## Private data source connect (PDC) and Elasticsearch
Use private data source connect (PDC) to connect to and query data within a secure network without opening that network to inbound traffic from Grafana Cloud. Refer to [Private data source connect](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) for more information on how PDC works and [Configure Grafana private data source connect (PDC)](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc) for steps on setting up a PDC connection.
If you use PDC with SigV4 (AWS Signature Version 4 Authentication), the PDC agent must allow internet egress to `sts.<region>.amazonaws.com:443`.
- **Private data source connect** - Click in the box to set the default PDC connection from the drop-down menu or create a new connection.
Once you have configured your Elasticsearch data source options, click **Save & test** to test the connection. A successful connection displays the following message:
`Elasticsearch data source is healthy.`
## Provision the data source
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-grafana).
{{< admonition type="note" >}}
The previously used `database` field has now been [deprecated](https://github.com/grafana/grafana/pull/58647).
Use the `index` field in `jsonData` to store the index name.
Refer to the examples below.
{{< /admonition >}}
### Basic provisioning
```yaml
apiVersion: 1
datasources:
- name: Elastic
type: elasticsearch
access: proxy
url: http://localhost:9200
jsonData:
index: '[metrics-]YYYY.MM.DD'
interval: Daily
timeField: '@timestamp'
```
### Provision for logs
```yaml
apiVersion: 1
datasources:
- name: elasticsearch-v7-filebeat
type: elasticsearch
access: proxy
url: http://localhost:9200
jsonData:
index: '[filebeat-]YYYY.MM.DD'
interval: Daily
timeField: '@timestamp'
logMessageField: message
logLevelField: fields.level
dataLinks:
- datasourceUid: my_jaeger_uid # Target UID needs to be known
field: traceID
url: '$${__value.raw}' # Careful about the double "$$" because of env var expansion
```
## Provision the data source using Terraform
You can provision the Elasticsearch data source using [Terraform](https://www.terraform.io/) with the [Grafana Terraform provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs).
For more information about provisioning resources with Terraform, refer to the [Grafana as code using Terraform](https://grafana.com/docs/grafana-cloud/developer-resources/infrastructure-as-code/terraform/) documentation.
### Basic Terraform example
The following example creates a basic Elasticsearch data source for metrics:
```hcl
resource "grafana_data_source" "elasticsearch" {
name = "Elasticsearch"
type = "elasticsearch"
url = "http://localhost:9200"
json_data_encoded = jsonencode({
index = "[metrics-]YYYY.MM.DD"
interval = "Daily"
timeField = "@timestamp"
})
}
```
### Terraform example for logs
The following example creates an Elasticsearch data source configured for logs with a data link to Jaeger:
```hcl
resource "grafana_data_source" "elasticsearch_logs" {
name = "Elasticsearch Logs"
type = "elasticsearch"
url = "http://localhost:9200"
json_data_encoded = jsonencode({
index = "[filebeat-]YYYY.MM.DD"
interval = "Daily"
timeField = "@timestamp"
logMessageField = "message"
logLevelField = "fields.level"
dataLinks = [
{
datasourceUid = grafana_data_source.jaeger.uid
field = "traceID"
url = "$${__value.raw}"
}
]
})
}
```
### Terraform example with basic authentication
The following example includes basic authentication:
```hcl
resource "grafana_data_source" "elasticsearch_auth" {
name = "Elasticsearch"
type = "elasticsearch"
url = "http://localhost:9200"
basic_auth_enabled = true
basic_auth_username = "elastic_user"
secure_json_data_encoded = jsonencode({
basicAuthPassword = var.elasticsearch_password
})
json_data_encoded = jsonencode({
index = "[metrics-]YYYY.MM.DD"
interval = "Daily"
timeField = "@timestamp"
})
}
```
For all available configuration options, refer to the [Grafana provider data source resource documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/data_source).

View File

@@ -30,7 +30,7 @@ refs:
# Elasticsearch query editor
Grafana provides a query editor for Elasticsearch. Elasticsearch queries are in Lucene format.
For more information about query syntax, refer to [Lucene query syntax](https://www.elastic.co/guide/en/kibana/current/lucene-query.html) and [Query string syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#query-string-syntax).
See [Lucene query syntax](https://www.elastic.co/guide/en/kibana/current/lucene-query.html) and [Query string syntax](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/query-dsl-query-string-query.html#query-string-syntax) if you are new to working with Lucene queries in Elasticsearch.
{{< admonition type="note" >}}
When composing Lucene queries, ensure that you use uppercase boolean operators: `AND`, `OR`, and `NOT`. Lowercase versions of these operators are not supported by the Lucene query syntax.
@@ -38,17 +38,17 @@ When composing Lucene queries, ensure that you use uppercase boolean operators:
{{< figure src="/static/img/docs/elasticsearch/elastic-query-editor-10.1.png" max-width="800px" class="docs-image--no-shadow" caption="Elasticsearch query editor" >}}
For general documentation on querying data sources in Grafana, including options and functions common to all query editors, refer to [Query and transform data](ref:query-and-transform-data).
For general documentation on querying data sources in Grafana, including options and functions common to all query editors, see [Query and transform data](ref:query-and-transform-data).
## Aggregation types
Elasticsearch groups aggregations into three categories:
- **Bucket** - Bucket aggregations don't calculate metrics, they create buckets of documents based on field values, ranges and a variety of other criteria. Refer to [Bucket aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket.html) for additional information. Use bucket aggregations under `Group by` when creating a metrics query in the query builder.
- **Bucket** - Bucket aggregations don't calculate metrics, they create buckets of documents based on field values, ranges and a variety of other criteria. See [Bucket aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket.html) for additional information. Use bucket aggregations under `Group by` when creating a metrics query in the query builder.
- **Metrics** - Metrics aggregations perform calculations such as sum, average, min, etc. They can be single-value or multi-value. Refer to [Metrics aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html) for additional information. Use metrics aggregations in the metrics query type in the query builder.
- **Metrics** - Metrics aggregations perform calculations such as sum, average, min, etc. They can be single-value or multi-value. See [Metrics aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html) for additional information. Use metrics aggregations in the metrics query type in the query builder.
- **Pipeline** - Pipeline aggregations work on the output of other aggregations rather than on documents or fields. Refer to [Pipeline aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline.html) for additional information.
- **Pipeline** - Elasticsearch pipeline aggregations work with inputs or metrics created from other aggregations (not documents or fields). There are parent and sibling pipeline aggregations. See [Pipeline aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-pipeline.html) for additional information.
## Select a query type
@@ -56,51 +56,44 @@ There are three types of queries you can create with the Elasticsearch query bui
### Metrics query type
Metrics queries aggregate data and produce calculations such as count, min, max, and more. Click the metric box to view options in the drop-down menu. The default is `count`.
Metrics queries aggregate data and produce a variety of calculations such as count, min, max, etc. Click on the metric box to view a list of options in the dropdown menu. The default is `count`.
- **Alias** - Aliasing only applies to **time series queries**, where the last group is `date histogram`. This is ignored for any other type of query.
- **Metric** - Metrics aggregations include:
- count - refer to [Value count aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html)
- average - refer to [Avg aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html)
- sum - refer to [Sum aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html)
- max - refer to [Max aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html)
- min - refer to [Min aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html)
- extended stats - refer to [Extended stats aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html)
- percentiles - refer to [Percentiles aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html)
- unique count - refer to [Cardinality aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html)
- top metrics - refer to [Top metrics aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-metrics.html)
- rate - refer to [Rate aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-rate-aggregation.html)
- **Pipeline aggregations** - Pipeline aggregations work on the output of other aggregations rather than on documents. The following pipeline aggregations are available:
- moving function - Calculates a value based on a sliding window of aggregated values. Refer to [Moving function aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movfn-aggregation.html).
- derivative - Calculates the derivative of a metric. Refer to [Derivative aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-derivative-aggregation.html).
- cumulative sum - Calculates the cumulative sum of a metric. Refer to [Cumulative sum aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-cumulative-sum-aggregation.html).
- serial difference - Calculates the difference between values in a time series. Refer to [Serial differencing aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-serialdiff-aggregation.html).
- bucket script - Executes a script on metric values from other aggregations. Refer to [Bucket script aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html).
- count - see [Value count aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-valuecount-aggregation.html)
- average - see [Avg aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-avg-aggregation.html)
- sum - see [Sum aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html)
- max - see [Max aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-max-aggregation.html)
- min - see [Min aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-min-aggregation.html)
- extended stats - see [Extended stats aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html)
- percentiles - see [Percentiles aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-percentile-aggregation.html)
- unique count - see [Cardinality aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-cardinality-aggregation.html)
- top metrics - see [Top metrics aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-top-metrics.html)
- rate - see [Rate aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-rate-aggregation.html)
You can select multiple metrics and group by multiple terms or filters when using the Elasticsearch query editor.
Use the **+ sign** to the right to add multiple metrics to your query. Click on the **eye icon** next to **Metric** to hide metrics, and the **garbage can icon** to remove metrics.
- **Group by options** - Create multiple group by options when constructing your Elasticsearch query. Date histogram is the default option. The following options are available in the drop-down menu:
- terms - refer to [Terms aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html).
- filter - refer to [Filter aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html).
- geo hash grid - refer to [Geohash grid aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohashgrid-aggregation.html).
- date histogram - for time series queries. Refer to [Date histogram aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html).
- histogram - Depicts frequency distributions. Refer to [Histogram aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html).
- nested (experimental) - Refer to [Nested aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-nested-aggregation.html).
- **Group by options** - Create multiple group by options when constructing your Elasticsearch query. Date histogram is the default option. Below is a list of options in the dropdown menu.
- terms - see [Terms aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html).
- filter - see [Filter aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html).
- geo hash grid - see [Geohash grid aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohashgrid-aggregation.html).
- date histogram - for time series queries. See [Date histogram aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html).
- histogram - Depicts frequency distributions. See [Histogram aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html).
- nested (experimental) - See [Nested aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-nested-aggregation.html).
Each group by option will have a different subset of options to further narrow your query.
The following options are specific to the **date histogram** bucket aggregation option.
- **Time field** - The field used for time-based queries. The default can be set when configuring the data source in the **Time field name** setting under [Elasticsearch details](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/configure/#elasticsearch-details). The default is `@timestamp`.
- **Interval** - The time interval for grouping data. Select from the drop-down menu or enter a custom interval such as `30d` (30 days). The default is `Auto`.
- **Min doc count** - The minimum number of documents required to include a bucket. The default is `0`.
- **Trim edges** - Removes partial buckets at the edges of the time range. The default is `0`.
- **Offset** - Shifts the start of each bucket by the specified duration. Use positive (`+`) or negative (`-`) values. Examples: `1h`, `5s`, `1d`.
- **Timezone** - The timezone for date calculations. The default is `Coordinated Universal Time`.
- **Time field** - Depicts date data options. The default option can be specified when configuring the Elasticsearch data source in the **Time field name** under the [**Elasticsearch details**](/docs/grafana/latest/datasources/elasticsearch/configure-elasticsearch-data-source/#elasticsearch-details) section. Otherwise **@timestamp** field will be used as a default option.
- **Interval** - Group by a type of interval. There are options to choose from the dropdown menu to select seconds, minutes, hours or days. You can also add a custom interval such as `30d` (30 days). `Auto` is the default option.
- **Min doc count** - The minimum amount of data to include in your query. The default is `0`.
- **Thin edges** - Select to trim edges on the time series data points. The default is `0`.
- **Offset** - Changes the start value of each bucket by the specified positive(+) or negative (-) offset duration. Examples include `1h` for 1 hour, `5s` for 5 seconds or `1d` for 1 day.
- **Timezone** - Select a timezone from the dropdown menu. The default is `Coordinated Universal Time`.
Configure the following options for the **terms** bucket aggregation option:
@@ -108,7 +101,7 @@ Configure the following options for the **terms** bucket aggregation option:
- **Size** - Limits the number of documents, or size of the data set. You can set a custom number or `no limit`.
- **Min doc count** - The minimum amount of data to include in your query. The default is `0`.
- **Order by** - Order terms by `term value`, `doc count` or `count`.
- **Missing** - Defines how documents missing a value should be treated. Missing values are ignored by default, but they can be treated as if they had a value. Refer to [Missing value](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html#_missing_value_5) in the Elasticsearch documentation for more information.
- **Missing** - Defines how documents missing a value should be treated. Missing values are ignored by default, but they can be treated as if they had a value. See [Missing value](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html#_missing_value_5) in Elasticsearch's documentation for more information.
Configure the following options for the **filters** bucket aggregation option:
@@ -121,8 +114,8 @@ Configure the following options for the **geo hash grid** bucket aggregation opt
Configure the following options for the **histogram** bucket aggregation option:
- **Interval** - The numeric interval for grouping values into buckets.
- **Min doc count** - The minimum number of documents required to include a bucket. The default is `0`.
- **Interval** - Group by a type of interval. There are options to choose from the dropdown menu to select seconds, minutes, hours, or days. You can also add a custom interval such as `30d` (30 days). `Auto` is the default option.
- **Min doc count** - The minimum amount of data to include in your query. The default is `0`.
The **nested** group by option is currently experimental, you can select a field and then settings specific to that field.
@@ -148,7 +141,7 @@ The option to run a **raw document query** is deprecated as of Grafana v10.1.
## Use template variables
You can also augment queries by using [template variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/template-variables/).
You can also augment queries by using [template variables](../template-variables/).
Queries of `terms` have a 500-result limit by default.
To set a custom limit, set the `size` property in your query.

View File

@@ -22,11 +22,6 @@ refs:
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
add-template-variables-add-ad-hoc-filters:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/#add-ad-hoc-filters
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/#add-ad-hoc-filters
add-template-variables-multi-value-variables:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/#multi-value-variables
@@ -42,29 +37,11 @@ refs:
# Elasticsearch template variables
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
Grafana lists these variables in drop-down select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
Grafana refers to such variables as template variables.
For an introduction to templating and template variables, refer to the [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables) documentation.
## Use ad hoc filters
Elasticsearch supports the **Ad hoc filters** variable type.
You can use this variable type to specify any number of key/value filters, and Grafana applies them automatically to all of your Elasticsearch queries.
Ad hoc filters support the following operators:
| Operator | Description |
| -------- | ------------------------------------------------------------- |
| `=` | Equals. Adds `AND field:"value"` to the query. |
| `!=` | Not equals. Adds `AND -field:"value"` to the query. |
| `=~` | Matches regex. Adds `AND field:/value/` to the query. |
| `!~` | Does not match regex. Adds `AND -field:/value/` to the query. |
| `>` | Greater than. Adds `AND field:>value` to the query. |
| `<` | Less than. Adds `AND field:<value` to the query. |
For more information, refer to [Add ad hoc filters](ref:add-template-variables-add-ad-hoc-filters).
## Choose a variable syntax
The Elasticsearch data source supports two variable syntaxes for use in the **Query** field:
@@ -73,35 +50,34 @@ The Elasticsearch data source supports two variable syntaxes for use in the **Qu
- `[[varname]]`, such as `hostname:[[hostname]]`
When the _Multi-value_ or _Include all value_ options are enabled, Grafana converts the labels from plain text to a Lucene-compatible condition.
For details, refer to the [Multi-value variables](ref:add-template-variables-multi-value-variables) documentation.
For details, see the [Multi-value variables](ref:add-template-variables-multi-value-variables) documentation.
## Use variables in queries
You can use variables in the Lucene query field, metric aggregation fields, bucket aggregation fields, and the alias field.
### Variables in Lucene queries
Use variables to filter your Elasticsearch queries dynamically:
You can use other variables inside the query.
This example defines a variable named `$host`:
```
hostname:$hostname AND level:$level
{"find": "terms", "field": "hostname", "query": "source:$source"}
```
### Chain or nest variables
This uses another variable named `$source` inside the query definition.
Whenever you change the value of the `$source` variable via the dropdown, Grafana triggers an update of the `$host` variable to contain only hostnames filtered by, in this case, the `source` document property.
You can create nested variables, where one variable's values depend on another variable's selection.
These queries by default return results in term order (which can then be sorted alphabetically or numerically as for any variable).
To produce a list of terms sorted by doc count (a top-N values list), add an `orderBy` property of "doc_count".
This automatically selects a descending sort.
This example defines a variable named `$host` that only shows hosts matching the selected `$environment`:
{{< admonition type="note" >}}
To use an ascending sort (`asc`) with doc_count (a bottom-N list), set `order: "asc"`. However, Elasticsearch [discourages this](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html#search-aggregations-bucket-terms-aggregation-order) because sorting by ascending doc count can return inaccurate results.
{{< /admonition >}}
To keep terms in the doc count order, set the variable's Sort dropdown to **Disabled**.
You can alternatively use other sorting criteria, such as **Alphabetical**, to re-sort them.
```json
{ "find": "terms", "field": "hostname", "query": "environment:$environment" }
```
Whenever you change the value of the `$environment` variable via the drop-down, Grafana triggers an update of the `$host` variable to contain only hostnames filtered by the selected environment.
### Variables in aggregations
You can use variables in bucket aggregation fields to dynamically change how data is grouped. For example, use a variable in the **Terms** group by field to let users switch between grouping by `hostname`, `service`, or `datacenter`.
{"find": "terms", "field": "hostname", "orderBy": "doc_count"}
```
## Template variable examples
@@ -116,36 +92,11 @@ Write the query using a custom JSON string, with the field mapped as a [keyword]
If the query is [multi-field](https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-fields.html) with both a `text` and `keyword` type, use `"field":"fieldname.keyword"` (sometimes `fieldname.raw`) to specify the keyword field in your query.
| Query | Description |
| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ |
| `{"find": "fields", "type": "keyword"}` | Returns a list of field names with the index type `keyword`. |
| `{"find": "fields", "type": "number"}` | Returns a list of numeric field names (includes `float`, `double`, `integer`, `long`, `scaled_float`). |
| `{"find": "fields", "type": "date"}` | Returns a list of date field names. |
| `{"find": "terms", "field": "hostname.keyword", "size": 1000}` | Returns a list of values for a keyword field. Uses the current dashboard time range. |
| `{"find": "terms", "field": "hostname", "query": "<Lucene query>"}` | Returns a list of values filtered by a Lucene query. Uses the current dashboard time range. |
| `{"find": "terms", "field": "status", "orderBy": "doc_count"}` | Returns values sorted by document count (descending by default). |
| `{"find": "terms", "field": "status", "orderBy": "doc_count", "order": "asc"}` | Returns values sorted by document count in ascending order. |
| Query | Description |
| ------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `{"find": "fields", "type": "keyword"}` | Returns a list of field names with the index type `keyword`. |
| `{"find": "terms", "field": "hostname.keyword", "size": 1000}` | Returns a list of values for a keyword using term aggregation. Query will use current dashboard time range as time range query. |
| `{"find": "terms", "field": "hostname", "query": '<Lucene query>'}` | Returns a list of values for a keyword field using term aggregation and a specified Lucene query filter. Query will use current dashboard time range as time range for query. |
Queries of `terms` have a 500-result limit by default. To set a custom limit, set the `size` property in your query.
### Sort query results
By default, queries return results in term order (which can then be sorted alphabetically or numerically using the variable's Sort setting).
To produce a list of terms sorted by document count (a top-N values list), add an `orderBy` property of `doc_count`. This automatically selects a descending sort:
```json
{ "find": "terms", "field": "status", "orderBy": "doc_count" }
```
You can also use the `order` property to explicitly set ascending or descending sort:
```json
{ "find": "terms", "field": "hostname", "orderBy": "doc_count", "order": "asc" }
```
{{< admonition type="note" >}}
Elasticsearch [discourages](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html#search-aggregations-bucket-terms-aggregation-order) sorting by ascending doc count because it can return inaccurate results.
{{< /admonition >}}
To keep terms in the document count order, set the variable's Sort drop-down to **Disabled**. You can alternatively use other sorting criteria, such as **Alphabetical**, to re-sort them.
Queries of `terms` have a 500-result limit by default.
To set a custom limit, set the `size` property in your query.

View File

@@ -1,266 +0,0 @@
---
aliases:
- ../../data-sources/elasticsearch/troubleshooting/
description: Troubleshooting the Elasticsearch data source in Grafana
keywords:
- grafana
- elasticsearch
- troubleshooting
- errors
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Troubleshooting
title: Troubleshoot issues with the Elasticsearch data source
weight: 600
---
# Troubleshoot issues with the Elasticsearch data source
This document provides troubleshooting information for common errors you may encounter when using the Elasticsearch data source in Grafana.
## Connection errors
The following errors occur when Grafana cannot establish or maintain a connection to Elasticsearch.
### Failed to connect to Elasticsearch
**Error message:** "Health check failed: Failed to connect to Elasticsearch"
**Cause:** Grafana cannot establish a network connection to the Elasticsearch server.
**Solution:**
1. Verify that the Elasticsearch URL is correct in the data source configuration.
1. Check that Elasticsearch is running and accessible from the Grafana server.
1. Ensure there are no firewall rules blocking the connection.
1. If using a proxy, verify the proxy settings are correct.
1. For Grafana Cloud, ensure you have configured [Private data source connect](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) if your Elasticsearch instance is not publicly accessible.
### Request timed out
**Error message:** "Health check failed: Elasticsearch data source is not healthy. Request timed out"
**Cause:** The connection to Elasticsearch timed out before receiving a response.
**Solution:**
1. Check the network latency between Grafana and Elasticsearch.
1. Verify that Elasticsearch is not overloaded or experiencing performance issues.
1. Increase the timeout setting in the data source configuration if needed.
1. Check if any network devices (load balancers, proxies) are timing out the connection.
### Failed to parse data source URL
**Error message:** "Failed to parse data source URL"
**Cause:** The URL entered in the data source configuration is not valid.
**Solution:**
1. Verify the URL format is correct (for example, `http://localhost:9200` or `https://elasticsearch.example.com:9200`).
1. Ensure the URL includes the protocol (`http://` or `https://`).
1. Remove any trailing slashes or invalid characters from the URL.
## Authentication errors
The following errors occur when there are issues with authentication credentials or permissions.
### Unauthorized (401)
**Error message:** "Health check failed: Elasticsearch data source is not healthy. Status: 401 Unauthorized"
**Cause:** The authentication credentials are invalid or missing.
**Solution:**
1. Verify that the username and password are correct.
1. If using an API key, ensure the key is valid and has not expired.
1. Check that the authentication method selected matches your Elasticsearch configuration.
1. Verify the user has the required permissions to access the Elasticsearch cluster.
### Forbidden (403)
**Error message:** "Health check failed: Elasticsearch data source is not healthy. Status: 403 Forbidden"
**Cause:** The authenticated user does not have permission to access the requested resource.
**Solution:**
1. Verify the user has read access to the specified index.
1. Check Elasticsearch security settings and role mappings.
1. Ensure the user has permission to access the `_cluster/health` endpoint.
1. If using AWS Elasticsearch Service with SigV4 authentication, verify the IAM policy grants the required permissions.
## Cluster health errors
The following errors occur when the Elasticsearch cluster is unhealthy or unavailable.
### Cluster status is red
**Error message:** "Health check failed: Elasticsearch data source is not healthy"
**Cause:** The Elasticsearch cluster health status is red, indicating one or more primary shards are not allocated.
**Solution:**
1. Check the Elasticsearch cluster health using `GET /_cluster/health`.
1. Review Elasticsearch logs for errors.
1. Verify all nodes in the cluster are running and connected.
1. Check for unassigned shards using `GET /_cat/shards?v&h=index,shard,prirep,state,unassigned.reason`.
1. Consider increasing the cluster's resources or reducing the number of shards.
### Bad Gateway (502)
**Error message:** "Health check failed: Elasticsearch data source is not healthy. Status: 502 Bad Gateway"
**Cause:** A proxy or load balancer between Grafana and Elasticsearch returned an error.
**Solution:**
1. Check the health of any proxies or load balancers in the connection path.
1. Verify Elasticsearch is running and accepting connections.
1. Review proxy/load balancer logs for more details.
1. Ensure the proxy timeout is configured appropriately for Elasticsearch requests.
## Index errors
The following errors occur when there are issues with the configured index or index pattern.
### Index not found
**Error message:** "Error validating index: index_not_found"
**Cause:** The specified index or index pattern does not match any existing indices.
**Solution:**
1. Verify the index name or pattern in the data source configuration.
1. Check that the index exists using `GET /_cat/indices`.
1. If using a time-based index pattern (for example, `[logs-]YYYY.MM.DD`), ensure indices exist for the selected time range.
1. Verify the user has permission to access the index.
### Time field not found
**Error message:** "Could not find time field '@timestamp' with type date in index"
**Cause:** The specified time field does not exist in the index or is not of type `date`.
**Solution:**
1. Verify the time field name in the data source configuration matches the field in your index.
1. Check the field mapping using `GET /<index>/_mapping`.
1. Ensure the time field is mapped as a `date` type, not `text` or `keyword`.
1. If the field name is different (for example, `timestamp` instead of `@timestamp`), update the data source configuration.
## Query errors
The following errors occur when there are issues with query syntax or configuration.
### Too many buckets
**Error message:** "Trying to create too many buckets. Must be less than or equal to: [65536]."
**Cause:** The query is generating more aggregation buckets than Elasticsearch allows.
**Solution:**
1. Reduce the time range of your query.
1. Increase the date histogram interval (for example, change from `10s` to `1m`).
1. Add filters to reduce the number of documents being aggregated.
1. Increase the `search.max_buckets` setting in Elasticsearch (requires cluster admin access).
### Required field missing
**Error message:** "Required one of fields [field, script], but none were specified."
**Cause:** A metric aggregation (such as Average, Sum, or Min) was added without specifying a field.
**Solution:**
1. Select a field for the metric aggregation in the query editor.
1. Ensure the selected field exists in your index and contains numeric data.
### Unsupported interval
**Error message:** "unsupported interval '&lt;interval&gt;'"
**Cause:** The interval specified for the index pattern is not valid.
**Solution:**
1. Use a supported interval: `Hourly`, `Daily`, `Weekly`, `Monthly`, or `Yearly`.
1. If you don't need a time-based index pattern, use `No pattern` and specify the exact index name.
## Version errors
The following errors occur when there are Elasticsearch version compatibility issues.
### Unsupported Elasticsearch version
**Error message:** "Support for Elasticsearch versions after their end-of-life (currently versions &lt; 7.16) was removed. Using unsupported version of Elasticsearch may lead to unexpected and incorrect results."
**Cause:** The Elasticsearch version is no longer supported by the Grafana data source.
**Solution:**
1. Upgrade Elasticsearch to a supported version (7.17+, 8.x, or 9.x).
1. Refer to [Elastic Product End of Life Dates](https://www.elastic.co/support/eol) for version support information.
1. Note that queries may still work, but Grafana does not guarantee functionality for unsupported versions.
## Other common issues
The following issues don't produce specific error messages but are commonly encountered.
### Empty query results
**Cause:** The query returns no data.
**Solution:**
1. Verify the time range includes data in your index.
1. Check the Lucene query syntax for errors.
1. Test the query directly in Elasticsearch using the `_search` API.
1. Ensure the index contains documents matching your query filters.
### Slow query performance
**Cause:** Queries take a long time to execute.
**Solution:**
1. Reduce the time range of your query.
1. Add more specific filters to limit the data scanned.
1. Increase the date histogram interval.
1. Check Elasticsearch cluster performance and resource utilization.
1. Consider using index aliases or data streams for better query routing.
### CORS errors in browser console
**Cause:** Cross-Origin Resource Sharing (CORS) is blocking requests from the browser to Elasticsearch.
**Solution:**
1. Use Server (proxy) access mode instead of Browser access mode in the data source configuration.
1. If Browser access is required, configure CORS settings in Elasticsearch:
```yaml
http.cors.enabled: true
http.cors.allow-origin: '<your-grafana-url>'
http.cors.allow-headers: 'Authorization, Content-Type'
http.cors.allow-credentials: true
```
{{< admonition type="note" >}}
Server (proxy) access mode is recommended for security and reliability.
{{< /admonition >}}
## Get additional help
If you continue to experience issues after following this troubleshooting guide:
1. Check the [Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) for API-specific guidance.
1. Review the [Grafana community forums](https://community.grafana.com/) for similar issues.
1. Contact Grafana Support if you have an Enterprise license.

View File

@@ -83,11 +83,6 @@ This topic lists words and abbreviations that are commonly used in the Grafana d
A commonly-used visualization that displays data as points, lines, or bars.
</td>
</tr>
<tr>
<td style="vertical-align: top"><code>grafanactl</code></td>
<td>
A command-line tool that enables users to authenticate, manage multiple environments, and perform administrative tasks through Grafana's REST API.
</td>
</tr>
<tr>
<td style="vertical-align: top">mixin</td>
<td>

View File

@@ -99,7 +99,6 @@ Add links to other dashboards at the top of your current dashboard.
- **Include current time range** Select this option to include the dashboard time range in the link. When the user clicks the link, the linked dashboard opens with the indicated time range already set. **Example:** https://play.grafana.org/d/000000010/annotations?orgId=1&from=now-3h&to=now
- **Include current template variable values** Select this option to include template variables currently used as query parameters in the link. When the user clicks the link, any matching templates in the linked dashboard are set to the values from the link. For more information, see [Dashboard URL variables](ref:dashboard-url-variables).
- **Open link in new tab** Select this option if you want the dashboard link to open in a new tab or window.
- **Show in controls menu** Select this option to display the link in the dashboard controls menu instead of at the top of the dashboard. The dashboard controls menu appears as a button in the dashboard toolbar.
1. Click **Save dashboard** in the top-right corner.
1. Click **Back to dashboard** and then **Exit edit**.
@@ -122,7 +121,6 @@ Add a link to a URL at the top of your current dashboard. You can link to any av
- **Include current time range** Select this option to include the dashboard time range in the link. When the user clicks the link, the linked dashboard opens with the indicated time range already set. **Example:** https://play.grafana.org/d/000000010/annotations?orgId=1&from=now-3h&to=now
- **Include current template variable values** Select this option to include template variables currently used as query parameters in the link. When the user clicks the link, any matching templates in the linked dashboard are set to the values from the link.
- **Open link in new tab** Select this option if you want the dashboard link to open in a new tab or window.
- **Show in controls menu** Select this option to display the link in the dashboard controls menu instead of at the top of the dashboard. The dashboard controls menu appears as a button in the dashboard toolbar.
1. Click **Save dashboard** in the top-right corner.
1. Click **Back to dashboard** and then **Exit edit**.

View File

@@ -123,11 +123,10 @@ To create a variable, follow these steps:
If you don't enter a display name, then the drop-down list label is the variable name.
1. Choose a **Display** option:
- **Above dashboard** - The variable drop-down list displays above the dashboard with the variable **Name** or **Label** value. This is the default.
- **Above dashboard, label hidden** - The variable drop-down list displays above the dashboard, but without showing the name of the variable.
- **Controls menu** - The variable is displayed in the dashboard controls menu instead of above the dashboard. The dashboard controls menu appears as a button in the dashboard toolbar.
- **Hidden** - No variable drop-down list is displayed on the dashboard.
1. Choose a **Show on dashboard** option:
- **Label and value** - The variable drop-down list displays the variable **Name** or **Label** value. This is the default.
- **Value:** The variable drop-down list only displays the selected variable value and a down arrow.
- **Nothing:** No variable drop-down list is displayed on the dashboard.
1. Click one of the following links to complete the steps for adding your selected variable type:
- [Query](#add-a-query-variable)

View File

@@ -12,13 +12,12 @@ comments: |
To build this Markdown, do the following:
$ cd /docs (from the root of the repository)
$ make sources/visualizations/panels-visualizations/query-transform-data/transform-data/index.md
$ make sources/panels-visualizations/query-transform-data/transform-data/index.md
$ make docs
Browse to http://localhost:3003/docs/grafana/latest/panels-visualizations/query-transform-data/transform-data/
Refer to ./docs/README.md "Content guidelines" for more information about editing and building these docs.
aliases:
- ../../../panels/transform-data/ # /docs/grafana/next/panels/transform-data/
- ../../../panels/transform-data/about-transformation/ # /docs/grafana/next/panels/transform-data/about-transformation/

View File

@@ -419,9 +419,6 @@ test.describe(
// Select tabs layout
await page.getByLabel('layout-selection-option-Tabs').click();
// confirm layout change
await dashboardPage.getByGrafanaSelector(selectors.pages.ConfirmModal.delete).click();
await expect(dashboardPage.getByGrafanaSelector(selectors.components.Tab.title('New row'))).toBeVisible();
await expect(dashboardPage.getByGrafanaSelector(selectors.components.Tab.title('New row 1'))).toBeVisible();
await expect(
@@ -760,9 +757,6 @@ test.describe(
// Select rows layout
await page.getByLabel('layout-selection-option-Rows').click();
// confirm layout change
await dashboardPage.getByGrafanaSelector(selectors.pages.ConfirmModal.delete).click();
await dashboardPage
.getByGrafanaSelector(selectors.components.DashboardRow.wrapper('New tab 1'))
.scrollIntoViewIfNeeded();

View File

@@ -4,8 +4,6 @@ import { test, expect, E2ESelectorGroups, DashboardPage } from '@grafana/plugin-
import testV2Dashboard from '../dashboards/TestV2Dashboard.json';
import { switchToAutoGrid } from './utils';
test.use({
featureToggles: {
kubernetesDashboards: true,
@@ -35,8 +33,7 @@ test.describe(
).toHaveCount(3);
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await page.getByLabel('layout-selection-option-Auto grid').click();
await expect(
dashboardPage.getByGrafanaSelector(selectors.components.Panels.Panel.title('New panel'))
@@ -67,8 +64,7 @@ test.describe(
).toHaveCount(3);
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await page.getByLabel('layout-selection-option-Auto grid').click();
// Get initial positions - standard width should have panels on different rows
const firstPanelTop = await getPanelTop(dashboardPage, selectors);
@@ -128,8 +124,7 @@ test.describe(
).toHaveCount(3);
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await page.getByLabel('layout-selection-option-Auto grid').click();
await dashboardPage
.getByGrafanaSelector(selectors.components.PanelEditor.ElementEditPane.AutoGridLayout.minColumnWidth)
@@ -186,8 +181,7 @@ test.describe(
).toHaveCount(3);
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await page.getByLabel('layout-selection-option-Auto grid').click();
await dashboardPage
.getByGrafanaSelector(selectors.components.PanelEditor.ElementEditPane.AutoGridLayout.maxColumns)
@@ -222,8 +216,7 @@ test.describe(
).toHaveCount(3);
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await page.getByLabel('layout-selection-option-Auto grid').click();
const regularRowHeight = await getPanelHeight(dashboardPage, selectors);
@@ -278,8 +271,7 @@ test.describe(
).toHaveCount(3);
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await page.getByLabel('layout-selection-option-Auto grid').click();
const regularRowHeight = await getPanelHeight(dashboardPage, selectors);
@@ -336,8 +328,7 @@ test.describe(
).toHaveCount(3);
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await page.getByLabel('layout-selection-option-Auto grid').click();
// Set narrow column width first to ensure panels fit horizontally
await dashboardPage

View File

@@ -1,6 +1,6 @@
import { Page } from 'playwright-core';
import { test, expect, DashboardPage } from '@grafana/plugin-e2e';
import { test, expect } from '@grafana/plugin-e2e';
import testV2DashWithRepeats from '../dashboards/V2DashWithRepeats.json';
@@ -12,7 +12,6 @@ import {
getPanelPosition,
importTestDashboard,
goToEmbeddedPanel,
switchToAutoGrid,
} from './utils';
const repeatTitleBase = 'repeat - ';
@@ -43,7 +42,7 @@ test.describe(
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await switchToAutoGrid(page);
await dashboardPage.getByGrafanaSelector(selectors.components.Panels.Panel.title('New panel')).first().click();
@@ -79,8 +78,7 @@ test.describe(
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await switchToAutoGrid(page);
await saveDashboard(dashboardPage, page, selectors);
await page.reload();
@@ -119,7 +117,7 @@ test.describe(
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await switchToAutoGrid(page);
// select first/original repeat panel to activate edit pane
await dashboardPage
@@ -150,7 +148,7 @@ test.describe(
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await switchToAutoGrid(page);
await saveDashboard(dashboardPage, page, selectors);
await page.reload();
@@ -216,7 +214,7 @@ test.describe(
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await switchToAutoGrid(page);
await saveDashboard(dashboardPage, page, selectors);
// loading directly into panel editor
@@ -273,7 +271,7 @@ test.describe(
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await switchToAutoGrid(page);
// this moving repeated panel between two normal panels
await movePanel(dashboardPage, selectors, `${repeatTitleBase}${repeatOptions.at(0)}`, 'New panel');
@@ -321,7 +319,7 @@ test.describe(
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await switchToAutoGrid(page);
await saveDashboard(dashboardPage, page, selectors);
await page.reload();
@@ -384,7 +382,7 @@ test.describe(
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await switchToAutoGrid(page);
await saveDashboard(dashboardPage, page, selectors);
await page.reload();
@@ -412,7 +410,7 @@ test.describe(
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
await switchToAutoGrid(page, dashboardPage);
await switchToAutoGrid(page);
await saveDashboard(dashboardPage, page, selectors);
await page.reload();
@@ -464,3 +462,7 @@ test.describe(
});
}
);
// Switches the dashboard layout to "Auto grid" via the layout selection option
// in the options sidebar (sidebar must already be open).
async function switchToAutoGrid(page: Page) {
  const autoGridOption = page.getByLabel('layout-selection-option-Auto grid');
  await autoGridOption.click();
}

View File

@@ -1,6 +1,5 @@
import { Page } from '@playwright/test';
import { selectors } from '@grafana/e2e-selectors';
import { DashboardPage, E2ESelectorGroups, expect } from '@grafana/plugin-e2e';
import testV2Dashboard from '../dashboards/TestV2Dashboard.json';
@@ -240,12 +239,3 @@ export async function getTabPosition(dashboardPage: DashboardPage, selectors: E2
const boundingBox = await tab.boundingBox();
return boundingBox;
}
// Switches the dashboard layout to "Auto grid" and, when the layout change
// triggers a confirmation modal, confirms it.
export async function switchToAutoGrid(page: Page, dashboardPage: DashboardPage) {
  await page.getByLabel('layout-selection-option-Auto grid').click();
  // confirm layout change if applicable.
  // BUG FIX: a Playwright Locator object is always truthy, so `if (confirmModal)`
  // took the branch unconditionally and the click could hang waiting for a modal
  // that never appears. Check actual visibility instead.
  const confirmModal = dashboardPage.getByGrafanaSelector(selectors.pages.ConfirmModal.delete);
  if (await confirmModal.isVisible()) {
    await confirmModal.click();
  }
}

View File

@@ -2882,6 +2882,11 @@
"count": 1
}
},
"public/app/features/panel/components/VizTypePicker/PanelTypeCard.tsx": {
"@grafana/no-aria-label-selectors": {
"count": 1
}
},
"public/app/features/panel/panellinks/linkSuppliers.ts": {
"@typescript-eslint/no-explicit-any": {
"count": 1

View File

@@ -289,7 +289,7 @@
"@grafana/google-sdk": "0.3.5",
"@grafana/i18n": "workspace:*",
"@grafana/lezer-logql": "0.2.9",
"@grafana/llm": "1.0.1",
"@grafana/llm": "0.22.1",
"@grafana/monaco-logql": "^0.0.8",
"@grafana/o11y-ds-frontend": "workspace:*",
"@grafana/plugin-ui": "^0.11.1",
@@ -459,8 +459,7 @@
"gitconfiglocal": "2.1.0",
"tmp@npm:^0.0.33": "~0.2.1",
"js-yaml@npm:4.1.0": "^4.1.0",
"js-yaml@npm:=4.1.0": "^4.1.0",
"nodemailer": "7.0.7"
"js-yaml@npm:=4.1.0": "^4.1.0"
},
"workspaces": {
"packages": [

View File

@@ -165,19 +165,6 @@ const injectedRtkApi = api
}),
providesTags: ['Search'],
}),
getSearchUsers: build.query<GetSearchUsersApiResponse, GetSearchUsersApiArg>({
query: (queryArg) => ({
url: `/searchUsers`,
params: {
query: queryArg.query,
limit: queryArg.limit,
page: queryArg.page,
offset: queryArg.offset,
sort: queryArg.sort,
},
}),
providesTags: ['Search'],
}),
listServiceAccount: build.query<ListServiceAccountApiResponse, ListServiceAccountApiArg>({
query: (queryArg) => ({
url: `/serviceaccounts`,
@@ -909,18 +896,6 @@ export type GetSearchTeamsApiArg = {
/** page number to start from */
page?: number;
};
export type GetSearchUsersApiResponse = unknown;
export type GetSearchUsersApiArg = {
query?: string;
/** number of results to return */
limit?: number;
/** page number (starting from 1) */
page?: number;
/** number of results to skip */
offset?: number;
/** sortable field */
sort?: string;
};
export type ListServiceAccountApiResponse = /** status 200 OK */ ServiceAccountList;
export type ListServiceAccountApiArg = {
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
@@ -2092,9 +2067,6 @@ export type UserSpec = {
role: string;
title: string;
};
export type UserStatus = {
lastSeenAt: number;
};
export type User = {
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
apiVersion?: string;
@@ -2103,7 +2075,6 @@ export type User = {
metadata: ObjectMeta;
/** Spec is the spec of the User */
spec: UserSpec;
status: UserStatus;
};
export type UserList = {
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
@@ -2149,8 +2120,6 @@ export const {
useUpdateExternalGroupMappingMutation,
useGetSearchTeamsQuery,
useLazyGetSearchTeamsQuery,
useGetSearchUsersQuery,
useLazyGetSearchUsersQuery,
useListServiceAccountQuery,
useLazyListServiceAccountQuery,
useCreateServiceAccountMutation,

View File

@@ -1138,7 +1138,7 @@ export type JobResourceSummary = {
delete?: number;
/** Create or update (export) */
error?: number;
/** Report errors/warnings for this resource type This may not be an exhaustive list and recommend looking at the logs for more info */
/** Report errors for this resource type This may not be an exhaustive list and recommend looking at the logs for more info */
errors?: string[];
group?: string;
kind?: string;
@@ -1146,9 +1146,6 @@ export type JobResourceSummary = {
noop?: number;
total?: number;
update?: number;
/** The error count */
warning?: number;
warnings?: string[];
write?: number;
};
export type RepositoryUrLs = {
@@ -1179,7 +1176,6 @@ export type JobStatus = {
summary?: JobResourceSummary[];
/** URLs contains URLs for the reference branch or commit if applicable. */
url?: RepositoryUrLs;
warnings?: string[];
};
export type Job = {
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */

View File

@@ -3,7 +3,7 @@
// (a <button> with clear text, for example, does not need an aria-label as it's already labeled)
// but you still might need to select it for testing,
// in that case please add the attribute data-testid={selector} in the component and
// prefix your selector string with 'data-testid' so that when we create the selectors we know to search for it on the right attribute
// prefix your selector string with 'data-testid' so that when create the selectors we know to search for it on the right attribute
import { VersionedSelectorGroup } from '../types';
@@ -1057,7 +1057,6 @@ export const versionedComponents = {
},
PluginVisualization: {
item: {
'12.4.0': (title: string) => `data-testid Plugin visualization item ${title}`,
[MIN_GRAFANA_VERSION]: (title: string) => `Plugin visualization item ${title}`,
},
current: {

View File

@@ -17,10 +17,6 @@ export interface Options {
* Controls the height of the rows
*/
cellHeight?: ui.TableCellHeight;
/**
* If true, disables all keyboard events in the table. this is used when previewing a table (i.e. suggestions)
*/
disableKeyboardEvents?: boolean;
/**
* Enable pagination on the table
*/

View File

@@ -13,7 +13,6 @@ import * as common from '@grafana/schema';
export const pluginVersion = "12.4.0-pre";
export interface Options extends common.OptionsWithTimezones, common.OptionsWithAnnotations {
disableKeyboardEvents?: boolean;
legend: common.VizLegendOptions;
orientation?: common.VizOrientation;
timeCompare?: common.TimeCompareOptions;

View File

@@ -106,11 +106,6 @@ export function RadialGauge(props: RadialGaugeProps) {
const gaugeId = useId();
const styles = useStyles2(getStyles);
let effectiveTextMode = textMode;
if (effectiveTextMode === 'auto') {
effectiveTextMode = vizCount === 1 ? 'value' : 'value_and_name';
}
const startAngle = shape === 'gauge' ? 250 : 0;
const endAngle = shape === 'gauge' ? 110 : 360;
@@ -193,7 +188,7 @@ export function RadialGauge(props: RadialGaugeProps) {
// These elements are only added for first value / bar
if (barIndex === 0) {
if (glowBar) {
defs.push(<GlowGradient key="glow-filter" id={glowFilterId} barWidth={dimensions.barWidth} />);
defs.push(<GlowGradient key="glow-filter" id={glowFilterId} radius={dimensions.radius} />);
}
if (glowCenter) {
@@ -203,14 +198,14 @@ export function RadialGauge(props: RadialGaugeProps) {
graphics.push(
<RadialText
key="radial-text"
textMode={effectiveTextMode}
vizCount={vizCount}
textMode={textMode}
displayValue={displayValue.display}
dimensions={dimensions}
theme={theme}
valueManualFontSize={props.valueManualFontSize}
nameManualFontSize={props.nameManualFontSize}
shape={shape}
sparkline={displayValue.sparkline}
/>
);
@@ -259,7 +254,6 @@ export function RadialGauge(props: RadialGaugeProps) {
theme={theme}
color={color}
shape={shape}
textMode={effectiveTextMode}
/>
);
}

View File

@@ -1,9 +1,11 @@
import { css } from '@emotion/css';
import { FieldDisplay, GrafanaTheme2, FieldConfig } from '@grafana/data';
import { GraphFieldConfig, GraphGradientMode, LineInterpolation } from '@grafana/schema';
import { Sparkline } from '../Sparkline/Sparkline';
import { RadialShape, RadialTextMode } from './RadialGauge';
import { RadialShape } from './RadialGauge';
import { GaugeDimensions } from './utils';
interface RadialSparklineProps {
@@ -12,22 +14,23 @@ interface RadialSparklineProps {
theme: GrafanaTheme2;
color?: string;
shape?: RadialShape;
textMode: Exclude<RadialTextMode, 'auto'>;
}
export function RadialSparkline({ sparkline, dimensions, theme, color, shape, textMode }: RadialSparklineProps) {
const { radius, barWidth } = dimensions;
export function RadialSparkline({ sparkline, dimensions, theme, color, shape }: RadialSparklineProps) {
if (!sparkline) {
return null;
}
const showNameAndValue = textMode === 'value_and_name';
const height = radius / (showNameAndValue ? 4 : 3);
const width = radius * (shape === 'gauge' ? 1.6 : 1.4) - barWidth;
const topPos =
shape === 'gauge'
? `${dimensions.gaugeBottomY - height}px`
: `calc(50% + ${radius / (showNameAndValue ? 3.3 : 4)}px)`;
const { radius, barWidth } = dimensions;
const height = radius / 4;
const widthFactor = shape === 'gauge' ? 1.6 : 1.4;
const width = radius * widthFactor - barWidth;
const topPos = shape === 'gauge' ? `${dimensions.gaugeBottomY - height}px` : `calc(50% + ${radius / 2.8}px)`;
const styles = css({
position: 'absolute',
top: topPos,
});
const config: FieldConfig<GraphFieldConfig> = {
color: {
@@ -42,7 +45,7 @@ export function RadialSparkline({ sparkline, dimensions, theme, color, shape, te
};
return (
<div style={{ position: 'absolute', top: topPos }}>
<div className={styles}>
<Sparkline height={height} width={width} sparkline={sparkline} theme={theme} config={config} />
</div>
);

View File

@@ -1,12 +1,6 @@
import { css } from '@emotion/css';
import {
DisplayValue,
DisplayValueAlignmentFactors,
FieldSparkline,
formattedValueToString,
GrafanaTheme2,
} from '@grafana/data';
import { DisplayValue, DisplayValueAlignmentFactors, formattedValueToString, GrafanaTheme2 } from '@grafana/data';
import { useStyles2 } from '../../themes/ThemeContext';
import { calculateFontSize } from '../../utils/measureText';
@@ -14,13 +8,21 @@ import { calculateFontSize } from '../../utils/measureText';
import { RadialShape, RadialTextMode } from './RadialGauge';
import { GaugeDimensions } from './utils';
// function toCartesian(centerX: number, centerY: number, radius: number, angleInDegrees: number) {
// let radian = ((angleInDegrees - 90) * Math.PI) / 180.0;
// return {
// x: centerX + radius * Math.cos(radian),
// y: centerY + radius * Math.sin(radian),
// };
// }
interface RadialTextProps {
displayValue: DisplayValue;
theme: GrafanaTheme2;
dimensions: GaugeDimensions;
textMode: Exclude<RadialTextMode, 'auto'>;
textMode: RadialTextMode;
vizCount: number;
shape: RadialShape;
sparkline?: FieldSparkline;
alignmentFactors?: DisplayValueAlignmentFactors;
valueManualFontSize?: number;
nameManualFontSize?: number;
@@ -31,8 +33,8 @@ export function RadialText({
theme,
dimensions,
textMode,
vizCount,
shape,
sparkline,
alignmentFactors,
valueManualFontSize,
nameManualFontSize,
@@ -44,6 +46,10 @@ export function RadialText({
return null;
}
if (textMode === 'auto') {
textMode = vizCount === 1 ? 'value' : 'value_and_name';
}
const nameToAlignTo = (alignmentFactors ? alignmentFactors.title : displayValue.title) ?? '';
const valueToAlignTo = formattedValueToString(alignmentFactors ? alignmentFactors : displayValue);
@@ -53,7 +59,7 @@ export function RadialText({
// Not sure where this comes from but svg text is not using body line-height
const lineHeight = 1.21;
const valueWidthToRadiusFactor = 0.82;
const valueWidthToRadiusFactor = 0.85;
const nameToHeightFactor = 0.45;
const largeRadiusScalingDecay = 0.86;
@@ -92,23 +98,18 @@ export function RadialText({
const valueHeight = valueFontSize * lineHeight;
const nameHeight = nameFontSize * lineHeight;
const valueY = showName ? centerY - nameHeight * 0.3 : centerY;
const nameY = showValue ? valueY + valueHeight * 0.7 : centerY;
const valueY = showName ? centerY - nameHeight / 2 : centerY;
const valueNameSpacing = valueHeight / 3.5;
const nameY = showValue ? valueY + valueHeight / 2 + valueNameSpacing : centerY;
const nameColor = showValue ? theme.colors.text.secondary : theme.colors.text.primary;
const suffixShift = (valueFontSize - unitFontSize * 1.2) / 2;
// adjust the text up on gauges and when sparklines are present
let yOffset = 0;
if (shape === 'gauge') {
// we render from the center of the gauge, so move up by half of half of the total height
yOffset -= (valueHeight + nameHeight) / 4;
}
if (sparkline) {
yOffset -= 8;
}
// For gauge shape we shift text up a bit
const valueDy = shape === 'gauge' ? -valueFontSize * 0.3 : 0;
const nameDy = shape === 'gauge' ? -nameFontSize * 0.7 : 0;
return (
<g transform={`translate(0, ${yOffset})`}>
<g>
{showValue && (
<text
x={centerX}
@@ -118,6 +119,7 @@ export function RadialText({
className={styles.text}
textAnchor="middle"
dominantBaseline="middle"
dy={valueDy}
>
<tspan fontSize={unitFontSize}>{displayValue.prefix ?? ''}</tspan>
<tspan>{displayValue.text}</tspan>
@@ -131,6 +133,7 @@ export function RadialText({
fontSize={nameFontSize}
x={centerX}
y={nameY}
dy={nameDy}
textAnchor="middle"
dominantBaseline="middle"
fill={nameColor}

View File

@@ -4,12 +4,11 @@ import { GaugeDimensions } from './utils';
export interface GlowGradientProps {
id: string;
barWidth: number;
radius: number;
}
export function GlowGradient({ id, barWidth }: GlowGradientProps) {
// 0.75 is the minimum glow size, and it scales with bar width
const glowSize = 0.75 + barWidth * 0.08;
export function GlowGradient({ id, radius }: GlowGradientProps) {
const glowSize = 0.02 * radius;
return (
<filter id={id} filterUnits="userSpaceOnUse">
@@ -83,7 +82,7 @@ export function MiddleCircleGlow({ dimensions, gaugeId, color }: CenterGlowProps
<>
<defs>
<radialGradient id={gradientId} r={'50%'} fr={'0%'}>
<stop offset="0%" stopColor={color} stopOpacity={0.15} />
<stop offset="0%" stopColor={color} stopOpacity={0.2} />
<stop offset="90%" stopColor={color} stopOpacity={0} />
</radialGradient>
</defs>

View File

@@ -16,7 +16,7 @@ export interface SparklineProps extends Themeable2 {
sparkline: FieldSparkline;
}
const SparklineFn: React.FC<SparklineProps> = memo((props) => {
export const Sparkline: React.FC<SparklineProps> = memo((props) => {
const { sparkline, config: fieldConfig, theme, width, height } = props;
const { frame: alignedDataFrame, warning } = prepareSeries(sparkline, fieldConfig);
@@ -30,14 +30,4 @@ const SparklineFn: React.FC<SparklineProps> = memo((props) => {
return <UPlotChart data={data} config={configBuilder} width={width} height={height} />;
});
SparklineFn.displayName = 'Sparkline';
// we converted to function component above, but some apps extend Sparkline, so we need
// to keep exporting a class component until those apps are all rolled out.
// see https://github.com/grafana/app-observability-plugin/pull/2079
// eslint-disable-next-line react-prefer-function-component/react-prefer-function-component
export class Sparkline extends React.PureComponent<SparklineProps> {
render() {
return <SparklineFn {...this.props} />;
}
}
Sparkline.displayName = 'Sparkline';

View File

@@ -105,7 +105,6 @@ export function TableNG(props: TableNGProps) {
const {
cellHeight,
data,
disableKeyboardEvents,
disableSanitizeHtml,
enablePagination = false,
enableSharedCrosshair = false,
@@ -820,9 +819,9 @@ export function TableNG(props: TableNGProps) {
}
}}
onCellKeyDown={
hasNestedFrames || disableKeyboardEvents
hasNestedFrames
? (_, event) => {
if (disableKeyboardEvents || event.isDefaultPrevented()) {
if (event.isDefaultPrevented()) {
// skip parent grid keyboard navigation if nested grid handled it
event.preventGridDefault();
}

View File

@@ -138,8 +138,6 @@ export interface BaseTableProps {
enableVirtualization?: boolean;
// for MarkdownCell, this flag disables sanitization of HTML content. Configured via config.ini.
disableSanitizeHtml?: boolean;
// if true, disables all keyboard events in the table. this is used when previewing a table (i.e. suggestions)
disableKeyboardEvents?: boolean;
}
/* ---------------------------- Table cell props ---------------------------- */

View File

@@ -187,15 +187,6 @@ func (hs *HTTPServer) registerRoutes() {
publicdashboardsapi.CountPublicDashboardRequest(),
hs.Index,
)
r.Get("/bootdata/:accessToken",
reqNoAuth,
hs.PublicDashboardsApi.Middleware.HandleView,
publicdashboardsapi.SetPublicDashboardAccessToken,
publicdashboardsapi.SetPublicDashboardOrgIdOnContext(hs.PublicDashboardsApi.PublicDashboardService),
publicdashboardsapi.CountPublicDashboardRequest(),
hs.GetBootdata,
)
}
r.Get("/explore", authorize(ac.EvalPermission(ac.ActionDatasourcesExplore)), hs.Index)

View File

@@ -1,29 +0,0 @@
package auditing
import (
auditinternal "k8s.io/apiserver/pkg/apis/audit"
"k8s.io/apiserver/pkg/audit"
"k8s.io/apiserver/pkg/authorization/authorizer"
)
// NoopBackend is a no-op implementation of audit.Backend
type NoopBackend struct{}

// ProvideNoopBackend returns a NoopBackend as an audit.Backend (wire provider).
func ProvideNoopBackend() audit.Backend { return &NoopBackend{} }

// ProcessEvents discards all events; returning false reports none were processed.
// Receiver normalized to a value receiver for consistency with the other
// NoopBackend methods (the type is stateless, and value methods remain in the
// method set of the *NoopBackend returned by ProvideNoopBackend).
func (NoopBackend) ProcessEvents(k8sEvents ...*auditinternal.Event) bool { return false }

// Run is a no-op; there is no background work to start.
func (NoopBackend) Run(stopCh <-chan struct{}) error { return nil }

// Shutdown is a no-op; there is nothing to flush or stop.
func (NoopBackend) Shutdown() {}

// String returns an empty identifier for the no-op backend.
func (NoopBackend) String() string { return "" }

// NoopPolicyRuleEvaluator is a no-op implementation of audit.PolicyRuleEvaluator
type NoopPolicyRuleEvaluator struct{}

// ProvideNoopPolicyRuleEvaluator returns a NoopPolicyRuleEvaluator (wire provider).
func ProvideNoopPolicyRuleEvaluator() audit.PolicyRuleEvaluator { return &NoopPolicyRuleEvaluator{} }

// EvaluatePolicyRule always returns LevelNone, i.e. auditing is disabled.
func (NoopPolicyRuleEvaluator) EvaluatePolicyRule(authorizer.Attributes) audit.RequestAuditConfig {
	return audit.RequestAuditConfig{Level: auditinternal.LevelNone}
}

View File

@@ -61,24 +61,20 @@ func (s *legacyStorage) List(ctx context.Context, options *internalversion.ListO
}
func (s *legacyStorage) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
if s.dsConfigHandlerRequestsDuration != nil {
start := time.Now()
defer func() {
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Get"), time.Since(start).Seconds())
}()
}
start := time.Now()
defer func() {
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Get"), time.Since(start).Seconds())
}()
return s.datasources.GetDataSource(ctx, name)
}
// Create implements rest.Creater.
func (s *legacyStorage) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {
if s.dsConfigHandlerRequestsDuration != nil {
start := time.Now()
defer func() {
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
}()
}
start := time.Now()
defer func() {
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
}()
ds, ok := obj.(*v0alpha1.DataSource)
if !ok {
@@ -89,12 +85,10 @@ func (s *legacyStorage) Create(ctx context.Context, obj runtime.Object, createVa
// Update implements rest.Updater.
func (s *legacyStorage) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {
if s.dsConfigHandlerRequestsDuration != nil {
start := time.Now()
defer func() {
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
}()
}
start := time.Now()
defer func() {
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
}()
old, err := s.Get(ctx, name, &metav1.GetOptions{})
if err != nil {
@@ -132,12 +126,10 @@ func (s *legacyStorage) Update(ctx context.Context, name string, objInfo rest.Up
// Delete implements rest.GracefulDeleter.
func (s *legacyStorage) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) {
if s.dsConfigHandlerRequestsDuration != nil {
start := time.Now()
defer func() {
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
}()
}
start := time.Now()
defer func() {
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
}()
err := s.datasources.DeleteDataSource(ctx, name)
return nil, false, err

View File

@@ -3,7 +3,6 @@ package datasource
import (
"context"
"encoding/json"
"errors"
"fmt"
"maps"
@@ -39,14 +38,14 @@ var (
// DataSourceAPIBuilder is used just so wire has something unique to return
type DataSourceAPIBuilder struct {
datasourceResourceInfo utils.ResourceInfo
pluginJSON plugins.JSONData
client PluginClient // will only ever be called with the same plugin id!
datasources PluginDatasourceProvider
contextProvider PluginContextWrapper
accessControl accesscontrol.AccessControl
queryTypes *queryV0.QueryTypeDefinitionList
configCrudUseNewApis bool
dataSourceCRUDMetric *prometheus.HistogramVec
pluginJSON plugins.JSONData
client PluginClient // will only ever be called with the same plugin id!
datasources PluginDatasourceProvider
contextProvider PluginContextWrapper
accessControl accesscontrol.AccessControl
queryTypes *queryV0.QueryTypeDefinitionList
configCrudUseNewApis bool
}
func RegisterAPIService(
@@ -67,16 +66,6 @@ func RegisterAPIService(
var err error
var builder *DataSourceAPIBuilder
dataSourceCRUDMetric := metricutil.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "grafana",
Name: "ds_config_handler_requests_duration_seconds",
Help: "Duration of requests handled by datasource configuration handlers",
}, []string{"code_path", "handler"})
regErr := reg.Register(dataSourceCRUDMetric)
if regErr != nil && !errors.As(regErr, &prometheus.AlreadyRegisteredError{}) {
return nil, regErr
}
pluginJSONs, err := getDatasourcePlugins(pluginSources)
if err != nil {
return nil, fmt.Errorf("error getting list of datasource plugins: %s", err)
@@ -102,7 +91,6 @@ func RegisterAPIService(
if err != nil {
return nil, err
}
builder.SetDataSourceCRUDMetrics(dataSourceCRUDMetric)
apiRegistrar.RegisterAPI(builder)
}
@@ -173,10 +161,6 @@ func (b *DataSourceAPIBuilder) GetGroupVersion() schema.GroupVersion {
return b.datasourceResourceInfo.GroupVersion()
}
func (b *DataSourceAPIBuilder) SetDataSourceCRUDMetrics(datasourceCRUDMetric *prometheus.HistogramVec) {
b.dataSourceCRUDMetric = datasourceCRUDMetric
}
func addKnownTypes(scheme *runtime.Scheme, gv schema.GroupVersion) {
scheme.AddKnownTypes(gv,
&datasourceV0.DataSource{},
@@ -234,9 +218,13 @@ func (b *DataSourceAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver
if b.configCrudUseNewApis {
legacyStore := &legacyStorage{
datasources: b.datasources,
resourceInfo: &ds,
dsConfigHandlerRequestsDuration: b.dataSourceCRUDMetric,
datasources: b.datasources,
resourceInfo: &ds,
dsConfigHandlerRequestsDuration: metricutil.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "grafana",
Name: "ds_config_handler_requests_duration_seconds",
Help: "Duration of requests handled by datasource configuration handlers",
}, []string{"code_path", "handler"}),
}
unified, err := grafanaregistry.NewRegistryStore(opts.Scheme, ds, opts.OptsGetter)
if err != nil {

View File

@@ -22,7 +22,6 @@ type iamAuthorizer struct {
func newIAMAuthorizer(accessClient authlib.AccessClient, legacyAccessClient authlib.AccessClient) authorizer.Authorizer {
resourceAuthorizer := make(map[string]authorizer.Authorizer)
serviceAuthorizer := gfauthorizer.NewServiceAuthorizer()
// Authorizer that allows any authenticated user
// To be used when authorization is handled at the storage layer
allowAuthorizer := authorizer.AuthorizerFunc(func(
@@ -51,7 +50,8 @@ func newIAMAuthorizer(accessClient authlib.AccessClient, legacyAccessClient auth
resourceAuthorizer[iamv0.UserResourceInfo.GetName()] = authorizer
resourceAuthorizer[iamv0.ExternalGroupMappingResourceInfo.GetName()] = authorizer
resourceAuthorizer[iamv0.TeamResourceInfo.GetName()] = authorizer
resourceAuthorizer["searchUsers"] = serviceAuthorizer
serviceAuthorizer := gfauthorizer.NewServiceAuthorizer()
resourceAuthorizer["searchTeams"] = serviceAuthorizer
return &iamAuthorizer{resourceAuthorizer: resourceAuthorizer}

View File

@@ -1,164 +0,0 @@
package authorizer
import (
"context"
"errors"
"fmt"
"net/http"
"sync"
"github.com/grafana/authlib/authn"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
dashboardv1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1"
folderv1 "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1"
"github.com/grafana/grafana/apps/provisioning/pkg/auth"
"github.com/grafana/grafana/pkg/apimachinery/utils"
)
var (
ErrNoConfigProvider = errors.New("no config provider for group resource")
ErrNoVersionInfo = errors.New("no version info for group resource")
Versions = map[schema.GroupResource]string{
{Group: folderv1.GROUP, Resource: folderv1.RESOURCE}: folderv1.VERSION,
{Group: dashboardv1.GROUP, Resource: dashboardv1.DASHBOARD_RESOURCE}: dashboardv1.VERSION,
}
)
// ConfigProvider is a function that provides a rest.Config for a given context.
type ConfigProvider func(ctx context.Context) (*rest.Config, error)
// DynamicClientFactory is a function that creates a dynamic.Interface from a rest.Config.
// This can be overridden in tests.
type DynamicClientFactory func(config *rest.Config) (dynamic.Interface, error)
// ParentProviderImpl fetches the parent folder information for resources from
// API servers (local or remote, depending on how the config providers were built).
type ParentProviderImpl struct {
	// configProviders maps each supported group resource to the function that
	// yields a rest.Config for the API server hosting that resource.
	configProviders map[schema.GroupResource]ConfigProvider
	// versions maps each group resource to the API version used for lookups.
	versions map[schema.GroupResource]string
	// dynamicClientFactory creates a dynamic.Interface from a rest.Config.
	// Kept as a field so tests can substitute a fake factory.
	dynamicClientFactory DynamicClientFactory

	// Cache of dynamic clients for each group resource
	// This is used to avoid creating a new dynamic client for each request
	// and to reuse the same client for the same group resource.
	clients map[schema.GroupResource]dynamic.Interface
	// clientsMu guards clients. Because the struct embeds a mutex it must not
	// be copied once in use; always pass *ParentProviderImpl.
	clientsMu sync.Mutex
}

// DialConfig holds the configuration for dialing a remote API server.
type DialConfig struct {
	Host     string // base URL of the remote API server
	Insecure bool   // when true, TLS certificate verification is skipped
	CAFile   string // path to a CA bundle used to verify the server
	Audience string // token-exchange audience attached to outgoing requests
}
// NewLocalConfigProvider builds the provider map for local API servers: every
// supported group resource (folders and dashboards) shares the same given
// ConfigProvider.
func NewLocalConfigProvider(
	configProvider ConfigProvider,
) map[schema.GroupResource]ConfigProvider {
	providers := make(map[schema.GroupResource]ConfigProvider, 2)
	providers[schema.GroupResource{Group: folderv1.GROUP, Resource: folderv1.RESOURCE}] = configProvider
	providers[schema.GroupResource{Group: dashboardv1.GROUP, Resource: dashboardv1.DASHBOARD_RESOURCE}] = configProvider
	return providers
}
// NewRemoteConfigProvider creates a map of ConfigProviders for remote API servers based on the given DialConfig.
// Each provider returns a rest.Config that exchanges tokens via exchangeClient
// (using the DialConfig's audience) and applies the DialConfig's TLS settings.
func NewRemoteConfigProvider(cfg map[schema.GroupResource]DialConfig, exchangeClient authn.TokenExchanger) map[schema.GroupResource]ConfigProvider {
	configProviders := make(map[schema.GroupResource]ConfigProvider, len(cfg))
	for gr, dialConfig := range cfg {
		// NOTE(review): the closure captures dialConfig; this is per-iteration
		// (correct) under Go 1.22+ loop-variable semantics — confirm the module's
		// go directive is >= 1.22, otherwise every provider would share the last
		// iteration's DialConfig.
		configProviders[gr] = func(ctx context.Context) (*rest.Config, error) {
			return &rest.Config{
				Host: dialConfig.Host,
				// Wrap the transport so every request carries an exchanged token
				// scoped to the remote server's audience.
				WrapTransport: func(rt http.RoundTripper) http.RoundTripper {
					return auth.NewRoundTripper(exchangeClient, rt, dialConfig.Audience)
				},
				TLSClientConfig: rest.TLSClientConfig{
					Insecure: dialConfig.Insecure,
					CAFile:   dialConfig.CAFile,
				},
				// Client-side rate limits for requests to the remote server.
				QPS:   50,
				Burst: 100,
			}, nil
		}
	}
	return configProviders
}
// NewApiParentProvider creates a new ParentProviderImpl with the given config
// providers and version info, using the real client-go dynamic client factory
// and an empty client cache.
func NewApiParentProvider(
	configProviders map[schema.GroupResource]ConfigProvider,
	version map[schema.GroupResource]string,
) *ParentProviderImpl {
	// Default factory; tests may override dynamicClientFactory with a fake.
	factory := func(config *rest.Config) (dynamic.Interface, error) {
		return dynamic.NewForConfig(config)
	}
	return &ParentProviderImpl{
		configProviders:      configProviders,
		versions:             version,
		dynamicClientFactory: factory,
		clients:              map[schema.GroupResource]dynamic.Interface{},
	}
}
// HasParent reports whether a ConfigProvider is registered for gr, i.e. whether
// parent-folder lookups are supported for that group resource.
func (p *ParentProviderImpl) HasParent(gr schema.GroupResource) bool {
	if _, registered := p.configProviders[gr]; registered {
		return true
	}
	return false
}
// getClient returns the cached dynamic client for gr, creating one from the
// registered ConfigProvider (via dynamicClientFactory) and caching it on first
// use. Returns ErrNoConfigProvider (wrapped) when gr has no registered provider.
func (p *ParentProviderImpl) getClient(ctx context.Context, gr schema.GroupResource) (dynamic.Interface, error) {
	// Fast path: serve from the cache under the lock.
	p.clientsMu.Lock()
	client, ok := p.clients[gr]
	p.clientsMu.Unlock()
	if ok {
		return client, nil
	}

	provider, ok := p.configProviders[gr]
	if !ok {
		return nil, fmt.Errorf("%w: %s", ErrNoConfigProvider, gr.String())
	}

	restConfig, err := provider(ctx)
	if err != nil {
		return nil, err
	}

	client, err = p.dynamicClientFactory(restConfig)
	if err != nil {
		return nil, err
	}

	// NOTE(review): two goroutines can race past the first lookup and both build
	// a client; the later store wins. That looks like harmless duplicate work
	// rather than a correctness issue — confirm client construction has no side
	// effects worth guarding with a single-flight.
	p.clientsMu.Lock()
	p.clients[gr] = client
	p.clientsMu.Unlock()

	return client, nil
}
// GetParent resolves the parent folder of the named resource by fetching the
// object from the appropriate API server and reading its folder annotation
// (utils.AnnoKeyFolder). An empty string means the resource has no parent
// annotation (e.g. it lives at the root).
func (p *ParentProviderImpl) GetParent(ctx context.Context, gr schema.GroupResource, namespace, name string) (string, error) {
	client, err := p.getClient(ctx, gr)
	if err != nil {
		return "", err
	}

	version, ok := p.versions[gr]
	if !ok {
		return "", fmt.Errorf("%w: %s", ErrNoVersionInfo, gr.String())
	}

	// Scope the dynamic client to the concrete GVR and namespace.
	resourceClient := client.Resource(schema.GroupVersionResource{
		Group:    gr.Group,
		Resource: gr.Resource,
		Version:  version,
	}).Namespace(namespace)

	unstructObj, err := resourceClient.Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}

	// A missing annotation yields the map zero value "", i.e. "no parent".
	return unstructObj.GetAnnotations()[utils.AnnoKeyFolder], nil
}

View File

@@ -1,198 +0,0 @@
package authorizer
import (
"context"
"errors"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
folderv1 "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1"
"github.com/grafana/grafana/pkg/apimachinery/utils"
)
var configProvider = func(ctx context.Context) (*rest.Config, error) {
return &rest.Config{}, nil
}
func TestParentProviderImpl_GetParent(t *testing.T) {
tests := []struct {
name string
gr schema.GroupResource
namespace string
resourceName string
parentFolder string
setupFake func(*fakeDynamicClient, *fakeResourceInterface)
configProviders map[schema.GroupResource]ConfigProvider
versions map[schema.GroupResource]string
expectedError string
expectedParent string
}{
{
name: "successfully get parent folder",
gr: schema.GroupResource{Group: folderv1.GROUP, Resource: folderv1.RESOURCE},
namespace: "org-1",
resourceName: "dash1",
parentFolder: "fold1",
setupFake: func(fakeClient *fakeDynamicClient, fakeResource *fakeResourceInterface) {
fakeClient.resourceInterface = fakeResource
fakeResource.getFunc = func(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {
obj := &unstructured.Unstructured{}
obj.SetAnnotations(map[string]string{utils.AnnoKeyFolder: "fold1"})
return obj, nil
}
},
configProviders: map[schema.GroupResource]ConfigProvider{
{Group: folderv1.GROUP, Resource: folderv1.RESOURCE}: configProvider,
},
versions: Versions,
expectedParent: "fold1",
},
{
name: "resource without parent annotation returns empty",
gr: schema.GroupResource{Group: folderv1.GROUP, Resource: folderv1.RESOURCE},
namespace: "org-1",
resourceName: "dash1",
setupFake: func(fakeClient *fakeDynamicClient, fakeResource *fakeResourceInterface) {
fakeClient.resourceInterface = fakeResource
fakeResource.getFunc = func(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {
obj := &unstructured.Unstructured{}
obj.SetAnnotations(map[string]string{})
return obj, nil
}
},
configProviders: map[schema.GroupResource]ConfigProvider{
{Group: folderv1.GROUP, Resource: folderv1.RESOURCE}: configProvider,
},
versions: Versions,
expectedParent: "",
},
{
name: "no config provider returns error",
gr: schema.GroupResource{Group: "unknown.group", Resource: "unknown"},
namespace: "org-1",
resourceName: "resource-1",
configProviders: map[schema.GroupResource]ConfigProvider{},
versions: Versions,
expectedError: ErrNoConfigProvider.Error(),
},
{
name: "config provider returns error",
gr: schema.GroupResource{Group: folderv1.GROUP, Resource: folderv1.RESOURCE},
namespace: "org-1",
resourceName: "resource-1",
configProviders: map[schema.GroupResource]ConfigProvider{
{Group: folderv1.GROUP, Resource: folderv1.RESOURCE}: func(ctx context.Context) (*rest.Config, error) {
return nil, errors.New("config provider error")
},
},
versions: Versions,
expectedError: "config provider error",
},
{
name: "no version info returns error",
gr: schema.GroupResource{Group: folderv1.GROUP, Resource: folderv1.RESOURCE},
namespace: "org-1",
resourceName: "resource-1",
configProviders: map[schema.GroupResource]ConfigProvider{
{Group: folderv1.GROUP, Resource: folderv1.RESOURCE}: func(ctx context.Context) (*rest.Config, error) {
return &rest.Config{}, nil
},
},
versions: map[schema.GroupResource]string{},
expectedError: ErrNoVersionInfo.Error(),
},
{
name: "resource get returns error",
gr: schema.GroupResource{Group: folderv1.GROUP, Resource: folderv1.RESOURCE},
namespace: "org-1",
resourceName: "resource-1",
setupFake: func(fakeClient *fakeDynamicClient, fakeResource *fakeResourceInterface) {
fakeClient.resourceInterface = fakeResource
fakeResource.getFunc = func(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {
return nil, errors.New("resource not found")
}
},
configProviders: map[schema.GroupResource]ConfigProvider{
{Group: folderv1.GROUP, Resource: folderv1.RESOURCE}: func(ctx context.Context) (*rest.Config, error) {
return &rest.Config{}, nil
},
},
versions: Versions,
expectedError: "resource not found",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fakeClient := &fakeDynamicClient{}
fakeResource := &fakeResourceInterface{}
if tt.setupFake != nil {
tt.setupFake(fakeClient, fakeResource)
}
provider := &ParentProviderImpl{
configProviders: tt.configProviders,
versions: tt.versions,
dynamicClientFactory: func(config *rest.Config) (dynamic.Interface, error) {
return fakeClient, nil
},
clients: make(map[schema.GroupResource]dynamic.Interface),
}
parent, err := provider.GetParent(context.Background(), tt.gr, tt.namespace, tt.resourceName)
if tt.expectedError != "" {
require.Error(t, err)
assert.Contains(t, err.Error(), tt.expectedError)
assert.Empty(t, parent)
} else {
require.NoError(t, err)
assert.Equal(t, tt.expectedParent, parent)
}
})
}
}
// fakeDynamicClient is a fake implementation of dynamic.Interface
type fakeDynamicClient struct {
resourceInterface dynamic.ResourceInterface
}
func (f *fakeDynamicClient) Resource(resource schema.GroupVersionResource) dynamic.NamespaceableResourceInterface {
return &fakeNamespaceableResourceInterface{
resourceInterface: f.resourceInterface,
}
}
// fakeNamespaceableResourceInterface is a fake implementation of dynamic.NamespaceableResourceInterface
type fakeNamespaceableResourceInterface struct {
dynamic.NamespaceableResourceInterface
resourceInterface dynamic.ResourceInterface
}
func (f *fakeNamespaceableResourceInterface) Namespace(namespace string) dynamic.ResourceInterface {
if f.resourceInterface != nil {
return f.resourceInterface
}
return &fakeResourceInterface{}
}
// fakeResourceInterface is a fake implementation of dynamic.ResourceInterface
type fakeResourceInterface struct {
dynamic.ResourceInterface
getFunc func(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error)
}
func (f *fakeResourceInterface) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {
if f.getFunc != nil {
return f.getFunc(ctx, name, opts, subresources...)
}
return &unstructured.Unstructured{}, nil
}

View File

@@ -10,44 +10,24 @@ import (
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
"github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/services/apiserver/auth/authorizer/storewrapper"
)
// TODO: Logs, Metrics, Traces?
// ParentProvider interface for fetching parent information of resources
type ParentProvider interface {
// HasParent checks if the given GroupResource has a parent folder
HasParent(gr schema.GroupResource) bool
// GetParent fetches the parent folder name for the given resource
GetParent(ctx context.Context, gr schema.GroupResource, namespace, name string) (string, error)
}
// ResourcePermissionsAuthorizer
type ResourcePermissionsAuthorizer struct {
accessClient types.AccessClient
parentProvider ParentProvider
logger log.Logger
accessClient types.AccessClient
}
var _ storewrapper.ResourceStorageAuthorizer = (*ResourcePermissionsAuthorizer)(nil)
func NewResourcePermissionsAuthorizer(
accessClient types.AccessClient,
parentProvider ParentProvider,
) *ResourcePermissionsAuthorizer {
func NewResourcePermissionsAuthorizer(accessClient types.AccessClient) *ResourcePermissionsAuthorizer {
return &ResourcePermissionsAuthorizer{
accessClient: accessClient,
parentProvider: parentProvider,
logger: log.New("iam.resource-permissions-authorizer"),
accessClient: accessClient,
}
}
func isAccessPolicy(authInfo types.AuthInfo) bool {
return types.IsIdentityType(authInfo.GetIdentityType(), types.TypeAccessPolicy)
}
// AfterGet implements ResourceStorageAuthorizer.
func (r *ResourcePermissionsAuthorizer) AfterGet(ctx context.Context, obj runtime.Object) error {
authInfo, ok := types.AuthInfoFrom(ctx)
@@ -57,24 +37,9 @@ func (r *ResourcePermissionsAuthorizer) AfterGet(ctx context.Context, obj runtim
switch o := obj.(type) {
case *iamv0.ResourcePermission:
target := o.Spec.Resource
targetGR := schema.GroupResource{Group: target.ApiGroup, Resource: target.Resource}
// TODO: Fetch the resource to retrieve its parent folder.
parent := ""
// Fetch the parent of the resource
// Access Policies have global scope, so no parent check needed
if !isAccessPolicy(authInfo) && r.parentProvider.HasParent(targetGR) {
p, err := r.parentProvider.GetParent(ctx, targetGR, o.Namespace, target.Name)
if err != nil {
r.logger.Error("after get: error fetching parent", "error", err.Error(),
"namespace", o.Namespace,
"group", target.ApiGroup,
"resource", target.Resource,
"name", target.Name,
)
return err
}
parent = p
}
checkReq := types.CheckRequest{
Namespace: o.Namespace,
@@ -107,24 +72,9 @@ func (r *ResourcePermissionsAuthorizer) beforeWrite(ctx context.Context, obj run
switch o := obj.(type) {
case *iamv0.ResourcePermission:
target := o.Spec.Resource
targetGR := schema.GroupResource{Group: target.ApiGroup, Resource: target.Resource}
// TODO: Fetch the resource to retrieve its parent folder.
parent := ""
// Fetch the parent of the resource
// Access Policies have global scope, so no parent check needed
if !isAccessPolicy(authInfo) && r.parentProvider.HasParent(targetGR) {
p, err := r.parentProvider.GetParent(ctx, targetGR, o.Namespace, target.Name)
if err != nil {
r.logger.Error("before write: error fetching parent", "error", err.Error(),
"namespace", o.Namespace,
"group", target.ApiGroup,
"resource", target.Resource,
"name", target.Name,
)
return err
}
parent = p
}
checkReq := types.CheckRequest{
Namespace: o.Namespace,
@@ -203,29 +153,8 @@ func (r *ResourcePermissionsAuthorizer) FilterList(ctx context.Context, list run
canViewFuncs[gr] = canView
}
target := item.Spec.Resource
targetGR := schema.GroupResource{Group: target.ApiGroup, Resource: target.Resource}
// TODO : Fetch the resource to retrieve its parent folder.
parent := ""
// Fetch the parent of the resource
// It's not efficient to do for every item in the list, but it's a good starting point.
// Access Policies have global scope, so no parent check needed
if !isAccessPolicy(authInfo) && r.parentProvider.HasParent(targetGR) {
p, err := r.parentProvider.GetParent(ctx, targetGR, item.Namespace, target.Name)
if err != nil {
// Skip item on error fetching parent
r.logger.Warn("filter list: error fetching parent, skipping item",
"error", err.Error(),
"namespace",
item.Namespace,
"group", target.ApiGroup,
"resource", target.Resource,
"name", target.Name,
)
continue
}
parent = p
}
allowed := canView(item.Spec.Resource.Name, parent)
if allowed {

View File

@@ -5,15 +5,13 @@ import (
"testing"
"github.com/go-jose/go-jose/v4/jwt"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/grafana/authlib/authn"
"github.com/grafana/authlib/types"
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var (
@@ -65,7 +63,6 @@ func TestResourcePermissions_AfterGet(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
parent := "fold-1"
checkFunc := func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error) {
require.NotNil(t, id)
// Check is called with the user's identity
@@ -77,18 +74,12 @@ func TestResourcePermissions_AfterGet(t *testing.T) {
require.Equal(t, fold1.Spec.Resource.Resource, req.Resource)
require.Equal(t, fold1.Spec.Resource.Name, req.Name)
require.Equal(t, utils.VerbGetPermissions, req.Verb)
require.Equal(t, parent, folder)
return types.CheckResponse{Allowed: tt.shouldAllow}, nil
}
getParentFunc := func(ctx context.Context, gr schema.GroupResource, namespace, name string) (string, error) {
// For this test, we can return a fixed parent folder ID
return parent, nil
}
accessClient := &fakeAccessClient{checkFunc: checkFunc}
fakeParentProvider := &fakeParentProvider{hasParent: true, getParentFunc: getParentFunc}
resPermAuthz := NewResourcePermissionsAuthorizer(accessClient, fakeParentProvider)
resPermAuthz := NewResourcePermissionsAuthorizer(accessClient)
ctx := types.WithAuthInfo(context.Background(), user)
err := resPermAuthz.AfterGet(ctx, fold1)
@@ -98,7 +89,6 @@ func TestResourcePermissions_AfterGet(t *testing.T) {
require.Error(t, err, "expected error for denied access")
}
require.True(t, accessClient.checkCalled, "accessClient.Check should be called")
require.True(t, fakeParentProvider.getParentCalled, "parentProvider.GetParent should be called")
})
}
}
@@ -131,32 +121,23 @@ func TestResourcePermissions_FilterList(t *testing.T) {
require.Equal(t, "dashboards", req.Resource)
}
// Return a checker that allows access to fold-1 and its content
// Return a checker that allows only specific resources: fold-1 and dash-2
return func(name, folder string) bool {
if name == "fold-1" || folder == "fold-1" {
if name == "fold-1" || name == "dash-2" {
return true
}
return false
}, &types.NoopZookie{}, nil
}
getParentFunc := func(ctx context.Context, gr schema.GroupResource, namespace, name string) (string, error) {
if name == "dash-2" {
return "fold-1", nil
}
return "", nil
}
accessClient := &fakeAccessClient{compileFunc: compileFunc}
fakeParentProvider := &fakeParentProvider{hasParent: true, getParentFunc: getParentFunc}
resPermAuthz := NewResourcePermissionsAuthorizer(accessClient, fakeParentProvider)
resPermAuthz := NewResourcePermissionsAuthorizer(accessClient)
ctx := types.WithAuthInfo(context.Background(), user)
obj, err := resPermAuthz.FilterList(ctx, list)
require.NoError(t, err)
require.NotNil(t, list)
require.True(t, accessClient.compileCalled, "accessClient.Compile should be called")
require.True(t, fakeParentProvider.getParentCalled, "parentProvider.GetParent should be called")
filtered, ok := obj.(*iamv0.ResourcePermissionList)
require.True(t, ok, "response should be of type ResourcePermissionList")
@@ -184,7 +165,6 @@ func TestResourcePermissions_beforeWrite(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
parent := "fold-1"
checkFunc := func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error) {
require.NotNil(t, id)
// Check is called with the user's identity
@@ -196,18 +176,12 @@ func TestResourcePermissions_beforeWrite(t *testing.T) {
require.Equal(t, fold1.Spec.Resource.Resource, req.Resource)
require.Equal(t, fold1.Spec.Resource.Name, req.Name)
require.Equal(t, utils.VerbSetPermissions, req.Verb)
require.Equal(t, parent, folder)
return types.CheckResponse{Allowed: tt.shouldAllow}, nil
}
getParentFunc := func(ctx context.Context, gr schema.GroupResource, namespace, name string) (string, error) {
return parent, nil
}
accessClient := &fakeAccessClient{checkFunc: checkFunc}
fakeParentProvider := &fakeParentProvider{hasParent: true, getParentFunc: getParentFunc}
resPermAuthz := NewResourcePermissionsAuthorizer(accessClient, fakeParentProvider)
resPermAuthz := NewResourcePermissionsAuthorizer(accessClient)
ctx := types.WithAuthInfo(context.Background(), user)
err := resPermAuthz.beforeWrite(ctx, fold1)
@@ -217,7 +191,6 @@ func TestResourcePermissions_beforeWrite(t *testing.T) {
require.Error(t, err, "expected error for denied delete")
}
require.True(t, accessClient.checkCalled, "accessClient.Check should be called")
require.True(t, fakeParentProvider.getParentCalled, "parentProvider.GetParent should be called")
})
}
}
@@ -241,18 +214,3 @@ func (m *fakeAccessClient) Compile(ctx context.Context, id types.AuthInfo, req t
}
var _ types.AccessClient = (*fakeAccessClient)(nil)
type fakeParentProvider struct {
hasParent bool
getParentCalled bool
getParentFunc func(ctx context.Context, gr schema.GroupResource, namespace, name string) (string, error)
}
func (f *fakeParentProvider) HasParent(gr schema.GroupResource) bool {
return f.hasParent
}
func (f *fakeParentProvider) GetParent(ctx context.Context, gr schema.GroupResource, namespace, name string) (string, error) {
f.getParentCalled = true
return f.getParentFunc(ctx, gr, namespace, name)
}

View File

@@ -7,7 +7,6 @@ import (
"github.com/grafana/authlib/types"
"github.com/grafana/grafana/pkg/infra/log"
iamauthorizer "github.com/grafana/grafana/pkg/registry/apis/iam/authorizer"
"github.com/grafana/grafana/pkg/registry/apis/iam/externalgroupmapping"
"github.com/grafana/grafana/pkg/registry/apis/iam/legacy"
"github.com/grafana/grafana/pkg/registry/apis/iam/serviceaccount"
@@ -61,10 +60,6 @@ type IdentityAccessManagementAPIBuilder struct {
roleBindingsStorage RoleBindingStorageBackend
externalGroupMappingStorage ExternalGroupMappingStorageBackend
// Required for resource permissions authorization
// fetches resources parent folders
resourceParentProvider iamauthorizer.ParentProvider
// Access Control
authorizer authorizer.Authorizer
// legacyAccessClient is used for the identity apis, we need to migrate to the access client
@@ -82,11 +77,10 @@ type IdentityAccessManagementAPIBuilder struct {
reg prometheus.Registerer
logger log.Logger
dual dualwrite.Service
unified resource.ResourceClient
userSearchClient resourcepb.ResourceIndexClient
userSearchHandler *user.SearchHandler
teamSearch *TeamSearchHandler
dual dualwrite.Service
unified resource.ResourceClient
userSearchClient resourcepb.ResourceIndexClient
teamSearch *TeamSearchHandler
teamGroupsHandler externalgroupmapping.TeamGroupsHandler

View File

@@ -41,13 +41,11 @@ import (
"github.com/grafana/grafana/pkg/registry/apis/iam/teambinding"
"github.com/grafana/grafana/pkg/registry/apis/iam/user"
"github.com/grafana/grafana/pkg/services/accesscontrol"
"github.com/grafana/grafana/pkg/services/apiserver"
gfauthorizer "github.com/grafana/grafana/pkg/services/apiserver/auth/authorizer"
"github.com/grafana/grafana/pkg/services/apiserver/auth/authorizer/storewrapper"
"github.com/grafana/grafana/pkg/services/apiserver/builder"
"github.com/grafana/grafana/pkg/services/authz/zanzana"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/org"
"github.com/grafana/grafana/pkg/services/ssosettings"
teamservice "github.com/grafana/grafana/pkg/services/team"
legacyuser "github.com/grafana/grafana/pkg/services/user"
@@ -78,10 +76,8 @@ func RegisterAPIService(
teamGroupsHandlerImpl externalgroupmapping.TeamGroupsHandler,
dual dualwrite.Service,
unified resource.ResourceClient,
orgService org.Service,
userService legacyuser.Service,
teamService teamservice.Service,
restConfig apiserver.RestConfigProvider,
) (*IdentityAccessManagementAPIBuilder, error) {
dbProvider := legacysql.NewDatabaseProvider(sql)
store := legacy.NewLegacySQLStores(dbProvider)
@@ -92,11 +88,6 @@ func RegisterAPIService(
//nolint:staticcheck // not yet migrated to OpenFeature
enableAuthnMutation := features.IsEnabledGlobally(featuremgmt.FlagKubernetesAuthnMutation)
resourceParentProvider := iamauthorizer.NewApiParentProvider(
iamauthorizer.NewLocalConfigProvider(restConfig.GetRestConfig),
iamauthorizer.Versions,
)
builder := &IdentityAccessManagementAPIBuilder{
store: store,
userLegacyStore: user.NewLegacyStore(store, accessClient, enableAuthnMutation, tracing),
@@ -111,7 +102,6 @@ func RegisterAPIService(
externalGroupMappingStorage: externalGroupMappingStorageBackend,
teamGroupsHandler: teamGroupsHandlerImpl,
sso: ssoService,
resourceParentProvider: resourceParentProvider,
authorizer: authorizer,
legacyAccessClient: legacyAccessClient,
accessClient: accessClient,
@@ -124,11 +114,9 @@ func RegisterAPIService(
dual: dual,
unified: unified,
userSearchClient: resource.NewSearchClient(dualwrite.NewSearchAdapter(dual), iamv0.UserResourceInfo.GroupResource(),
unified, user.NewUserLegacySearchClient(orgService, tracing, cfg), features),
unified, user.NewUserLegacySearchClient(userService, tracing), features),
teamSearch: NewTeamSearchHandler(tracing, dual, team.NewLegacyTeamSearchClient(teamService), unified, features),
}
builder.userSearchHandler = user.NewSearchHandler(tracing, builder.userSearchClient, features, cfg)
apiregistration.RegisterAPI(builder)
return builder, nil
@@ -150,12 +138,6 @@ func NewAPIService(
resourceAuthorizer := gfauthorizer.NewResourceAuthorizer(accessClient)
coreRoleAuthorizer := iamauthorizer.NewCoreRoleAuthorizer(accessClient)
// TODO: in a follow up PR, make this configurable
resourceParentProvider := iamauthorizer.NewApiParentProvider(
iamauthorizer.NewRemoteConfigProvider(map[schema.GroupResource]iamauthorizer.DialConfig{}, nil),
iamauthorizer.Versions,
)
return &IdentityAccessManagementAPIBuilder{
store: store,
display: user.NewLegacyDisplayREST(store),
@@ -166,7 +148,6 @@ func NewAPIService(
logger: log.New("iam.apis"),
features: features,
accessClient: accessClient,
resourceParentProvider: resourceParentProvider,
zClient: zClient,
zTickets: make(chan bool, MaxConcurrentZanzanaWrites),
reg: reg,
@@ -459,7 +440,7 @@ func (b *IdentityAccessManagementAPIBuilder) UpdateResourcePermissionsAPIGroup(
return fmt.Errorf("expected RegistryStoreDualWrite, got %T", dw)
}
authzWrapper := storewrapper.New(regStoreDW, iamauthorizer.NewResourcePermissionsAuthorizer(b.accessClient, b.resourceParentProvider))
authzWrapper := storewrapper.New(regStoreDW, iamauthorizer.NewResourcePermissionsAuthorizer(b.accessClient))
storage[iamv0.ResourcePermissionInfo.StoragePath()] = authzWrapper
return nil
@@ -529,18 +510,10 @@ func (b *IdentityAccessManagementAPIBuilder) PostProcessOpenAPI(oas *spec3.OpenA
func (b *IdentityAccessManagementAPIBuilder) GetAPIRoutes(gv schema.GroupVersion) *builder.APIRoutes {
defs := b.GetOpenAPIDefinitions()(func(path string) spec.Ref { return spec.Ref{} })
searchRoutes := make([]*builder.APIRoutes, 0, 2)
if b.userSearchHandler != nil {
searchRoutes = append(searchRoutes, b.userSearchHandler.GetAPIRoutes(defs))
}
routes := b.teamSearch.GetAPIRoutes(defs)
routes.Namespace = append(routes.Namespace, b.display.GetAPIRoutes(defs).Namespace...)
if b.teamSearch != nil {
searchRoutes = append(searchRoutes, b.teamSearch.GetAPIRoutes(defs))
}
routes := []*builder.APIRoutes{b.display.GetAPIRoutes(defs)}
routes = append(routes, searchRoutes...)
return mergeAPIRoutes(routes...)
return routes
}
func (b *IdentityAccessManagementAPIBuilder) GetAuthorizer() authorizer.Authorizer {
@@ -648,15 +621,3 @@ func NewLocalStore(resourceInfo utils.ResourceInfo, scheme *runtime.Scheme, defa
store, err := grafanaregistry.NewRegistryStore(scheme, resourceInfo, optsGetter)
return store, err
}
func mergeAPIRoutes(routes ...*builder.APIRoutes) *builder.APIRoutes {
merged := &builder.APIRoutes{}
for _, r := range routes {
if r == nil {
continue
}
merged.Root = append(merged.Root, r.Root...)
merged.Namespace = append(merged.Namespace, r.Namespace...)
}
return merged
}

View File

@@ -2,22 +2,16 @@ package user
import (
"context"
"encoding/binary"
"fmt"
"log/slog"
"math"
"regexp"
"sort"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/services/org"
"github.com/grafana/grafana/pkg/services/search/model"
"github.com/grafana/grafana/pkg/services/searchusers/sortopts"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/grafana/grafana/pkg/services/user"
res "github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/grafana/grafana/pkg/storage/unified/resourcepb"
"github.com/grafana/grafana/pkg/storage/unified/search/builders"
)
@@ -27,36 +21,28 @@ const (
UserResourceGroup = "iam.grafana.com"
)
var (
_ resourcepb.ResourceIndexClient = (*UserLegacySearchClient)(nil)
fieldLogin = fmt.Sprintf("%s%s", resource.SEARCH_FIELD_PREFIX, builders.USER_LOGIN)
fieldEmail = fmt.Sprintf("%s%s", resource.SEARCH_FIELD_PREFIX, builders.USER_EMAIL)
fieldLastSeenAt = fmt.Sprintf("%s%s", resource.SEARCH_FIELD_PREFIX, builders.USER_LAST_SEEN_AT)
fieldRole = fmt.Sprintf("%s%s", resource.SEARCH_FIELD_PREFIX, builders.USER_ROLE)
wildcardsMatcher = regexp.MustCompile(`[\*\?\\]`)
)
var _ resourcepb.ResourceIndexClient = (*UserLegacySearchClient)(nil)
// UserLegacySearchClient is a client for searching for users in the legacy search engine.
type UserLegacySearchClient struct {
resourcepb.ResourceIndexClient
orgService org.Service
log *slog.Logger
tracer trace.Tracer
cfg *setting.Cfg
userService user.Service
log *slog.Logger
tracer trace.Tracer
}
// NewUserLegacySearchClient creates a new UserLegacySearchClient.
func NewUserLegacySearchClient(orgService org.Service, tracer trace.Tracer, cfg *setting.Cfg) *UserLegacySearchClient {
func NewUserLegacySearchClient(userService user.Service, tracer trace.Tracer) *UserLegacySearchClient {
return &UserLegacySearchClient{
orgService: orgService,
log: slog.Default().With("logger", "legacy-user-search-client"),
tracer: tracer,
cfg: cfg,
userService: userService,
log: slog.Default().With("logger", "legacy-user-search-client"),
tracer: tracer,
}
}
// Search searches for users in the legacy search engine.
// It only supports exact matching for title, login, or email.
// FIXME: This implementation only supports a single field query and will be extended in the future.
func (c *UserLegacySearchClient) Search(ctx context.Context, req *resourcepb.ResourceSearchRequest, _ ...grpc.CallOption) (*resourcepb.ResourceSearchResponse, error) {
ctx, span := c.tracer.Start(ctx, "user.Search")
defer span.End()
@@ -66,30 +52,21 @@ func (c *UserLegacySearchClient) Search(ctx context.Context, req *resourcepb.Res
return nil, err
}
if req.Limit > maxLimit {
req.Limit = maxLimit
if req.Limit > 100 {
req.Limit = 100
}
if req.Limit <= 0 {
req.Limit = 30
req.Limit = 1
}
if req.Page > math.MaxInt32 || req.Page < 0 {
return nil, fmt.Errorf("invalid page number: %d", req.Page)
}
if req.Page < 1 {
req.Page = 1
}
legacySortOptions := convertToSortOptions(req.SortBy)
query := &org.SearchOrgUsersQuery{
OrgID: signedInUser.GetOrgID(),
Limit: int(req.Limit),
Page: int(req.Page),
SortOpts: legacySortOptions,
User: signedInUser,
query := &user.SearchUsersQuery{
SignedInUser: signedInUser,
Limit: int(req.Limit),
Page: int(req.Page),
}
var title, login, email string
@@ -99,15 +76,19 @@ func (c *UserLegacySearchClient) Search(ctx context.Context, req *resourcepb.Res
c.log.Warn("only single value fields are supported for legacy search, using first value", "field", field.Key, "values", vals)
}
switch field.Key {
case resource.SEARCH_FIELD_TITLE:
case res.SEARCH_FIELD_TITLE:
title = vals[0]
case fieldLogin:
case "fields.login":
login = vals[0]
case fieldEmail:
case "fields.email":
email = vals[0]
}
}
if title == "" && login == "" && email == "" {
return nil, fmt.Errorf("at least one of title, login, or email must be provided for the query")
}
// The user store's Search method combines these into an OR.
// For legacy search we can only supply one.
if title != "" {
@@ -118,35 +99,20 @@ func (c *UserLegacySearchClient) Search(ctx context.Context, req *resourcepb.Res
query.Query = email
}
// Unified search `query` has wildcards, but legacy search does not support them.
// We have to remove them here to make legacy search work as expected with SQL LIKE queries.
if req.Query != "" {
query.Query = wildcardsMatcher.ReplaceAllString(req.Query, "")
}
fields := req.Fields
if len(fields) == 0 {
fields = []string{resource.SEARCH_FIELD_TITLE, fieldEmail, fieldLogin, fieldLastSeenAt, fieldRole}
}
columns := getColumns(fields)
columns := getColumns(req.Fields)
list := &resourcepb.ResourceSearchResponse{
Results: &resourcepb.ResourceTable{
Columns: columns,
},
}
res, err := c.orgService.SearchOrgUsers(ctx, query)
res, err := c.userService.Search(ctx, query)
if err != nil {
return nil, err
}
for _, u := range res.OrgUsers {
if c.isHiddenUser(u.Login, signedInUser) {
continue
}
cells := createCells(u, req.Fields)
for _, u := range res.Users {
cells := createBaseCells(u, req.Fields)
list.Results.Rows = append(list.Results.Rows, &resourcepb.ResourceTableRow{
Key: getResourceKey(u, req.Options.Key.Namespace),
Cells: cells,
@@ -157,19 +123,7 @@ func (c *UserLegacySearchClient) Search(ctx context.Context, req *resourcepb.Res
return list, nil
}
func (c *UserLegacySearchClient) isHiddenUser(login string, signedInUser identity.Requester) bool {
if login == "" || signedInUser.GetIsGrafanaAdmin() || login == signedInUser.GetUsername() {
return false
}
if _, hidden := c.cfg.HiddenUsers[login]; hidden {
return true
}
return false
}
func getResourceKey(item *org.OrgUserDTO, namespace string) *resourcepb.ResourceKey {
func getResourceKey(item *user.UserSearchHitDTO, namespace string) *resourcepb.ResourceKey {
return &resourcepb.ResourceKey{
Namespace: namespace,
Group: UserResourceGroup,
@@ -179,74 +133,42 @@ func getResourceKey(item *org.OrgUserDTO, namespace string) *resourcepb.Resource
}
func getColumns(fields []string) []*resourcepb.ResourceTableColumnDefinition {
cols := make([]*resourcepb.ResourceTableColumnDefinition, 0, len(fields))
standardSearchFields := resource.StandardSearchFields()
columns := defaultColumns()
for _, field := range fields {
switch field {
case resource.SEARCH_FIELD_TITLE:
cols = append(cols, standardSearchFields.Field(resource.SEARCH_FIELD_TITLE))
case fieldLastSeenAt:
cols = append(cols, builders.UserTableColumnDefinitions[builders.USER_LAST_SEEN_AT])
case fieldRole:
cols = append(cols, builders.UserTableColumnDefinitions[builders.USER_ROLE])
case fieldEmail:
cols = append(cols, builders.UserTableColumnDefinitions[builders.USER_EMAIL])
case fieldLogin:
cols = append(cols, builders.UserTableColumnDefinitions[builders.USER_LOGIN])
case "email":
columns = append(columns, builders.UserTableColumnDefinitions[builders.USER_EMAIL])
case "login":
columns = append(columns, builders.UserTableColumnDefinitions[builders.USER_LOGIN])
}
}
return cols
return columns
}
func createCells(u *org.OrgUserDTO, fields []string) [][]byte {
cells := make([][]byte, 0, len(fields))
func createBaseCells(u *user.UserSearchHitDTO, fields []string) [][]byte {
cells := createDefaultCells(u)
for _, field := range fields {
switch field {
case resource.SEARCH_FIELD_TITLE:
cells = append(cells, []byte(u.Name))
case fieldEmail:
case "email":
cells = append(cells, []byte(u.Email))
case fieldLogin:
case "login":
cells = append(cells, []byte(u.Login))
case fieldLastSeenAt:
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(u.LastSeenAt.Unix()))
cells = append(cells, b)
case fieldRole:
cells = append(cells, []byte(u.Role))
}
}
return cells
}
func convertToSortOptions(sortBy []*resourcepb.ResourceSearchRequest_Sort) []model.SortOption {
opts := []model.SortOption{}
for _, s := range sortBy {
field := s.Field
// Handle mapping if necessary
switch field {
case fieldLastSeenAt:
field = "lastSeenAtAge"
case resource.SEARCH_FIELD_TITLE:
field = "name"
case fieldLogin:
field = "login"
case fieldEmail:
field = "email"
}
suffix := "asc"
if s.Desc {
suffix = "desc"
}
key := fmt.Sprintf("%s-%s", field, suffix)
if opt, ok := sortopts.SortOptionsByQueryParam[key]; ok {
opts = append(opts, opt)
}
func createDefaultCells(u *user.UserSearchHitDTO) [][]byte {
return [][]byte{
[]byte(u.UID),
[]byte(u.Name),
}
}
func defaultColumns() []*resourcepb.ResourceTableColumnDefinition {
searchFields := res.StandardSearchFields()
return []*resourcepb.ResourceTableColumnDefinition{
searchFields.Field(res.SEARCH_FIELD_NAME),
searchFields.Field(res.SEARCH_FIELD_TITLE),
}
sort.Slice(opts, func(i, j int) bool {
return opts[i].Index < opts[j].Index || (opts[i].Index == opts[j].Index && opts[i].Name < opts[j].Name)
})
return opts
}

View File

@@ -5,7 +5,7 @@ import (
"google.golang.org/grpc"
"github.com/grafana/grafana/pkg/services/org"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/storage/unified/resourcepb"
)
@@ -13,7 +13,7 @@ import (
type FakeUserLegacySearchClient struct {
resourcepb.ResourceIndexClient
SearchFunc func(ctx context.Context, req *resourcepb.ResourceSearchRequest, opts ...grpc.CallOption) (*resourcepb.ResourceSearchResponse, error)
Users []*org.OrgUserDTO
Users []*user.UserSearchHitDTO
}
// Search calls the underlying SearchFunc or simulates a search over the Users slice.
@@ -23,7 +23,7 @@ func (c *FakeUserLegacySearchClient) Search(ctx context.Context, req *resourcepb
}
// Basic filtering for testing purposes
var filteredUsers []*org.OrgUserDTO
var filteredUsers []*user.UserSearchHitDTO
var queryValue string
for _, field := range req.Options.Fields {
@@ -43,7 +43,7 @@ func (c *FakeUserLegacySearchClient) Search(ctx context.Context, req *resourcepb
for _, u := range filteredUsers {
rows = append(rows, &resourcepb.ResourceTableRow{
Key: getResourceKey(u, req.Options.Key.Namespace),
Cells: createCells(u, req.Fields),
Cells: createBaseCells(u, req.Fields),
})
}

View File

@@ -9,15 +9,29 @@ import (
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/services/org"
"github.com/grafana/grafana/pkg/services/org/orgtest"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/services/user/usertest"
res "github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/grafana/grafana/pkg/storage/unified/resourcepb"
)
func TestUserLegacySearchClient_Search(t *testing.T) {
t.Run("should return error if no query fields are provided", func(t *testing.T) {
mockUserService := usertest.NewMockService(t)
client := NewUserLegacySearchClient(mockUserService, tracing.NewNoopTracerService())
ctx := identity.WithRequester(context.Background(), &user.SignedInUser{OrgID: 1, UserID: 1})
req := &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{
Key: &resourcepb.ResourceKey{Namespace: "default"},
},
}
_, err := client.Search(ctx, req)
require.Error(t, err)
require.Equal(t, "at least one of title, login, or email must be provided for the query", err.Error())
})
testCases := []struct {
name string
fieldKey string
@@ -52,8 +66,8 @@ func TestUserLegacySearchClient_Search(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
mockOrgService := orgtest.NewMockService(t)
client := NewUserLegacySearchClient(mockOrgService, tracing.NewNoopTracerService(), &setting.Cfg{})
mockUserService := usertest.NewMockService(t)
client := NewUserLegacySearchClient(mockUserService, tracing.NewNoopTracerService())
ctx := identity.WithRequester(context.Background(), &user.SignedInUser{OrgID: 1, UserID: 1})
req := &resourcepb.ResourceSearchRequest{
Limit: 10,
@@ -67,14 +81,14 @@ func TestUserLegacySearchClient_Search(t *testing.T) {
Fields: []string{"email", "login"},
}
mockUsers := []*org.OrgUserDTO{
{UID: "uid1", Name: "Test User 1", Email: "test1@example.com", Login: "testlogin1"},
mockUsers := []*user.UserSearchHitDTO{
{ID: 1, UID: "uid1", Name: "Test User 1", Email: "test1@example.com", Login: "testlogin1"},
}
mockOrgService.On("SearchOrgUsers", mock.Anything, mock.MatchedBy(func(q *org.SearchOrgUsersQuery) bool {
mockUserService.On("Search", mock.Anything, mock.MatchedBy(func(q *user.SearchUsersQuery) bool {
return q.Query == tc.expectedQuery && q.Limit == 10 && q.Page == 1
})).Return(&org.SearchOrgUsersQueryResult{
OrgUsers: mockUsers,
})).Return(&user.SearchUserQueryResult{
Users: mockUsers,
TotalCount: 1,
}, nil)
@@ -99,7 +113,7 @@ func TestUserLegacySearchClient_Search(t *testing.T) {
require.Equal(t, UserResource, row.Key.Resource)
require.Equal(t, u.UID, row.Key.Name)
expectedCells := createCells(&org.OrgUserDTO{
expectedCells := createBaseCells(&user.UserSearchHitDTO{
UID: u.UID,
Name: u.Name,
Email: u.Email,
@@ -111,8 +125,8 @@ func TestUserLegacySearchClient_Search(t *testing.T) {
}
t.Run("title should have precedence over login and email", func(t *testing.T) {
mockOrgService := orgtest.NewMockService(t)
client := NewUserLegacySearchClient(mockOrgService, tracing.NewNoopTracerService(), &setting.Cfg{})
mockUserService := usertest.NewMockService(t)
client := NewUserLegacySearchClient(mockUserService, tracing.NewNoopTracerService())
ctx := identity.WithRequester(context.Background(), &user.SignedInUser{OrgID: 1, UserID: 1})
req := &resourcepb.ResourceSearchRequest{
Options: &resourcepb.ListOptions{
@@ -125,9 +139,9 @@ func TestUserLegacySearchClient_Search(t *testing.T) {
},
}
mockOrgService.On("SearchOrgUsers", mock.Anything, mock.MatchedBy(func(q *org.SearchOrgUsersQuery) bool {
mockUserService.On("Search", mock.Anything, mock.MatchedBy(func(q *user.SearchUsersQuery) bool {
return q.Query == "title"
})).Return(&org.SearchOrgUsersQueryResult{OrgUsers: []*org.OrgUserDTO{}, TotalCount: 0}, nil)
})).Return(&user.SearchUserQueryResult{Users: []*user.UserSearchHitDTO{}, TotalCount: 0}, nil)
_, err := client.Search(ctx, req)
require.NoError(t, err)

View File

@@ -1,399 +0,0 @@
package user
import (
"encoding/binary"
"encoding/json"
"fmt"
"log/slog"
"net/http"
"net/url"
"regexp"
"slices"
"strconv"
"strings"
"time"
"go.opentelemetry.io/otel/trace"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/kube-openapi/pkg/common"
"k8s.io/kube-openapi/pkg/spec3"
"k8s.io/kube-openapi/pkg/validation/spec"
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/services/apiserver/builder"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/grafana/grafana/pkg/storage/unified/resourcepb"
"github.com/grafana/grafana/pkg/storage/unified/search/builders"
"github.com/grafana/grafana/pkg/util"
"github.com/grafana/grafana/pkg/util/errhttp"
)
// maxLimit caps the number of results a single search request may return.
const maxLimit = 100

// SearchHandler serves the /searchUsers endpoint: it translates HTTP
// query parameters into a unified-storage search request and renders
// the response as JSON.
type SearchHandler struct {
	log      *slog.Logger                   // structured logger scoped to this handler
	client   resourcepb.ResourceIndexClient // search backend (unified or legacy, chosen by the caller)
	tracer   trace.Tracer                   // used to create the per-request span
	features featuremgmt.FeatureToggles
	cfg      *setting.Cfg // read for HiddenUsers filtering of non-admin searches
}
// NewSearchHandler builds a SearchHandler that performs user searches
// through the supplied resource index client.
func NewSearchHandler(tracer trace.Tracer, searchClient resourcepb.ResourceIndexClient, features featuremgmt.FeatureToggles, cfg *setting.Cfg) *SearchHandler {
	handler := &SearchHandler{
		log:      slog.Default().With("logger", "grafana-apiserver.user.search"),
		client:   searchClient,
		tracer:   tracer,
		features: features,
		cfg:      cfg,
	}
	return handler
}
// GetAPIRoutes registers the namespaced /searchUsers route and its
// OpenAPI documentation (query parameters, sort examples, and the
// response schema), wiring the route to DoSearch.
func (s *SearchHandler) GetAPIRoutes(defs map[string]common.OpenAPIDefinition) *builder.APIRoutes {
	// The response schema comes from the generated iam v0alpha1 OpenAPI
	// definitions keyed by fully-qualified type name.
	searchResults := defs["github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GetSearchUsers"].Schema

	return &builder.APIRoutes{
		Namespace: []builder.APIRouteHandler{
			{
				Path: "searchUsers",
				Spec: &spec3.PathProps{
					Get: &spec3.Operation{
						OperationProps: spec3.OperationProps{
							Description: "User search",
							Tags:        []string{"Search"},
							OperationId: "getSearchUsers",
							Parameters: []*spec3.Parameter{
								{
									ParameterProps: spec3.ParameterProps{
										Name:        "namespace",
										In:          "path",
										Required:    true,
										Example:     "default",
										Description: "workspace",
										Schema:      spec.StringProperty(),
									},
								},
								{
									// Free-text query; matched as a substring (see DoSearch).
									ParameterProps: spec3.ParameterProps{
										Name:     "query",
										In:       "query",
										Required: false,
										Schema:   spec.StringProperty(),
									},
								},
								{
									ParameterProps: spec3.ParameterProps{
										Name:        "limit",
										In:          "query",
										Description: "number of results to return",
										Example:     30,
										Required:    false,
										Schema:      spec.Int64Property(),
									},
								},
								{
									ParameterProps: spec3.ParameterProps{
										Name:        "page",
										In:          "query",
										Description: "page number (starting from 1)",
										Example:     1,
										Required:    false,
										Schema:      spec.Int64Property(),
									},
								},
								{
									ParameterProps: spec3.ParameterProps{
										Name:        "offset",
										In:          "query",
										Description: "number of results to skip",
										Example:     0,
										Required:    false,
										Schema:      spec.Int64Property(),
									},
								},
								{
									// Sortable fields; a leading '-' selects descending order.
									ParameterProps: spec3.ParameterProps{
										Name:        "sort",
										In:          "query",
										Description: "sortable field",
										Example:     "",
										Examples: map[string]*spec3.Example{
											"": {
												ExampleProps: spec3.ExampleProps{
													Summary: "default sorting",
													Value:   "",
												},
											},
											"title": {
												ExampleProps: spec3.ExampleProps{
													Summary: "title ascending",
													Value:   "title",
												},
											},
											"-title": {
												ExampleProps: spec3.ExampleProps{
													Summary: "title descending",
													Value:   "-title",
												},
											},
											"lastSeenAt": {
												ExampleProps: spec3.ExampleProps{
													Summary: "last seen at ascending",
													Value:   "lastSeenAt",
												},
											},
											"-lastSeenAt": {
												ExampleProps: spec3.ExampleProps{
													Summary: "last seen at descending",
													Value:   "-lastSeenAt",
												},
											},
											"email": {
												ExampleProps: spec3.ExampleProps{
													Summary: "email ascending",
													Value:   "email",
												},
											},
											"-email": {
												ExampleProps: spec3.ExampleProps{
													Summary: "email descending",
													Value:   "-email",
												},
											},
											"login": {
												ExampleProps: spec3.ExampleProps{
													Summary: "login ascending",
													Value:   "login",
												},
											},
											"-login": {
												ExampleProps: spec3.ExampleProps{
													Summary: "login descending",
													Value:   "-login",
												},
											},
										},
										Required: false,
										Schema:   spec.StringProperty(),
									},
								},
							},
							Responses: &spec3.Responses{
								ResponsesProps: spec3.ResponsesProps{
									Default: &spec3.Response{
										ResponseProps: spec3.ResponseProps{
											Description: "Default OK response",
											Content: map[string]*spec3.MediaType{
												"application/json": {
													MediaTypeProps: spec3.MediaTypeProps{
														Schema: &searchResults,
													},
												},
											},
										},
									},
								},
							},
						},
					},
				},
				Handler: s.DoSearch,
			},
		},
	}
}
// DoSearch handles GET /searchUsers. It parses pagination, sorting, and
// query parameters, builds a unified-storage search request scoped to the
// requester's namespace, and writes the JSON-encoded results.
//
// Fixes over the previous version: strconv.Atoi errors were silently
// discarded, so a malformed "limit" became 0 and a malformed "page"
// became 0, producing a negative offset of -limit; and the declared
// maxLimit constant was never enforced. Malformed values now fall back
// to the defaults and the page size is capped at maxLimit.
func (s *SearchHandler) DoSearch(w http.ResponseWriter, r *http.Request) {
	ctx, span := s.tracer.Start(r.Context(), "user.search")
	defer span.End()

	queryParams, err := url.ParseQuery(r.URL.RawQuery)
	if err != nil {
		errhttp.Write(ctx, err, w)
		return
	}

	requester, err := identity.GetRequester(ctx)
	if err != nil {
		errhttp.Write(ctx, fmt.Errorf("no identity found for request: %w", err), w)
		return
	}

	// Pagination defaults; invalid or non-positive values are ignored.
	limit := 30
	offset := 0
	page := 1
	if queryParams.Has("limit") {
		if v, perr := strconv.Atoi(queryParams.Get("limit")); perr == nil && v > 0 {
			limit = v
		}
	}
	if limit > maxLimit {
		limit = maxLimit
	}
	if queryParams.Has("offset") {
		if v, perr := strconv.Atoi(queryParams.Get("offset")); perr == nil && v > 0 {
			offset = v
		}
		if offset > 0 {
			page = (offset / limit) + 1
		}
	} else if queryParams.Has("page") {
		if v, perr := strconv.Atoi(queryParams.Get("page")); perr == nil && v > 0 {
			page = v
		}
		offset = (page - 1) * limit
	}

	// Escape characters that are used by bleve wildcard search to be
	// literal strings, then wrap in '*' for substring matching.
	rawQuery := escapeBleveQuery(queryParams.Get("query"))
	searchQuery := fmt.Sprintf(`*%s*`, rawQuery)
	userGvr := iamv0.UserResourceInfo.GroupResource()

	request := &resourcepb.ResourceSearchRequest{
		Options: &resourcepb.ListOptions{
			Key: &resourcepb.ResourceKey{
				Group:     userGvr.Group,
				Resource:  userGvr.Resource,
				Namespace: requester.GetNamespace(),
			},
		},
		Query:  searchQuery,
		Fields: []string{resource.SEARCH_FIELD_TITLE, fieldEmail, fieldLogin, fieldLastSeenAt, fieldRole},
		Limit:  int64(limit),
		Page:   int64(page),
		Offset: int64(offset),
	}

	// Non-admins must not see users hidden via configuration, except
	// themselves.
	if !requester.GetIsGrafanaAdmin() {
		// FIXME: Use the new config service instead of the legacy one
		hiddenUsers := []string{}
		for name := range s.cfg.HiddenUsers {
			if name != requester.GetUsername() {
				hiddenUsers = append(hiddenUsers, name)
			}
		}
		if len(hiddenUsers) > 0 {
			request.Options.Fields = append(request.Options.Fields, &resourcepb.Requirement{
				Key:      fieldLogin,
				Operator: string(selection.NotIn),
				Values:   hiddenUsers,
			})
		}
	}

	// Sorting: a leading '-' means descending; extra user-specific fields
	// are prefixed so the search backend can resolve them.
	if queryParams.Has("sort") {
		for _, sortParam := range queryParams["sort"] {
			field := sortParam
			desc := false
			if strings.HasPrefix(field, "-") {
				field = field[1:]
				desc = true
			}
			if slices.Contains(builders.UserSortableExtraFields, field) {
				field = resource.SEARCH_FIELD_PREFIX + field
			}
			request.SortBy = append(request.SortBy, &resourcepb.ResourceSearchRequest_Sort{
				Field: field,
				Desc:  desc,
			})
		}
	}

	resp, err := s.client.Search(ctx, request)
	if err != nil {
		errhttp.Write(ctx, err, w)
		return
	}

	result, err := ParseResults(resp)
	if err != nil {
		errhttp.Write(ctx, err, w)
		return
	}
	s.write(w, result)
}
// write serializes obj as JSON to the response writer; encoding
// failures are logged rather than surfaced to the client.
func (s *SearchHandler) write(w http.ResponseWriter, obj any) {
	w.Header().Set("Content-Type", "application/json")
	encodeErr := json.NewEncoder(w).Encode(obj)
	if encodeErr != nil {
		s.log.Error("failed to encode JSON response", "error", encodeErr)
	}
}
// ParseResults converts a raw resource search response into the iam
// v0alpha1 GetSearchUsers API type. A nil response or empty result set
// yields an empty (non-nil) result; a backend error is surfaced as a Go
// error; a row whose cell count disagrees with the column count aborts
// parsing.
func ParseResults(result *resourcepb.ResourceSearchResponse) (*iamv0.GetSearchUsers, error) {
	if result == nil {
		return iamv0.NewGetSearchUsers(), nil
	}
	if result.Error != nil {
		return iamv0.NewGetSearchUsers(), fmt.Errorf("%d error searching: %s: %s", result.Error.Code, result.Error.Message, result.Error.Details)
	}
	if result.Results == nil {
		return iamv0.NewGetSearchUsers(), nil
	}

	// Locate the column index of each field we care about; -1 marks a
	// column that is absent from this response.
	titleIdx, emailIdx, loginIdx, lastSeenIdx, roleIdx := -1, -1, -1, -1, -1
	for i, col := range result.Results.Columns {
		switch col.Name {
		case resource.SEARCH_FIELD_TITLE:
			titleIdx = i
		case builders.USER_EMAIL:
			emailIdx = i
		case builders.USER_LOGIN:
			loginIdx = i
		case builders.USER_LAST_SEEN_AT:
			lastSeenIdx = i
		case builders.USER_ROLE:
			roleIdx = i
		}
	}

	// cell returns the raw bytes for a column, or nil when the column is
	// absent or the cell itself is nil.
	cell := func(row *resourcepb.ResourceTableRow, idx int) []byte {
		if idx < 0 {
			return nil
		}
		return row.Cells[idx]
	}

	sr := iamv0.NewGetSearchUsers()
	sr.TotalHits = result.TotalHits
	sr.QueryCost = result.QueryCost
	sr.MaxScore = result.MaxScore
	sr.Hits = make([]iamv0.UserHit, 0, len(result.Results.Rows))

	for _, row := range result.Results.Rows {
		// Guard the index accesses below: every cell index is < the
		// column count, which must equal the cell count.
		if len(row.Cells) != len(result.Results.Columns) {
			return iamv0.NewGetSearchUsers(), fmt.Errorf("error parsing user search response: mismatch number of columns and cells")
		}
		hit := iamv0.UserHit{Name: row.Key.Name}
		if b := cell(row, loginIdx); b != nil {
			hit.Login = string(b)
		}
		if b := cell(row, titleIdx); b != nil {
			hit.Title = string(b)
		}
		if b := cell(row, emailIdx); b != nil {
			hit.Email = string(b)
		}
		if b := cell(row, roleIdx); b != nil {
			hit.Role = string(b)
		}
		// Last-seen is stored as a big-endian uint64 Unix timestamp.
		if b := cell(row, lastSeenIdx); len(b) == 8 {
			hit.LastSeenAt = int64(binary.BigEndian.Uint64(b))
			hit.LastSeenAtAge = util.GetAgeString(time.Unix(hit.LastSeenAt, 0))
		}
		sr.Hits = append(sr.Hits, hit)
	}
	return sr, nil
}
// bleveEscapeRegex matches the characters bleve's wildcard query syntax
// treats specially: backslash, '*', and '?'.
var bleveEscapeRegex = regexp.MustCompile(`([\\*?])`)

// escapeBleveQuery prefixes each wildcard metacharacter in query with a
// backslash so the input is matched literally by the search engine.
func escapeBleveQuery(query string) string {
	return bleveEscapeRegex.ReplaceAllStringFunc(query, func(m string) string {
		return `\` + m
	})
}

View File

@@ -1,169 +0,0 @@
package user
import (
"context"
"net/http/httptest"
"testing"
"google.golang.org/grpc"
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/apiserver/rest"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/services/featuremgmt"
legacyuser "github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/storage/legacysql/dualwrite"
"github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/grafana/grafana/pkg/storage/unified/resourcepb"
)
// TestSearchFallback verifies that the search handler routes queries to
// the legacy index in dual-writer modes 0-2 and to unified storage in
// modes 3-5, by observing which mock client records a request.
func TestSearchFallback(t *testing.T) {
	tests := []struct {
		name          string
		mode          rest.DualWriterMode
		expectUnified bool // true when the unified (non-legacy) client should receive the call
	}{
		{name: "should hit legacy search handler on mode 0", mode: rest.Mode0, expectUnified: false},
		{name: "should hit legacy search handler on mode 1", mode: rest.Mode1, expectUnified: false},
		{name: "should hit legacy search handler on mode 2", mode: rest.Mode2, expectUnified: false},
		{name: "should hit unified storage search handler on mode 3", mode: rest.Mode3, expectUnified: true},
		{name: "should hit unified storage search handler on mode 4", mode: rest.Mode4, expectUnified: true},
		{name: "should hit unified storage search handler on mode 5", mode: rest.Mode5, expectUnified: true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Two independent mocks let us tell unified and legacy apart.
			mockClient := &MockClient{}
			mockLegacyClient := &MockClient{}
			// Configure the dual-writer mode for the user resource only.
			cfg := &setting.Cfg{
				UnifiedStorage: map[string]setting.UnifiedStorageConfig{
					"users.iam.grafana.app": {DualWriterMode: tt.mode},
				},
			}
			dual := dualwrite.ProvideStaticServiceForTests(cfg)
			searchClient := resource.NewSearchClient(dualwrite.NewSearchAdapter(dual), iamv0.UserResourceInfo.GroupResource(), mockClient, mockLegacyClient, featuremgmt.WithFeatures())
			searchHandler := NewSearchHandler(tracing.NewNoopTracerService(), searchClient, featuremgmt.WithFeatures(), cfg)
			rr := httptest.NewRecorder()
			req := httptest.NewRequest("GET", "/searchUsers", nil)
			req.Header.Add("content-type", "application/json")
			// The handler requires a requester identity in the context.
			req = req.WithContext(identity.WithRequester(req.Context(), &legacyuser.SignedInUser{Namespace: "test"}))
			searchHandler.DoSearch(rr, req)
			if tt.expectUnified {
				if mockClient.LastSearchRequest == nil {
					t.Fatalf("expected Unified Search to be called, but it was not")
				}
			} else {
				if mockLegacyClient.LastSearchRequest == nil {
					t.Fatalf("expected Legacy Search to be called, but it was not")
				}
			}
		})
	}
}
// MockClient implements the ResourceIndexClient interface for testing.
// It records every search request and can replay a scripted sequence of
// responses; all other client methods are no-op stubs.
type MockClient struct {
	resourcepb.ResourceIndexClient
	resource.ResourceIndex

	LastSearchRequest *resourcepb.ResourceSearchRequest   // most recent request passed to Search
	MockResponses     []*resourcepb.ResourceSearchResponse // responses returned in order, one per call
	MockCalls         []*resourcepb.ResourceSearchRequest  // every request seen, in call order
	CallCount         int                                  // number of Search invocations so far
}
// Search records the incoming request and replays the next scripted
// response; once the scripted responses (or a nil entry) run out, an
// empty response is returned.
func (m *MockClient) Search(ctx context.Context, in *resourcepb.ResourceSearchRequest, opts ...grpc.CallOption) (*resourcepb.ResourceSearchResponse, error) {
	m.LastSearchRequest = in
	m.MockCalls = append(m.MockCalls, in)

	call := m.CallCount
	m.CallCount++

	if call < len(m.MockResponses) {
		if resp := m.MockResponses[call]; resp != nil {
			return resp, nil
		}
	}
	return &resourcepb.ResourceSearchResponse{}, nil
}
// The remaining methods are no-op stubs that exist only to satisfy the
// resource client interfaces; the tests above exercise Search alone.

func (m *MockClient) GetStats(ctx context.Context, in *resourcepb.ResourceStatsRequest, opts ...grpc.CallOption) (*resourcepb.ResourceStatsResponse, error) {
	return nil, nil
}

func (m *MockClient) CountManagedObjects(ctx context.Context, in *resourcepb.CountManagedObjectsRequest, opts ...grpc.CallOption) (*resourcepb.CountManagedObjectsResponse, error) {
	return nil, nil
}

func (m *MockClient) Watch(ctx context.Context, in *resourcepb.WatchRequest, opts ...grpc.CallOption) (resourcepb.ResourceStore_WatchClient, error) {
	return nil, nil
}

func (m *MockClient) Delete(ctx context.Context, in *resourcepb.DeleteRequest, opts ...grpc.CallOption) (*resourcepb.DeleteResponse, error) {
	return nil, nil
}

func (m *MockClient) Create(ctx context.Context, in *resourcepb.CreateRequest, opts ...grpc.CallOption) (*resourcepb.CreateResponse, error) {
	return nil, nil
}

func (m *MockClient) Update(ctx context.Context, in *resourcepb.UpdateRequest, opts ...grpc.CallOption) (*resourcepb.UpdateResponse, error) {
	return nil, nil
}

func (m *MockClient) Read(ctx context.Context, in *resourcepb.ReadRequest, opts ...grpc.CallOption) (*resourcepb.ReadResponse, error) {
	return nil, nil
}

func (m *MockClient) GetBlob(ctx context.Context, in *resourcepb.GetBlobRequest, opts ...grpc.CallOption) (*resourcepb.GetBlobResponse, error) {
	return nil, nil
}

func (m *MockClient) PutBlob(ctx context.Context, in *resourcepb.PutBlobRequest, opts ...grpc.CallOption) (*resourcepb.PutBlobResponse, error) {
	return nil, nil
}

func (m *MockClient) List(ctx context.Context, in *resourcepb.ListRequest, opts ...grpc.CallOption) (*resourcepb.ListResponse, error) {
	return nil, nil
}

func (m *MockClient) ListManagedObjects(ctx context.Context, in *resourcepb.ListManagedObjectsRequest, opts ...grpc.CallOption) (*resourcepb.ListManagedObjectsResponse, error) {
	return nil, nil
}

func (m *MockClient) IsHealthy(ctx context.Context, in *resourcepb.HealthCheckRequest, opts ...grpc.CallOption) (*resourcepb.HealthCheckResponse, error) {
	return nil, nil
}

func (m *MockClient) BulkProcess(ctx context.Context, opts ...grpc.CallOption) (resourcepb.BulkStore_BulkProcessClient, error) {
	return nil, nil
}

func (m *MockClient) UpdateIndex(ctx context.Context, reason string) error {
	return nil
}

func (m *MockClient) GetQuotaUsage(ctx context.Context, req *resourcepb.QuotaUsageRequest, opts ...grpc.CallOption) (*resourcepb.QuotaUsageResponse, error) {
	return nil, nil
}
// TestEscapeBleveQuery verifies that bleve wildcard metacharacters are
// escaped with a backslash while ordinary text passes through untouched.
func TestEscapeBleveQuery(t *testing.T) {
	cases := []struct {
		input    string
		expected string
	}{
		{input: "normal", expected: "normal"},
		{input: "*", expected: "\\*"},
		{input: "?", expected: "\\?"},
		{input: "\\", expected: "\\\\"},
		{input: "\\*", expected: "\\\\\\*"},
		{input: "*\\?", expected: "\\*\\\\\\?"},
		{input: "foo*bar", expected: "foo\\*bar"},
	}
	for _, tc := range cases {
		t.Run(tc.input, func(t *testing.T) {
			if got := escapeBleveQuery(tc.input); got != tc.expected {
				t.Errorf("escapeBleveQuery(%q) = %q, want %q", tc.input, got, tc.expected)
			}
		})
	}
}

View File

@@ -3,6 +3,7 @@ package user
import (
"context"
"fmt"
"time"
"go.opentelemetry.io/otel/trace"
"k8s.io/apimachinery/pkg/apis/meta/internalversion"
@@ -34,7 +35,7 @@ var (
_ rest.TableConvertor = (*LegacyStore)(nil)
)
var userResource = iamv0alpha1.UserResourceInfo
var resource = iamv0alpha1.UserResourceInfo
func NewLegacyStore(store legacy.LegacyIdentityStore, ac claims.AccessClient, enableAuthnMutation bool, tracer trace.Tracer) *LegacyStore {
return &LegacyStore{store, ac, enableAuthnMutation, tracer}
@@ -53,7 +54,7 @@ func (s *LegacyStore) Update(ctx context.Context, name string, objInfo rest.Upda
defer span.End()
if !s.enableAuthnMutation {
return nil, false, apierrors.NewMethodNotSupported(userResource.GroupResource(), "update")
return nil, false, apierrors.NewMethodNotSupported(resource.GroupResource(), "update")
}
ns, err := request.NamespaceInfoFrom(ctx, true)
@@ -104,7 +105,7 @@ func (s *LegacyStore) Update(ctx context.Context, name string, objInfo rest.Upda
// DeleteCollection implements rest.CollectionDeleter.
func (s *LegacyStore) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *internalversion.ListOptions) (runtime.Object, error) {
return nil, apierrors.NewMethodNotSupported(userResource.GroupResource(), "deletecollection")
return nil, apierrors.NewMethodNotSupported(resource.GroupResource(), "deletecollection")
}
// Delete implements rest.GracefulDeleter.
@@ -113,7 +114,7 @@ func (s *LegacyStore) Delete(ctx context.Context, name string, deleteValidation
defer span.End()
if !s.enableAuthnMutation {
return nil, false, apierrors.NewMethodNotSupported(userResource.GroupResource(), "delete")
return nil, false, apierrors.NewMethodNotSupported(resource.GroupResource(), "delete")
}
ns, err := request.NamespaceInfoFrom(ctx, true)
@@ -130,7 +131,7 @@ func (s *LegacyStore) Delete(ctx context.Context, name string, deleteValidation
return nil, false, err
}
if found == nil || len(found.Items) < 1 {
return nil, false, userResource.NewNotFound(name)
return nil, false, resource.NewNotFound(name)
}
userToDelete := &found.Items[0]
@@ -156,7 +157,7 @@ func (s *LegacyStore) Delete(ctx context.Context, name string, deleteValidation
}
func (s *LegacyStore) New() runtime.Object {
return userResource.NewFunc()
return resource.NewFunc()
}
func (s *LegacyStore) Destroy() {}
@@ -166,15 +167,15 @@ func (s *LegacyStore) NamespaceScoped() bool {
}
func (s *LegacyStore) GetSingularName() string {
return userResource.GetSingularName()
return resource.GetSingularName()
}
func (s *LegacyStore) NewList() runtime.Object {
return userResource.NewListFunc()
return resource.NewListFunc()
}
func (s *LegacyStore) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {
return userResource.TableConverter().ConvertToTable(ctx, object, tableOptions)
return resource.TableConverter().ConvertToTable(ctx, object, tableOptions)
}
func (s *LegacyStore) List(ctx context.Context, options *internalversion.ListOptions) (runtime.Object, error) {
@@ -182,7 +183,7 @@ func (s *LegacyStore) List(ctx context.Context, options *internalversion.ListOpt
defer span.End()
res, err := common.List(
ctx, userResource, s.ac, common.PaginationFromListOptions(options),
ctx, resource, s.ac, common.PaginationFromListOptions(options),
func(ctx context.Context, ns claims.NamespaceInfo, p common.Pagination) (*common.ListResponse[iamv0alpha1.User], error) {
found, err := s.store.ListUsers(ctx, ns, legacy.ListUserQuery{
Pagination: p,
@@ -230,10 +231,10 @@ func (s *LegacyStore) Get(ctx context.Context, name string, options *metav1.GetO
Pagination: common.Pagination{Limit: 1},
})
if found == nil || err != nil {
return nil, userResource.NewNotFound(name)
return nil, resource.NewNotFound(name)
}
if len(found.Items) < 1 {
return nil, userResource.NewNotFound(name)
return nil, resource.NewNotFound(name)
}
obj := toUserItem(&found.Items[0], ns.Value)
@@ -246,7 +247,7 @@ func (s *LegacyStore) Create(ctx context.Context, obj runtime.Object, createVali
defer span.End()
if !s.enableAuthnMutation {
return nil, apierrors.NewMethodNotSupported(userResource.GroupResource(), "create")
return nil, apierrors.NewMethodNotSupported(resource.GroupResource(), "create")
}
ns, err := request.NamespaceInfoFrom(ctx, true)
@@ -309,12 +310,18 @@ func toUserItem(u *common.UserWithRole, ns string) iamv0alpha1.User {
Provisioned: u.IsProvisioned,
Role: u.Role,
},
Status: iamv0alpha1.UserStatus{
LastSeenAt: u.LastSeenAt.Unix(),
},
}
obj, _ := utils.MetaAccessor(item)
obj.SetUpdatedTimestamp(&u.Updated)
obj.SetAnnotation(AnnoKeyLastSeenAt, formatTime(&u.LastSeenAt))
obj.SetDeprecatedInternalID(u.ID) // nolint:staticcheck
return *item
}
func formatTime(v *time.Time) string {
txt := ""
if v != nil && v.Unix() != 0 {
txt = v.UTC().Format(time.RFC3339)
}
return txt
}

Some files were not shown because too many files have changed in this diff Show More