Compare commits
16 Commits
docs/add-t
...
toddtreece
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
07bb48e874 | ||
|
|
306186c4ea | ||
|
|
a28c70bbcc | ||
|
|
1ebcd2319a | ||
|
|
5dc3767854 | ||
|
|
040dbfb5e3 | ||
|
|
32d43f5b5d | ||
|
|
fef9c760a0 | ||
|
|
1fe9a38a2a | ||
|
|
59bf7896f4 | ||
|
|
4b4ad544a8 | ||
|
|
7e3289f2c9 | ||
|
|
0d0b5b757b | ||
|
|
c49261cce2 | ||
|
|
d5efce72f3 | ||
|
|
881c81f0b3 |
3
.github/CODEOWNERS
vendored
3
.github/CODEOWNERS
vendored
@@ -520,7 +520,7 @@ i18next.config.ts @grafana/grafana-frontend-platform
|
||||
/e2e-playwright/various-suite/solo-route.spec.ts @grafana/dashboards-squad
|
||||
/e2e-playwright/various-suite/trace-view-scrolling.spec.ts @grafana/observability-traces-and-profiling
|
||||
/e2e-playwright/various-suite/verify-i18n.spec.ts @grafana/grafana-frontend-platform
|
||||
/e2e-playwright/various-suite/visualization-suggestions.spec.ts @grafana/dataviz-squad
|
||||
/e2e-playwright/various-suite/visualization-suggestions.spec.ts @grafana/dashboards-squad
|
||||
/e2e-playwright/various-suite/perf-test.spec.ts @grafana/grafana-frontend-platform
|
||||
|
||||
# Packages
|
||||
@@ -956,7 +956,6 @@ playwright.storybook.config.ts @grafana/grafana-frontend-platform
|
||||
/public/app/features/notifications/ @grafana/grafana-search-navigate-organise
|
||||
/public/app/features/org/ @grafana/grafana-search-navigate-organise
|
||||
/public/app/features/panel/ @grafana/dashboards-squad
|
||||
/public/app/features/panel/components/VizTypePicker/VisualizationSuggestions.tsx @grafana/dataviz-squad
|
||||
/public/app/features/panel/suggestions/ @grafana/dataviz-squad
|
||||
/public/app/features/playlist/ @grafana/dashboards-squad
|
||||
/public/app/features/plugins/ @grafana/plugins-platform-frontend
|
||||
|
||||
@@ -1603,6 +1603,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1670,6 +1671,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 98,
|
||||
"min": 5,
|
||||
"noise": 22,
|
||||
@@ -1687,6 +1689,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1754,6 +1757,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 98,
|
||||
"min": 5,
|
||||
"noise": 22,
|
||||
@@ -1784,6 +1788,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1852,6 +1857,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 8,
|
||||
"min": 1,
|
||||
"noise": 2,
|
||||
@@ -1869,6 +1875,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1937,6 +1944,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 12,
|
||||
"min": 1,
|
||||
"noise": 2,
|
||||
@@ -1954,6 +1962,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -2021,6 +2030,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 100,
|
||||
"min": 10,
|
||||
"noise": 22,
|
||||
@@ -2038,6 +2048,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -2105,6 +2116,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 100,
|
||||
"min": 10,
|
||||
"noise": 22,
|
||||
@@ -2117,147 +2129,6 @@
|
||||
],
|
||||
"title": "Backend",
|
||||
"type": "radialbar"
|
||||
},
|
||||
{
|
||||
"collapsed": false,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 66
|
||||
},
|
||||
"id": 35,
|
||||
"panels": [],
|
||||
"title": "Empty data",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": 0
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 6,
|
||||
"x": 0,
|
||||
"y": 67
|
||||
},
|
||||
"id": 36,
|
||||
"options": {
|
||||
"barWidthFactor": 0.5,
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"segmentCount": 1,
|
||||
"segmentSpacing": 0.3,
|
||||
"shape": "gauge",
|
||||
"showThresholdLabels": false,
|
||||
"showThresholdMarkers": true,
|
||||
"sparkline": true
|
||||
},
|
||||
"pluginVersion": "13.0.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"refId": "A",
|
||||
"scenarioId": "random_walk",
|
||||
"seriesCount": 0
|
||||
}
|
||||
],
|
||||
"title": "Numeric, no series",
|
||||
"type": "gauge"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": 0
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 6,
|
||||
"x": 6,
|
||||
"y": 67
|
||||
},
|
||||
"id": 37,
|
||||
"options": {
|
||||
"barWidthFactor": 0.5,
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"segmentCount": 1,
|
||||
"segmentSpacing": 0.3,
|
||||
"shape": "gauge",
|
||||
"showThresholdLabels": false,
|
||||
"showThresholdMarkers": true,
|
||||
"sparkline": true
|
||||
},
|
||||
"pluginVersion": "13.0.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"refId": "A",
|
||||
"scenarioId": "logs"
|
||||
}
|
||||
],
|
||||
"title": "Non-numeric",
|
||||
"type": "gauge"
|
||||
}
|
||||
],
|
||||
"preload": false,
|
||||
|
||||
@@ -22,40 +22,13 @@ v0alpha1: {
|
||||
serviceaccountv0alpha1,
|
||||
externalGroupMappingv0alpha1
|
||||
]
|
||||
|
||||
routes: {
|
||||
namespaced: {
|
||||
"/searchUsers": {
|
||||
"GET": {
|
||||
request: {
|
||||
query: {
|
||||
query?: string
|
||||
limit?: int64 | 10
|
||||
offset?: int64 | 0
|
||||
page?: int64 | 1
|
||||
}
|
||||
}
|
||||
response: {
|
||||
offset: int64
|
||||
totalHits: int64
|
||||
hits: [...#UserHit]
|
||||
queryCost: float64
|
||||
maxScore: float64
|
||||
}
|
||||
responseMetadata: {
|
||||
typeMeta: false
|
||||
objectMeta: false
|
||||
}
|
||||
}
|
||||
}
|
||||
"/searchTeams": {
|
||||
"GET": {
|
||||
request: {
|
||||
query: {
|
||||
query?: string
|
||||
limit?: int64 | 50
|
||||
offset?: int64 | 0
|
||||
page?: int64 | 1
|
||||
}
|
||||
}
|
||||
response: {
|
||||
@@ -78,15 +51,3 @@ v0alpha1: {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#UserHit: {
|
||||
name: string
|
||||
title: string
|
||||
login: string
|
||||
email: string
|
||||
role: string
|
||||
lastSeenAt: int64
|
||||
lastSeenAtAge: string
|
||||
provisioned: bool
|
||||
score: float64
|
||||
}
|
||||
|
||||
@@ -29,9 +29,6 @@ userv0alpha1: userKind & {
|
||||
// }
|
||||
schema: {
|
||||
spec: v0alpha1.UserSpec
|
||||
status: {
|
||||
lastSeenAt: int64 | 0
|
||||
}
|
||||
}
|
||||
// TODO: Uncomment when the custom routes implementation is done
|
||||
// routes: {
|
||||
|
||||
@@ -3,10 +3,7 @@
|
||||
package v0alpha1
|
||||
|
||||
type GetSearchTeamsRequestParams struct {
|
||||
Query *string `json:"query,omitempty"`
|
||||
Limit int64 `json:"limit,omitempty"`
|
||||
Offset int64 `json:"offset,omitempty"`
|
||||
Page int64 `json:"page,omitempty"`
|
||||
Query *string `json:"query,omitempty"`
|
||||
}
|
||||
|
||||
// NewGetSearchTeamsRequestParams creates a new GetSearchTeamsRequestParams object.
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
// Code generated - EDITING IS FUTILE. DO NOT EDIT.
|
||||
|
||||
package v0alpha1
|
||||
|
||||
import (
|
||||
"github.com/grafana/grafana-app-sdk/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
type GetSearchUsersRequestParamsObject struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
GetSearchUsersRequestParams `json:",inline"`
|
||||
}
|
||||
|
||||
func NewGetSearchUsersRequestParamsObject() *GetSearchUsersRequestParamsObject {
|
||||
return &GetSearchUsersRequestParamsObject{}
|
||||
}
|
||||
|
||||
func (o *GetSearchUsersRequestParamsObject) DeepCopyObject() runtime.Object {
|
||||
dst := NewGetSearchUsersRequestParamsObject()
|
||||
o.DeepCopyInto(dst)
|
||||
return dst
|
||||
}
|
||||
|
||||
func (o *GetSearchUsersRequestParamsObject) DeepCopyInto(dst *GetSearchUsersRequestParamsObject) {
|
||||
dst.TypeMeta.APIVersion = o.TypeMeta.APIVersion
|
||||
dst.TypeMeta.Kind = o.TypeMeta.Kind
|
||||
dstGetSearchUsersRequestParams := GetSearchUsersRequestParams{}
|
||||
_ = resource.CopyObjectInto(&dstGetSearchUsersRequestParams, &o.GetSearchUsersRequestParams)
|
||||
}
|
||||
|
||||
var _ runtime.Object = NewGetSearchUsersRequestParamsObject()
|
||||
@@ -1,15 +0,0 @@
|
||||
// Code generated - EDITING IS FUTILE. DO NOT EDIT.
|
||||
|
||||
package v0alpha1
|
||||
|
||||
type GetSearchUsersRequestParams struct {
|
||||
Query *string `json:"query,omitempty"`
|
||||
Limit int64 `json:"limit,omitempty"`
|
||||
Offset int64 `json:"offset,omitempty"`
|
||||
Page int64 `json:"page,omitempty"`
|
||||
}
|
||||
|
||||
// NewGetSearchUsersRequestParams creates a new GetSearchUsersRequestParams object.
|
||||
func NewGetSearchUsersRequestParams() *GetSearchUsersRequestParams {
|
||||
return &GetSearchUsersRequestParams{}
|
||||
}
|
||||
@@ -1,37 +0,0 @@
|
||||
// Code generated - EDITING IS FUTILE. DO NOT EDIT.
|
||||
|
||||
package v0alpha1
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type UserHit struct {
|
||||
Name string `json:"name"`
|
||||
Title string `json:"title"`
|
||||
Login string `json:"login"`
|
||||
Email string `json:"email"`
|
||||
Role string `json:"role"`
|
||||
LastSeenAt int64 `json:"lastSeenAt"`
|
||||
LastSeenAtAge string `json:"lastSeenAtAge"`
|
||||
Provisioned bool `json:"provisioned"`
|
||||
Score float64 `json:"score"`
|
||||
}
|
||||
|
||||
// NewUserHit creates a new UserHit object.
|
||||
func NewUserHit() *UserHit {
|
||||
return &UserHit{}
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type GetSearchUsers struct {
|
||||
Offset int64 `json:"offset"`
|
||||
TotalHits int64 `json:"totalHits"`
|
||||
Hits []UserHit `json:"hits"`
|
||||
QueryCost float64 `json:"queryCost"`
|
||||
MaxScore float64 `json:"maxScore"`
|
||||
}
|
||||
|
||||
// NewGetSearchUsers creates a new GetSearchUsers object.
|
||||
func NewGetSearchUsers() *GetSearchUsers {
|
||||
return &GetSearchUsers{
|
||||
Hits: []UserHit{},
|
||||
}
|
||||
}
|
||||
19
apps/iam/pkg/apis/iam/v0alpha1/user_client_gen.go
generated
19
apps/iam/pkg/apis/iam/v0alpha1/user_client_gen.go
generated
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana-app-sdk/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type UserClient struct {
|
||||
@@ -76,24 +75,6 @@ func (c *UserClient) Patch(ctx context.Context, identifier resource.Identifier,
|
||||
return c.client.Patch(ctx, identifier, req, opts)
|
||||
}
|
||||
|
||||
func (c *UserClient) UpdateStatus(ctx context.Context, identifier resource.Identifier, newStatus UserStatus, opts resource.UpdateOptions) (*User, error) {
|
||||
return c.client.Update(ctx, &User{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: UserKind().Kind(),
|
||||
APIVersion: GroupVersion.Identifier(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
ResourceVersion: opts.ResourceVersion,
|
||||
Namespace: identifier.Namespace,
|
||||
Name: identifier.Name,
|
||||
},
|
||||
Status: newStatus,
|
||||
}, resource.UpdateOptions{
|
||||
Subresource: "status",
|
||||
ResourceVersion: opts.ResourceVersion,
|
||||
})
|
||||
}
|
||||
|
||||
func (c *UserClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error {
|
||||
return c.client.Delete(ctx, identifier, opts)
|
||||
}
|
||||
|
||||
31
apps/iam/pkg/apis/iam/v0alpha1/user_object_gen.go
generated
31
apps/iam/pkg/apis/iam/v0alpha1/user_object_gen.go
generated
@@ -21,14 +21,11 @@ type User struct {
|
||||
|
||||
// Spec is the spec of the User
|
||||
Spec UserSpec `json:"spec" yaml:"spec"`
|
||||
|
||||
Status UserStatus `json:"status" yaml:"status"`
|
||||
}
|
||||
|
||||
func NewUser() *User {
|
||||
return &User{
|
||||
Spec: *NewUserSpec(),
|
||||
Status: *NewUserStatus(),
|
||||
Spec: *NewUserSpec(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -46,15 +43,11 @@ func (o *User) SetSpec(spec any) error {
|
||||
}
|
||||
|
||||
func (o *User) GetSubresources() map[string]any {
|
||||
return map[string]any{
|
||||
"status": o.Status,
|
||||
}
|
||||
return map[string]any{}
|
||||
}
|
||||
|
||||
func (o *User) GetSubresource(name string) (any, bool) {
|
||||
switch name {
|
||||
case "status":
|
||||
return o.Status, true
|
||||
default:
|
||||
return nil, false
|
||||
}
|
||||
@@ -62,13 +55,6 @@ func (o *User) GetSubresource(name string) (any, bool) {
|
||||
|
||||
func (o *User) SetSubresource(name string, value any) error {
|
||||
switch name {
|
||||
case "status":
|
||||
cast, ok := value.(UserStatus)
|
||||
if !ok {
|
||||
return fmt.Errorf("cannot set status type %#v, not of type UserStatus", value)
|
||||
}
|
||||
o.Status = cast
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("subresource '%s' does not exist", name)
|
||||
}
|
||||
@@ -240,7 +226,6 @@ func (o *User) DeepCopyInto(dst *User) {
|
||||
dst.TypeMeta.Kind = o.TypeMeta.Kind
|
||||
o.ObjectMeta.DeepCopyInto(&dst.ObjectMeta)
|
||||
o.Spec.DeepCopyInto(&dst.Spec)
|
||||
o.Status.DeepCopyInto(&dst.Status)
|
||||
}
|
||||
|
||||
// Interface compliance compile-time check
|
||||
@@ -312,15 +297,3 @@ func (s *UserSpec) DeepCopy() *UserSpec {
|
||||
func (s *UserSpec) DeepCopyInto(dst *UserSpec) {
|
||||
resource.CopyObjectInto(dst, s)
|
||||
}
|
||||
|
||||
// DeepCopy creates a full deep copy of UserStatus
|
||||
func (s *UserStatus) DeepCopy() *UserStatus {
|
||||
cpy := &UserStatus{}
|
||||
s.DeepCopyInto(cpy)
|
||||
return cpy
|
||||
}
|
||||
|
||||
// DeepCopyInto deep copies UserStatus into another UserStatus object
|
||||
func (s *UserStatus) DeepCopyInto(dst *UserStatus) {
|
||||
resource.CopyObjectInto(dst, s)
|
||||
}
|
||||
|
||||
33
apps/iam/pkg/apis/iam/v0alpha1/user_status_gen.go
generated
33
apps/iam/pkg/apis/iam/v0alpha1/user_status_gen.go
generated
@@ -2,12 +2,43 @@
|
||||
|
||||
package v0alpha1
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type UserstatusOperatorState struct {
|
||||
// lastEvaluation is the ResourceVersion last evaluated
|
||||
LastEvaluation string `json:"lastEvaluation"`
|
||||
// state describes the state of the lastEvaluation.
|
||||
// It is limited to three possible states for machine evaluation.
|
||||
State UserStatusOperatorStateState `json:"state"`
|
||||
// descriptiveState is an optional more descriptive state field which has no requirements on format
|
||||
DescriptiveState *string `json:"descriptiveState,omitempty"`
|
||||
// details contains any extra information that is operator-specific
|
||||
Details map[string]interface{} `json:"details,omitempty"`
|
||||
}
|
||||
|
||||
// NewUserstatusOperatorState creates a new UserstatusOperatorState object.
|
||||
func NewUserstatusOperatorState() *UserstatusOperatorState {
|
||||
return &UserstatusOperatorState{}
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type UserStatus struct {
|
||||
LastSeenAt int64 `json:"lastSeenAt"`
|
||||
// operatorStates is a map of operator ID to operator state evaluations.
|
||||
// Any operator which consumes this kind SHOULD add its state evaluation information to this field.
|
||||
OperatorStates map[string]UserstatusOperatorState `json:"operatorStates,omitempty"`
|
||||
// additionalFields is reserved for future use
|
||||
AdditionalFields map[string]interface{} `json:"additionalFields,omitempty"`
|
||||
}
|
||||
|
||||
// NewUserStatus creates a new UserStatus object.
|
||||
func NewUserStatus() *UserStatus {
|
||||
return &UserStatus{}
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type UserStatusOperatorStateState string
|
||||
|
||||
const (
|
||||
UserStatusOperatorStateStateSuccess UserStatusOperatorStateState = "success"
|
||||
UserStatusOperatorStateStateInProgress UserStatusOperatorStateState = "in_progress"
|
||||
UserStatusOperatorStateStateFailed UserStatusOperatorStateState = "failed"
|
||||
)
|
||||
|
||||
230
apps/iam/pkg/apis/iam/v0alpha1/zz_openapi_gen.go
generated
230
apps/iam/pkg/apis/iam/v0alpha1/zz_openapi_gen.go
generated
@@ -21,7 +21,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GetGroupsBody": schema_pkg_apis_iam_v0alpha1_GetGroupsBody(ref),
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GetSearchTeams": schema_pkg_apis_iam_v0alpha1_GetSearchTeams(ref),
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GetSearchTeamsBody": schema_pkg_apis_iam_v0alpha1_GetSearchTeamsBody(ref),
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GetSearchUsers": schema_pkg_apis_iam_v0alpha1_GetSearchUsers(ref),
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRole": schema_pkg_apis_iam_v0alpha1_GlobalRole(ref),
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleBinding": schema_pkg_apis_iam_v0alpha1_GlobalRoleBinding(ref),
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.GlobalRoleBindingList": schema_pkg_apis_iam_v0alpha1_GlobalRoleBindingList(ref),
|
||||
@@ -73,10 +72,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamStatus": schema_pkg_apis_iam_v0alpha1_TeamStatus(ref),
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.TeamstatusOperatorState": schema_pkg_apis_iam_v0alpha1_TeamstatusOperatorState(ref),
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.User": schema_pkg_apis_iam_v0alpha1_User(ref),
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserHit": schema_pkg_apis_iam_v0alpha1_UserHit(ref),
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserList": schema_pkg_apis_iam_v0alpha1_UserList(ref),
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserSpec": schema_pkg_apis_iam_v0alpha1_UserSpec(ref),
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserStatus": schema_pkg_apis_iam_v0alpha1_UserStatus(ref),
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserstatusOperatorState": schema_pkg_apis_iam_v0alpha1_UserstatusOperatorState(ref),
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.VersionsV0alpha1Kinds7RoutesGroupsGETResponseExternalGroupMapping": schema_pkg_apis_iam_v0alpha1_VersionsV0alpha1Kinds7RoutesGroupsGETResponseExternalGroupMapping(ref),
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.VersionsV0alpha1RoutesNamespacedSearchTeamsGETResponseTeamHit": schema_pkg_apis_iam_v0alpha1_VersionsV0alpha1RoutesNamespacedSearchTeamsGETResponseTeamHit(ref),
|
||||
}
|
||||
@@ -689,62 +688,6 @@ func schema_pkg_apis_iam_v0alpha1_GetSearchTeamsBody(ref common.ReferenceCallbac
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_iam_v0alpha1_GetSearchUsers(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"offset": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: 0,
|
||||
Type: []string{"integer"},
|
||||
Format: "int64",
|
||||
},
|
||||
},
|
||||
"totalHits": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: 0,
|
||||
Type: []string{"integer"},
|
||||
Format: "int64",
|
||||
},
|
||||
},
|
||||
"hits": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserHit"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"queryCost": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: 0,
|
||||
Type: []string{"number"},
|
||||
Format: "double",
|
||||
},
|
||||
},
|
||||
"maxScore": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: 0,
|
||||
Type: []string{"number"},
|
||||
Format: "double",
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"offset", "totalHits", "hits", "queryCost", "maxScore"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserHit"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_iam_v0alpha1_GlobalRole(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
@@ -2890,94 +2833,12 @@ func schema_pkg_apis_iam_v0alpha1_User(ref common.ReferenceCallback) common.Open
|
||||
Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserSpec"),
|
||||
},
|
||||
},
|
||||
"status": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserStatus"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"metadata", "spec", "status"},
|
||||
Required: []string{"metadata", "spec"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserSpec", "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_iam_v0alpha1_UserHit(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"name": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"title": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"login": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"email": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"role": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"lastSeenAt": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: 0,
|
||||
Type: []string{"integer"},
|
||||
Format: "int64",
|
||||
},
|
||||
},
|
||||
"lastSeenAtAge": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"provisioned": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: false,
|
||||
Type: []string{"boolean"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"score": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: 0,
|
||||
Type: []string{"number"},
|
||||
Format: "double",
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"name", "title", "login", "email", "role", "lastSeenAt", "lastSeenAtAge", "provisioned", "score"},
|
||||
},
|
||||
},
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3104,15 +2965,90 @@ func schema_pkg_apis_iam_v0alpha1_UserStatus(ref common.ReferenceCallback) commo
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"lastSeenAt": {
|
||||
"operatorStates": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: 0,
|
||||
Type: []string{"integer"},
|
||||
Format: "int64",
|
||||
Description: "operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field.",
|
||||
Type: []string{"object"},
|
||||
AdditionalProperties: &spec.SchemaOrBool{
|
||||
Allows: true,
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserstatusOperatorState"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"additionalFields": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "additionalFields is reserved for future use",
|
||||
Type: []string{"object"},
|
||||
AdditionalProperties: &spec.SchemaOrBool{
|
||||
Allows: true,
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"lastSeenAt"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1.UserstatusOperatorState"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_iam_v0alpha1_UserstatusOperatorState(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"lastEvaluation": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "lastEvaluation is the ResourceVersion last evaluated",
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"state": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "state describes the state of the lastEvaluation. It is limited to three possible states for machine evaluation.",
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"descriptiveState": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "descriptiveState is an optional more descriptive state field which has no requirements on format",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"details": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "details contains any extra information that is operator-specific",
|
||||
Type: []string{"object"},
|
||||
AdditionalProperties: &spec.SchemaOrBool{
|
||||
Allows: true,
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"lastEvaluation", "state"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
206
apps/iam/pkg/apis/iam_manifest.go
generated
206
apps/iam/pkg/apis/iam_manifest.go
generated
@@ -173,36 +173,6 @@ var appManifestData = app.ManifestData{
|
||||
|
||||
Parameters: []*spec3.Parameter{
|
||||
|
||||
{
|
||||
ParameterProps: spec3.ParameterProps{
|
||||
Name: "limit",
|
||||
In: "query",
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
ParameterProps: spec3.ParameterProps{
|
||||
Name: "offset",
|
||||
In: "query",
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
ParameterProps: spec3.ParameterProps{
|
||||
Name: "page",
|
||||
In: "query",
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
ParameterProps: spec3.ParameterProps{
|
||||
Name: "query",
|
||||
@@ -291,118 +261,6 @@ var appManifestData = app.ManifestData{
|
||||
},
|
||||
},
|
||||
},
|
||||
"/searchUsers": {
|
||||
Get: &spec3.Operation{
|
||||
OperationProps: spec3.OperationProps{
|
||||
|
||||
OperationId: "getSearchUsers",
|
||||
|
||||
Parameters: []*spec3.Parameter{
|
||||
|
||||
{
|
||||
ParameterProps: spec3.ParameterProps{
|
||||
Name: "limit",
|
||||
In: "query",
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
ParameterProps: spec3.ParameterProps{
|
||||
Name: "offset",
|
||||
In: "query",
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
ParameterProps: spec3.ParameterProps{
|
||||
Name: "page",
|
||||
In: "query",
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
ParameterProps: spec3.ParameterProps{
|
||||
Name: "query",
|
||||
In: "query",
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
Responses: &spec3.Responses{
|
||||
ResponsesProps: spec3.ResponsesProps{
|
||||
Default: &spec3.Response{
|
||||
ResponseProps: spec3.ResponseProps{
|
||||
Description: "Default OK response",
|
||||
Content: map[string]*spec3.MediaType{
|
||||
"application/json": {
|
||||
MediaTypeProps: spec3.MediaTypeProps{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"hits": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
|
||||
Ref: spec.MustCreateRef("#/components/schemas/getSearchUsersUserHit"),
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
"maxScore": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"number"},
|
||||
},
|
||||
},
|
||||
"offset": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"integer"},
|
||||
},
|
||||
},
|
||||
"queryCost": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"number"},
|
||||
},
|
||||
},
|
||||
"totalHits": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"integer"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{
|
||||
"offset",
|
||||
"totalHits",
|
||||
"hits",
|
||||
"queryCost",
|
||||
"maxScore",
|
||||
},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Cluster: map[string]spec3.PathProps{},
|
||||
Schemas: map[string]spec.Schema{
|
||||
@@ -445,69 +303,6 @@ var appManifestData = app.ManifestData{
|
||||
},
|
||||
},
|
||||
},
|
||||
"getSearchUsersUserHit": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"email": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
},
|
||||
},
|
||||
"lastSeenAt": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"integer"},
|
||||
},
|
||||
},
|
||||
"lastSeenAtAge": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
},
|
||||
},
|
||||
"login": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
},
|
||||
},
|
||||
"name": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
},
|
||||
},
|
||||
"provisioned": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"boolean"},
|
||||
},
|
||||
},
|
||||
"role": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
},
|
||||
},
|
||||
"score": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"number"},
|
||||
},
|
||||
},
|
||||
"title": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{
|
||||
"name",
|
||||
"title",
|
||||
"login",
|
||||
"email",
|
||||
"role",
|
||||
"lastSeenAt",
|
||||
"lastSeenAtAge",
|
||||
"provisioned",
|
||||
"score",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -547,7 +342,6 @@ var customRouteToGoResponseType = map[string]any{
|
||||
"v0alpha1|Team|groups|GET": v0alpha1.GetGroups{},
|
||||
|
||||
"v0alpha1||<namespace>/searchTeams|GET": v0alpha1.GetSearchTeams{},
|
||||
"v0alpha1||<namespace>/searchUsers|GET": v0alpha1.GetSearchUsers{},
|
||||
}
|
||||
|
||||
// ManifestCustomRouteResponsesAssociator returns the associated response go type for a given kind, version, custom route path, and method, if one exists.
|
||||
|
||||
@@ -24,6 +24,7 @@ require (
|
||||
require (
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.1.6 // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
|
||||
github.com/apache/arrow-go/v18 v18.4.1 // indirect
|
||||
github.com/armon/go-metrics v0.4.1 // indirect
|
||||
@@ -35,16 +36,21 @@ require (
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 // indirect
|
||||
github.com/aws/smithy-go v1.23.1 // indirect
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver v3.5.1+incompatible // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/bluele/gcache v0.0.2 // indirect
|
||||
github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 // indirect
|
||||
github.com/bwmarrin/snowflake v0.3.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cheekybits/genny v1.0.0 // indirect
|
||||
github.com/cloudflare/circl v1.6.1 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/diegoholiveira/jsonlogic/v3 v3.7.4 // indirect
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
@@ -132,11 +138,15 @@ require (
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
|
||||
github.com/nikunjy/rules v1.5.0 // indirect
|
||||
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect
|
||||
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect
|
||||
github.com/oklog/run v1.1.0 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/open-feature/go-sdk v1.16.0 // indirect
|
||||
github.com/open-feature/go-sdk-contrib/providers/go-feature-flag v0.2.6 // indirect
|
||||
github.com/open-feature/go-sdk-contrib/providers/ofrep v0.1.6 // indirect
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
|
||||
github.com/perimeterx/marshmallow v1.1.5 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.22 // indirect
|
||||
@@ -155,6 +165,7 @@ require (
|
||||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
github.com/stoewer/go-strcase v1.3.1 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/thomaspoignant/go-feature-flag v1.42.0 // indirect
|
||||
github.com/tjhop/slog-gokit v0.1.5 // indirect
|
||||
github.com/woodsbury/decimal128 v1.3.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
@@ -176,6 +187,8 @@ require (
|
||||
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.38.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/mock v0.6.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.1 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
|
||||
@@ -4,9 +4,13 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
|
||||
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
|
||||
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw=
|
||||
github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
@@ -38,12 +42,18 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 h1:+LVB0xBqEgjQoqr9bGZbRzvg212B
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5/go.mod h1:xoaxeqnnUaZjPjaICgIy5B+MHCSb/ZSOn4MvkFNOUA0=
|
||||
github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M=
|
||||
github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df h1:GSoSVRLoBaFpOOds6QyY1L8AX7uoY+Ln3BHc22W40X0=
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df/go.mod h1:hiVxq5OP2bUGBRNS3Z/bt/reCLFNbdcST6gISi1fiOM=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
|
||||
github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
|
||||
github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 h1:N7oVaKyGp8bttX0bfZGmcGkjz7DLQXhAn3DNd3T0ous=
|
||||
github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874/go.mod h1:r5xuitiExdLAJ09PR7vBVENGvp4ZuTBeWTGtxuX3K+c=
|
||||
github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw=
|
||||
@@ -60,6 +70,8 @@ github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wX
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
|
||||
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
|
||||
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
|
||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
@@ -69,6 +81,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/diegoholiveira/jsonlogic/v3 v3.7.4 h1:92HSmB9bwM/o0ZvrCpcvTP2EsPXSkKtAniIr2W/dcIM=
|
||||
github.com/diegoholiveira/jsonlogic/v3 v3.7.4/go.mod h1:OYRb6FSTVmMM+MNQ7ElmMsczyNSepw+OU4Z8emDSi4w=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes=
|
||||
@@ -341,6 +355,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nikunjy/rules v1.5.0 h1:KJDSLOsFhwt7kcXUyZqwkgrQg5YoUwj+TVu6ItCQShw=
|
||||
github.com/nikunjy/rules v1.5.0/go.mod h1:TlZtZdBChrkqi8Lr2AXocme8Z7EsbxtFdDoKeI6neBQ=
|
||||
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY=
|
||||
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw=
|
||||
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c=
|
||||
@@ -355,6 +371,12 @@ github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU
|
||||
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
|
||||
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
|
||||
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
|
||||
github.com/open-feature/go-sdk v1.16.0 h1:5NCHYv5slvNBIZhYXAzAufo0OI59OACZ5tczVqSE+Tg=
|
||||
github.com/open-feature/go-sdk v1.16.0/go.mod h1:EIF40QcoYT1VbQkMPy2ZJH4kvZeY+qGUXAorzSWgKSo=
|
||||
github.com/open-feature/go-sdk-contrib/providers/go-feature-flag v0.2.6 h1:megzzlQGjsRVWDX8oJnLaa5eEcsAHekiL4Uvl3jSAcY=
|
||||
github.com/open-feature/go-sdk-contrib/providers/go-feature-flag v0.2.6/go.mod h1:K1gDKvt76CGFLSUMHUydd5ba2V5Cv69gQZsdbnXhAm8=
|
||||
github.com/open-feature/go-sdk-contrib/providers/ofrep v0.1.6 h1:WinefYxeVx5rV0uQmuWbxQf8iACu/JiRubo5w0saToc=
|
||||
github.com/open-feature/go-sdk-contrib/providers/ofrep v0.1.6/go.mod h1:Dwcaoma6lZVqYwyfVlY7eB6RXbG+Ju3b9cnpTlUN+Hc=
|
||||
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
|
||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
@@ -440,6 +462,10 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/thejerf/slogassert v0.3.4 h1:VoTsXixRbXMrRSSxDjYTiEDCM4VWbsYPW5rB/hX24kM=
|
||||
github.com/thejerf/slogassert v0.3.4/go.mod h1:0zn9ISLVKo1aPMTqcGfG1o6dWwt+Rk574GlUxHD4rs8=
|
||||
github.com/thomaspoignant/go-feature-flag v1.42.0 h1:C7embmOTzaLyRki+OoU2RvtVjJE9IrvgBA2C1mRN1lc=
|
||||
github.com/thomaspoignant/go-feature-flag v1.42.0/go.mod h1:y0QiWH7chHWhGATb/+XqwAwErORmPSH2MUsQlCmmWlM=
|
||||
github.com/tjhop/slog-gokit v0.1.5 h1:ayloIUi5EK2QYB8eY4DOPO95/mRtMW42lUkp3quJohc=
|
||||
github.com/tjhop/slog-gokit v0.1.5/go.mod h1:yA48zAHvV+Sg4z4VRyeFyFUNNXd3JY5Zg84u3USICq0=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
||||
@@ -507,8 +533,12 @@ go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJr
|
||||
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
|
||||
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
|
||||
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
|
||||
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
|
||||
|
||||
@@ -5,7 +5,24 @@ metaV0Alpha1: {
|
||||
scope: "Namespaced"
|
||||
schema: {
|
||||
spec: {
|
||||
pluginJSON: #JSONData,
|
||||
pluginJson: #JSONData
|
||||
module?: {
|
||||
path: string
|
||||
hash?: string
|
||||
loadingStrategy?: "fetch" | "script"
|
||||
}
|
||||
baseURL?: string
|
||||
signature?: {
|
||||
status: "internal" | "valid" | "invalid" | "modified" | "unsigned"
|
||||
type?: "grafana" | "commercial" | "community" | "private" | "private-glob"
|
||||
org?: string
|
||||
}
|
||||
angular?: {
|
||||
detected: bool
|
||||
}
|
||||
translations?: [string]: string
|
||||
// +listType=atomic
|
||||
children?: [...string]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -208,13 +208,20 @@ func NewMetaExtensions() *MetaExtensions {
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type MetaSpec struct {
|
||||
PluginJSON MetaJSONData `json:"pluginJSON"`
|
||||
PluginJson MetaJSONData `json:"pluginJson"`
|
||||
Module *MetaV0alpha1SpecModule `json:"module,omitempty"`
|
||||
BaseURL *string `json:"baseURL,omitempty"`
|
||||
Signature *MetaV0alpha1SpecSignature `json:"signature,omitempty"`
|
||||
Angular *MetaV0alpha1SpecAngular `json:"angular,omitempty"`
|
||||
Translations map[string]string `json:"translations,omitempty"`
|
||||
// +listType=atomic
|
||||
Children []string `json:"children,omitempty"`
|
||||
}
|
||||
|
||||
// NewMetaSpec creates a new MetaSpec object.
|
||||
func NewMetaSpec() *MetaSpec {
|
||||
return &MetaSpec{
|
||||
PluginJSON: *NewMetaJSONData(),
|
||||
PluginJson: *NewMetaJSONData(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -412,6 +419,40 @@ func NewMetaV0alpha1ExtensionsExtensionPoints() *MetaV0alpha1ExtensionsExtension
|
||||
return &MetaV0alpha1ExtensionsExtensionPoints{}
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type MetaV0alpha1SpecModule struct {
|
||||
Path string `json:"path"`
|
||||
Hash *string `json:"hash,omitempty"`
|
||||
LoadingStrategy *MetaV0alpha1SpecModuleLoadingStrategy `json:"loadingStrategy,omitempty"`
|
||||
}
|
||||
|
||||
// NewMetaV0alpha1SpecModule creates a new MetaV0alpha1SpecModule object.
|
||||
func NewMetaV0alpha1SpecModule() *MetaV0alpha1SpecModule {
|
||||
return &MetaV0alpha1SpecModule{}
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type MetaV0alpha1SpecSignature struct {
|
||||
Status MetaV0alpha1SpecSignatureStatus `json:"status"`
|
||||
Type *MetaV0alpha1SpecSignatureType `json:"type,omitempty"`
|
||||
Org *string `json:"org,omitempty"`
|
||||
}
|
||||
|
||||
// NewMetaV0alpha1SpecSignature creates a new MetaV0alpha1SpecSignature object.
|
||||
func NewMetaV0alpha1SpecSignature() *MetaV0alpha1SpecSignature {
|
||||
return &MetaV0alpha1SpecSignature{}
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type MetaV0alpha1SpecAngular struct {
|
||||
Detected bool `json:"detected"`
|
||||
}
|
||||
|
||||
// NewMetaV0alpha1SpecAngular creates a new MetaV0alpha1SpecAngular object.
|
||||
func NewMetaV0alpha1SpecAngular() *MetaV0alpha1SpecAngular {
|
||||
return &MetaV0alpha1SpecAngular{}
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type MetaJSONDataType string
|
||||
|
||||
@@ -472,3 +513,33 @@ const (
|
||||
MetaV0alpha1DependenciesPluginsTypeDatasource MetaV0alpha1DependenciesPluginsType = "datasource"
|
||||
MetaV0alpha1DependenciesPluginsTypePanel MetaV0alpha1DependenciesPluginsType = "panel"
|
||||
)
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type MetaV0alpha1SpecModuleLoadingStrategy string
|
||||
|
||||
const (
|
||||
MetaV0alpha1SpecModuleLoadingStrategyFetch MetaV0alpha1SpecModuleLoadingStrategy = "fetch"
|
||||
MetaV0alpha1SpecModuleLoadingStrategyScript MetaV0alpha1SpecModuleLoadingStrategy = "script"
|
||||
)
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type MetaV0alpha1SpecSignatureStatus string
|
||||
|
||||
const (
|
||||
MetaV0alpha1SpecSignatureStatusInternal MetaV0alpha1SpecSignatureStatus = "internal"
|
||||
MetaV0alpha1SpecSignatureStatusValid MetaV0alpha1SpecSignatureStatus = "valid"
|
||||
MetaV0alpha1SpecSignatureStatusInvalid MetaV0alpha1SpecSignatureStatus = "invalid"
|
||||
MetaV0alpha1SpecSignatureStatusModified MetaV0alpha1SpecSignatureStatus = "modified"
|
||||
MetaV0alpha1SpecSignatureStatusUnsigned MetaV0alpha1SpecSignatureStatus = "unsigned"
|
||||
)
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type MetaV0alpha1SpecSignatureType string
|
||||
|
||||
const (
|
||||
MetaV0alpha1SpecSignatureTypeGrafana MetaV0alpha1SpecSignatureType = "grafana"
|
||||
MetaV0alpha1SpecSignatureTypeCommercial MetaV0alpha1SpecSignatureType = "commercial"
|
||||
MetaV0alpha1SpecSignatureTypeCommunity MetaV0alpha1SpecSignatureType = "community"
|
||||
MetaV0alpha1SpecSignatureTypePrivate MetaV0alpha1SpecSignatureType = "private"
|
||||
MetaV0alpha1SpecSignatureTypePrivateGlob MetaV0alpha1SpecSignatureType = "private-glob"
|
||||
)
|
||||
|
||||
2
apps/plugins/pkg/apis/plugins_manifest.go
generated
2
apps/plugins/pkg/apis/plugins_manifest.go
generated
File diff suppressed because one or more lines are too long
@@ -10,8 +10,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana-app-sdk/logging"
|
||||
|
||||
pluginsv0alpha1 "github.com/grafana/grafana/apps/plugins/pkg/apis/plugins/v0alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -87,45 +85,9 @@ func (p *CatalogProvider) GetMeta(ctx context.Context, pluginID, version string)
|
||||
return nil, fmt.Errorf("failed to decode response: %w", err)
|
||||
}
|
||||
|
||||
metaSpec := grafanaComPluginVersionMetaToMetaSpec(gcomMeta)
|
||||
return &Result{
|
||||
Meta: gcomMeta.JSON,
|
||||
Meta: metaSpec,
|
||||
TTL: p.ttl,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// grafanaComPluginVersionMeta represents the response from grafana.com API
|
||||
// GET /api/plugins/{pluginId}/versions/{version}
|
||||
type grafanaComPluginVersionMeta struct {
|
||||
PluginID string `json:"pluginSlug"`
|
||||
Version string `json:"version"`
|
||||
URL string `json:"url"`
|
||||
Commit string `json:"commit"`
|
||||
Description string `json:"description"`
|
||||
Keywords []string `json:"keywords"`
|
||||
CreatedAt time.Time `json:"createdAt"`
|
||||
UpdatedAt time.Time `json:"updatedAt"`
|
||||
JSON pluginsv0alpha1.MetaJSONData `json:"json"`
|
||||
Readme string `json:"readme"`
|
||||
Downloads int `json:"downloads"`
|
||||
Verified bool `json:"verified"`
|
||||
Status string `json:"status"`
|
||||
StatusContext string `json:"statusContext"`
|
||||
DownloadSlug string `json:"downloadSlug"`
|
||||
SignatureType string `json:"signatureType"`
|
||||
SignedByOrg string `json:"signedByOrg"`
|
||||
SignedByOrgName string `json:"signedByOrgName"`
|
||||
Packages struct {
|
||||
Any struct {
|
||||
Md5 string `json:"md5"`
|
||||
Sha256 string `json:"sha256"`
|
||||
PackageName string `json:"packageName"`
|
||||
DownloadURL string `json:"downloadUrl"`
|
||||
} `json:"any"`
|
||||
} `json:"packages"`
|
||||
Links []struct {
|
||||
Rel string `json:"rel"`
|
||||
Href string `json:"href"`
|
||||
} `json:"links"`
|
||||
AngularDetected bool `json:"angularDetected"`
|
||||
Scopes []string `json:"scopes"`
|
||||
}
|
||||
|
||||
@@ -49,7 +49,7 @@ func TestCatalogProvider_GetMeta(t *testing.T) {
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, result)
|
||||
assert.Equal(t, expectedMeta, result.Meta)
|
||||
assert.Equal(t, expectedMeta, result.Meta.PluginJson)
|
||||
assert.Equal(t, defaultCatalogTTL, result.TTL)
|
||||
})
|
||||
|
||||
|
||||
725
apps/plugins/pkg/app/meta/converter.go
Normal file
725
apps/plugins/pkg/app/meta/converter.go
Normal file
@@ -0,0 +1,725 @@
|
||||
package meta
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
pluginsv0alpha1 "github.com/grafana/grafana/apps/plugins/pkg/apis/plugins/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore"
|
||||
)
|
||||
|
||||
// jsonDataToMetaJSONData converts a plugins.JSONData to a pluginsv0alpha1.MetaJSONData.
|
||||
// nolint:gocyclo
|
||||
func jsonDataToMetaJSONData(jsonData plugins.JSONData) pluginsv0alpha1.MetaJSONData {
|
||||
meta := pluginsv0alpha1.MetaJSONData{
|
||||
Id: jsonData.ID,
|
||||
Name: jsonData.Name,
|
||||
}
|
||||
|
||||
// Map plugin type
|
||||
switch jsonData.Type {
|
||||
case plugins.TypeApp:
|
||||
meta.Type = pluginsv0alpha1.MetaJSONDataTypeApp
|
||||
case plugins.TypeDataSource:
|
||||
meta.Type = pluginsv0alpha1.MetaJSONDataTypeDatasource
|
||||
case plugins.TypePanel:
|
||||
meta.Type = pluginsv0alpha1.MetaJSONDataTypePanel
|
||||
case plugins.TypeRenderer:
|
||||
meta.Type = pluginsv0alpha1.MetaJSONDataTypeRenderer
|
||||
}
|
||||
|
||||
// Map Info
|
||||
meta.Info = pluginsv0alpha1.MetaInfo{
|
||||
Keywords: jsonData.Info.Keywords,
|
||||
Logos: pluginsv0alpha1.MetaV0alpha1InfoLogos{
|
||||
Small: jsonData.Info.Logos.Small,
|
||||
Large: jsonData.Info.Logos.Large,
|
||||
},
|
||||
Updated: jsonData.Info.Updated,
|
||||
Version: jsonData.Info.Version,
|
||||
}
|
||||
|
||||
if jsonData.Info.Description != "" {
|
||||
meta.Info.Description = &jsonData.Info.Description
|
||||
}
|
||||
|
||||
if jsonData.Info.Author.Name != "" || jsonData.Info.Author.URL != "" {
|
||||
author := &pluginsv0alpha1.MetaV0alpha1InfoAuthor{}
|
||||
if jsonData.Info.Author.Name != "" {
|
||||
author.Name = &jsonData.Info.Author.Name
|
||||
}
|
||||
if jsonData.Info.Author.URL != "" {
|
||||
author.Url = &jsonData.Info.Author.URL
|
||||
}
|
||||
meta.Info.Author = author
|
||||
}
|
||||
|
||||
if len(jsonData.Info.Links) > 0 {
|
||||
meta.Info.Links = make([]pluginsv0alpha1.MetaV0alpha1InfoLinks, 0, len(jsonData.Info.Links))
|
||||
for _, link := range jsonData.Info.Links {
|
||||
v0Link := pluginsv0alpha1.MetaV0alpha1InfoLinks{}
|
||||
if link.Name != "" {
|
||||
v0Link.Name = &link.Name
|
||||
}
|
||||
if link.URL != "" {
|
||||
v0Link.Url = &link.URL
|
||||
}
|
||||
meta.Info.Links = append(meta.Info.Links, v0Link)
|
||||
}
|
||||
}
|
||||
|
||||
if len(jsonData.Info.Screenshots) > 0 {
|
||||
meta.Info.Screenshots = make([]pluginsv0alpha1.MetaV0alpha1InfoScreenshots, 0, len(jsonData.Info.Screenshots))
|
||||
for _, screenshot := range jsonData.Info.Screenshots {
|
||||
v0Screenshot := pluginsv0alpha1.MetaV0alpha1InfoScreenshots{}
|
||||
if screenshot.Name != "" {
|
||||
v0Screenshot.Name = &screenshot.Name
|
||||
}
|
||||
if screenshot.Path != "" {
|
||||
v0Screenshot.Path = &screenshot.Path
|
||||
}
|
||||
meta.Info.Screenshots = append(meta.Info.Screenshots, v0Screenshot)
|
||||
}
|
||||
}
|
||||
|
||||
// Map Dependencies
|
||||
meta.Dependencies = pluginsv0alpha1.MetaDependencies{
|
||||
GrafanaDependency: jsonData.Dependencies.GrafanaDependency,
|
||||
}
|
||||
|
||||
if jsonData.Dependencies.GrafanaVersion != "" {
|
||||
meta.Dependencies.GrafanaVersion = &jsonData.Dependencies.GrafanaVersion
|
||||
}
|
||||
|
||||
if len(jsonData.Dependencies.Plugins) > 0 {
|
||||
meta.Dependencies.Plugins = make([]pluginsv0alpha1.MetaV0alpha1DependenciesPlugins, 0, len(jsonData.Dependencies.Plugins))
|
||||
for _, dep := range jsonData.Dependencies.Plugins {
|
||||
var depType pluginsv0alpha1.MetaV0alpha1DependenciesPluginsType
|
||||
switch dep.Type {
|
||||
case "app":
|
||||
depType = pluginsv0alpha1.MetaV0alpha1DependenciesPluginsTypeApp
|
||||
case "datasource":
|
||||
depType = pluginsv0alpha1.MetaV0alpha1DependenciesPluginsTypeDatasource
|
||||
case "panel":
|
||||
depType = pluginsv0alpha1.MetaV0alpha1DependenciesPluginsTypePanel
|
||||
}
|
||||
meta.Dependencies.Plugins = append(meta.Dependencies.Plugins, pluginsv0alpha1.MetaV0alpha1DependenciesPlugins{
|
||||
Id: dep.ID,
|
||||
Type: depType,
|
||||
Name: dep.Name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(jsonData.Dependencies.Extensions.ExposedComponents) > 0 {
|
||||
meta.Dependencies.Extensions = &pluginsv0alpha1.MetaV0alpha1DependenciesExtensions{
|
||||
ExposedComponents: jsonData.Dependencies.Extensions.ExposedComponents,
|
||||
}
|
||||
}
|
||||
|
||||
// Map optional boolean fields
|
||||
if jsonData.Alerting {
|
||||
meta.Alerting = &jsonData.Alerting
|
||||
}
|
||||
if jsonData.Annotations {
|
||||
meta.Annotations = &jsonData.Annotations
|
||||
}
|
||||
if jsonData.AutoEnabled {
|
||||
meta.AutoEnabled = &jsonData.AutoEnabled
|
||||
}
|
||||
if jsonData.Backend {
|
||||
meta.Backend = &jsonData.Backend
|
||||
}
|
||||
if jsonData.BuiltIn {
|
||||
meta.BuiltIn = &jsonData.BuiltIn
|
||||
}
|
||||
if jsonData.HideFromList {
|
||||
meta.HideFromList = &jsonData.HideFromList
|
||||
}
|
||||
if jsonData.Logs {
|
||||
meta.Logs = &jsonData.Logs
|
||||
}
|
||||
if jsonData.Metrics {
|
||||
meta.Metrics = &jsonData.Metrics
|
||||
}
|
||||
if jsonData.MultiValueFilterOperators {
|
||||
meta.MultiValueFilterOperators = &jsonData.MultiValueFilterOperators
|
||||
}
|
||||
if jsonData.Preload {
|
||||
meta.Preload = &jsonData.Preload
|
||||
}
|
||||
if jsonData.SkipDataQuery {
|
||||
meta.SkipDataQuery = &jsonData.SkipDataQuery
|
||||
}
|
||||
if jsonData.Streaming {
|
||||
meta.Streaming = &jsonData.Streaming
|
||||
}
|
||||
if jsonData.Tracing {
|
||||
meta.Tracing = &jsonData.Tracing
|
||||
}
|
||||
|
||||
// Map category
|
||||
if jsonData.Category != "" {
|
||||
var category pluginsv0alpha1.MetaJSONDataCategory
|
||||
switch jsonData.Category {
|
||||
case "tsdb":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryTsdb
|
||||
case "logging":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryLogging
|
||||
case "cloud":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryCloud
|
||||
case "tracing":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryTracing
|
||||
case "profiling":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryProfiling
|
||||
case "sql":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategorySql
|
||||
case "enterprise":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryEnterprise
|
||||
case "iot":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryIot
|
||||
case "other":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryOther
|
||||
default:
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryOther
|
||||
}
|
||||
meta.Category = &category
|
||||
}
|
||||
|
||||
// Map state
|
||||
if jsonData.State != "" {
|
||||
var state pluginsv0alpha1.MetaJSONDataState
|
||||
switch jsonData.State {
|
||||
case plugins.ReleaseStateAlpha:
|
||||
state = pluginsv0alpha1.MetaJSONDataStateAlpha
|
||||
case plugins.ReleaseStateBeta:
|
||||
state = pluginsv0alpha1.MetaJSONDataStateBeta
|
||||
default:
|
||||
}
|
||||
if state != "" {
|
||||
meta.State = &state
|
||||
}
|
||||
}
|
||||
|
||||
// Map executable
|
||||
if jsonData.Executable != "" {
|
||||
meta.Executable = &jsonData.Executable
|
||||
}
|
||||
|
||||
// Map QueryOptions
|
||||
if len(jsonData.QueryOptions) > 0 {
|
||||
queryOptions := &pluginsv0alpha1.MetaQueryOptions{}
|
||||
if val, ok := jsonData.QueryOptions["maxDataPoints"]; ok {
|
||||
queryOptions.MaxDataPoints = &val
|
||||
}
|
||||
if val, ok := jsonData.QueryOptions["minInterval"]; ok {
|
||||
queryOptions.MinInterval = &val
|
||||
}
|
||||
if val, ok := jsonData.QueryOptions["cacheTimeout"]; ok {
|
||||
queryOptions.CacheTimeout = &val
|
||||
}
|
||||
meta.QueryOptions = queryOptions
|
||||
}
|
||||
|
||||
// Map Includes
|
||||
if len(jsonData.Includes) > 0 {
|
||||
meta.Includes = make([]pluginsv0alpha1.MetaInclude, 0, len(jsonData.Includes))
|
||||
for _, include := range jsonData.Includes {
|
||||
v0Include := pluginsv0alpha1.MetaInclude{}
|
||||
if include.UID != "" {
|
||||
v0Include.Uid = &include.UID
|
||||
}
|
||||
if include.Type != "" {
|
||||
var includeType pluginsv0alpha1.MetaIncludeType
|
||||
switch include.Type {
|
||||
case "dashboard":
|
||||
includeType = pluginsv0alpha1.MetaIncludeTypeDashboard
|
||||
case "page":
|
||||
includeType = pluginsv0alpha1.MetaIncludeTypePage
|
||||
case "panel":
|
||||
includeType = pluginsv0alpha1.MetaIncludeTypePanel
|
||||
case "datasource":
|
||||
includeType = pluginsv0alpha1.MetaIncludeTypeDatasource
|
||||
}
|
||||
v0Include.Type = &includeType
|
||||
}
|
||||
if include.Name != "" {
|
||||
v0Include.Name = &include.Name
|
||||
}
|
||||
if include.Component != "" {
|
||||
v0Include.Component = &include.Component
|
||||
}
|
||||
if include.Role != "" {
|
||||
var role pluginsv0alpha1.MetaIncludeRole
|
||||
switch include.Role {
|
||||
case "Admin":
|
||||
role = pluginsv0alpha1.MetaIncludeRoleAdmin
|
||||
case "Editor":
|
||||
role = pluginsv0alpha1.MetaIncludeRoleEditor
|
||||
case "Viewer":
|
||||
role = pluginsv0alpha1.MetaIncludeRoleViewer
|
||||
}
|
||||
v0Include.Role = &role
|
||||
}
|
||||
if include.Action != "" {
|
||||
v0Include.Action = &include.Action
|
||||
}
|
||||
if include.Path != "" {
|
||||
v0Include.Path = &include.Path
|
||||
}
|
||||
if include.AddToNav {
|
||||
v0Include.AddToNav = &include.AddToNav
|
||||
}
|
||||
if include.DefaultNav {
|
||||
v0Include.DefaultNav = &include.DefaultNav
|
||||
}
|
||||
if include.Icon != "" {
|
||||
v0Include.Icon = &include.Icon
|
||||
}
|
||||
meta.Includes = append(meta.Includes, v0Include)
|
||||
}
|
||||
}
|
||||
|
||||
// Map Routes
|
||||
if len(jsonData.Routes) > 0 {
|
||||
meta.Routes = make([]pluginsv0alpha1.MetaRoute, 0, len(jsonData.Routes))
|
||||
for _, route := range jsonData.Routes {
|
||||
v0Route := pluginsv0alpha1.MetaRoute{}
|
||||
if route.Path != "" {
|
||||
v0Route.Path = &route.Path
|
||||
}
|
||||
if route.Method != "" {
|
||||
v0Route.Method = &route.Method
|
||||
}
|
||||
if route.URL != "" {
|
||||
v0Route.Url = &route.URL
|
||||
}
|
||||
if route.ReqRole != "" {
|
||||
reqRole := string(route.ReqRole)
|
||||
v0Route.ReqRole = &reqRole
|
||||
}
|
||||
if route.ReqAction != "" {
|
||||
v0Route.ReqAction = &route.ReqAction
|
||||
}
|
||||
if len(route.Headers) > 0 {
|
||||
headers := make([]string, 0, len(route.Headers))
|
||||
for _, header := range route.Headers {
|
||||
headers = append(headers, header.Name+": "+header.Content)
|
||||
}
|
||||
v0Route.Headers = headers
|
||||
}
|
||||
if len(route.URLParams) > 0 {
|
||||
v0Route.UrlParams = make([]pluginsv0alpha1.MetaV0alpha1RouteUrlParams, 0, len(route.URLParams))
|
||||
for _, param := range route.URLParams {
|
||||
v0Param := pluginsv0alpha1.MetaV0alpha1RouteUrlParams{}
|
||||
if param.Name != "" {
|
||||
v0Param.Name = ¶m.Name
|
||||
}
|
||||
if param.Content != "" {
|
||||
v0Param.Content = ¶m.Content
|
||||
}
|
||||
v0Route.UrlParams = append(v0Route.UrlParams, v0Param)
|
||||
}
|
||||
}
|
||||
if route.TokenAuth != nil {
|
||||
v0Route.TokenAuth = &pluginsv0alpha1.MetaV0alpha1RouteTokenAuth{}
|
||||
if route.TokenAuth.Url != "" {
|
||||
v0Route.TokenAuth.Url = &route.TokenAuth.Url
|
||||
}
|
||||
if len(route.TokenAuth.Scopes) > 0 {
|
||||
v0Route.TokenAuth.Scopes = route.TokenAuth.Scopes
|
||||
}
|
||||
if len(route.TokenAuth.Params) > 0 {
|
||||
v0Route.TokenAuth.Params = make(map[string]interface{})
|
||||
for k, v := range route.TokenAuth.Params {
|
||||
v0Route.TokenAuth.Params[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
if route.JwtTokenAuth != nil {
|
||||
v0Route.JwtTokenAuth = &pluginsv0alpha1.MetaV0alpha1RouteJwtTokenAuth{}
|
||||
if route.JwtTokenAuth.Url != "" {
|
||||
v0Route.JwtTokenAuth.Url = &route.JwtTokenAuth.Url
|
||||
}
|
||||
if len(route.JwtTokenAuth.Scopes) > 0 {
|
||||
v0Route.JwtTokenAuth.Scopes = route.JwtTokenAuth.Scopes
|
||||
}
|
||||
if len(route.JwtTokenAuth.Params) > 0 {
|
||||
v0Route.JwtTokenAuth.Params = make(map[string]interface{})
|
||||
for k, v := range route.JwtTokenAuth.Params {
|
||||
v0Route.JwtTokenAuth.Params[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(route.Body) > 0 {
|
||||
var bodyMap map[string]interface{}
|
||||
if err := json.Unmarshal(route.Body, &bodyMap); err == nil {
|
||||
v0Route.Body = bodyMap
|
||||
}
|
||||
}
|
||||
meta.Routes = append(meta.Routes, v0Route)
|
||||
}
|
||||
}
|
||||
|
||||
// Map Extensions
|
||||
if len(jsonData.Extensions.AddedLinks) > 0 || len(jsonData.Extensions.AddedComponents) > 0 ||
|
||||
len(jsonData.Extensions.ExposedComponents) > 0 || len(jsonData.Extensions.ExtensionPoints) > 0 {
|
||||
extensions := &pluginsv0alpha1.MetaExtensions{}
|
||||
|
||||
if len(jsonData.Extensions.AddedLinks) > 0 {
|
||||
extensions.AddedLinks = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsAddedLinks, 0, len(jsonData.Extensions.AddedLinks))
|
||||
for _, link := range jsonData.Extensions.AddedLinks {
|
||||
v0Link := pluginsv0alpha1.MetaV0alpha1ExtensionsAddedLinks{
|
||||
Targets: link.Targets,
|
||||
Title: link.Title,
|
||||
}
|
||||
if link.Description != "" {
|
||||
v0Link.Description = &link.Description
|
||||
}
|
||||
extensions.AddedLinks = append(extensions.AddedLinks, v0Link)
|
||||
}
|
||||
}
|
||||
|
||||
if len(jsonData.Extensions.AddedComponents) > 0 {
|
||||
extensions.AddedComponents = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsAddedComponents, 0, len(jsonData.Extensions.AddedComponents))
|
||||
for _, comp := range jsonData.Extensions.AddedComponents {
|
||||
v0Comp := pluginsv0alpha1.MetaV0alpha1ExtensionsAddedComponents{
|
||||
Targets: comp.Targets,
|
||||
Title: comp.Title,
|
||||
}
|
||||
if comp.Description != "" {
|
||||
v0Comp.Description = &comp.Description
|
||||
}
|
||||
extensions.AddedComponents = append(extensions.AddedComponents, v0Comp)
|
||||
}
|
||||
}
|
||||
|
||||
if len(jsonData.Extensions.ExposedComponents) > 0 {
|
||||
extensions.ExposedComponents = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsExposedComponents, 0, len(jsonData.Extensions.ExposedComponents))
|
||||
for _, comp := range jsonData.Extensions.ExposedComponents {
|
||||
v0Comp := pluginsv0alpha1.MetaV0alpha1ExtensionsExposedComponents{
|
||||
Id: comp.Id,
|
||||
}
|
||||
if comp.Title != "" {
|
||||
v0Comp.Title = &comp.Title
|
||||
}
|
||||
if comp.Description != "" {
|
||||
v0Comp.Description = &comp.Description
|
||||
}
|
||||
extensions.ExposedComponents = append(extensions.ExposedComponents, v0Comp)
|
||||
}
|
||||
}
|
||||
|
||||
if len(jsonData.Extensions.ExtensionPoints) > 0 {
|
||||
extensions.ExtensionPoints = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsExtensionPoints, 0, len(jsonData.Extensions.ExtensionPoints))
|
||||
for _, point := range jsonData.Extensions.ExtensionPoints {
|
||||
v0Point := pluginsv0alpha1.MetaV0alpha1ExtensionsExtensionPoints{
|
||||
Id: point.Id,
|
||||
}
|
||||
if point.Title != "" {
|
||||
v0Point.Title = &point.Title
|
||||
}
|
||||
if point.Description != "" {
|
||||
v0Point.Description = &point.Description
|
||||
}
|
||||
extensions.ExtensionPoints = append(extensions.ExtensionPoints, v0Point)
|
||||
}
|
||||
}
|
||||
|
||||
meta.Extensions = extensions
|
||||
}
|
||||
|
||||
// Map Roles
|
||||
if len(jsonData.Roles) > 0 {
|
||||
meta.Roles = make([]pluginsv0alpha1.MetaRole, 0, len(jsonData.Roles))
|
||||
for _, role := range jsonData.Roles {
|
||||
v0Role := pluginsv0alpha1.MetaRole{
|
||||
Grants: role.Grants,
|
||||
}
|
||||
if role.Role.Name != "" || role.Role.Description != "" || len(role.Role.Permissions) > 0 {
|
||||
v0RoleRole := &pluginsv0alpha1.MetaV0alpha1RoleRole{}
|
||||
if role.Role.Name != "" {
|
||||
v0RoleRole.Name = &role.Role.Name
|
||||
}
|
||||
if role.Role.Description != "" {
|
||||
v0RoleRole.Description = &role.Role.Description
|
||||
}
|
||||
if len(role.Role.Permissions) > 0 {
|
||||
v0RoleRole.Permissions = make([]pluginsv0alpha1.MetaV0alpha1RoleRolePermissions, 0, len(role.Role.Permissions))
|
||||
for _, perm := range role.Role.Permissions {
|
||||
v0Perm := pluginsv0alpha1.MetaV0alpha1RoleRolePermissions{}
|
||||
if perm.Action != "" {
|
||||
v0Perm.Action = &perm.Action
|
||||
}
|
||||
if perm.Scope != "" {
|
||||
v0Perm.Scope = &perm.Scope
|
||||
}
|
||||
v0RoleRole.Permissions = append(v0RoleRole.Permissions, v0Perm)
|
||||
}
|
||||
}
|
||||
v0Role.Role = v0RoleRole
|
||||
}
|
||||
meta.Roles = append(meta.Roles, v0Role)
|
||||
}
|
||||
}
|
||||
|
||||
// Map IAM
|
||||
if jsonData.IAM != nil && len(jsonData.IAM.Permissions) > 0 {
|
||||
iam := &pluginsv0alpha1.MetaIAM{
|
||||
Permissions: make([]pluginsv0alpha1.MetaV0alpha1IAMPermissions, 0, len(jsonData.IAM.Permissions)),
|
||||
}
|
||||
for _, perm := range jsonData.IAM.Permissions {
|
||||
v0Perm := pluginsv0alpha1.MetaV0alpha1IAMPermissions{}
|
||||
if perm.Action != "" {
|
||||
v0Perm.Action = &perm.Action
|
||||
}
|
||||
if perm.Scope != "" {
|
||||
v0Perm.Scope = &perm.Scope
|
||||
}
|
||||
iam.Permissions = append(iam.Permissions, v0Perm)
|
||||
}
|
||||
meta.Iam = iam
|
||||
}
|
||||
|
||||
return meta
|
||||
}
|
||||
|
||||
// pluginStorePluginToMeta converts a pluginstore.Plugin to a pluginsv0alpha1.MetaSpec.
|
||||
// This is similar to pluginToPluginMetaSpec but works with the plugin store DTO.
|
||||
// loadingStrategy and moduleHash are optional calculated values that can be provided.
|
||||
func pluginStorePluginToMeta(plugin pluginstore.Plugin, loadingStrategy plugins.LoadingStrategy, moduleHash string) pluginsv0alpha1.MetaSpec {
|
||||
metaSpec := pluginsv0alpha1.MetaSpec{
|
||||
PluginJson: jsonDataToMetaJSONData(plugin.JSONData),
|
||||
}
|
||||
|
||||
if plugin.Module != "" {
|
||||
module := &pluginsv0alpha1.MetaV0alpha1SpecModule{
|
||||
Path: plugin.Module,
|
||||
}
|
||||
if moduleHash != "" {
|
||||
module.Hash = &moduleHash
|
||||
}
|
||||
if loadingStrategy != "" {
|
||||
var ls pluginsv0alpha1.MetaV0alpha1SpecModuleLoadingStrategy
|
||||
switch loadingStrategy {
|
||||
case plugins.LoadingStrategyFetch:
|
||||
ls = pluginsv0alpha1.MetaV0alpha1SpecModuleLoadingStrategyFetch
|
||||
case plugins.LoadingStrategyScript:
|
||||
ls = pluginsv0alpha1.MetaV0alpha1SpecModuleLoadingStrategyScript
|
||||
}
|
||||
module.LoadingStrategy = &ls
|
||||
}
|
||||
metaSpec.Module = module
|
||||
}
|
||||
|
||||
if plugin.BaseURL != "" {
|
||||
metaSpec.BaseURL = &plugin.BaseURL
|
||||
}
|
||||
|
||||
if plugin.Signature != "" {
|
||||
signature := &pluginsv0alpha1.MetaV0alpha1SpecSignature{
|
||||
Status: convertSignatureStatus(plugin.Signature),
|
||||
}
|
||||
|
||||
if plugin.SignatureType != "" {
|
||||
sigType := convertSignatureType(plugin.SignatureType)
|
||||
signature.Type = &sigType
|
||||
}
|
||||
|
||||
if plugin.SignatureOrg != "" {
|
||||
signature.Org = &plugin.SignatureOrg
|
||||
}
|
||||
|
||||
metaSpec.Signature = signature
|
||||
}
|
||||
|
||||
if len(plugin.Children) > 0 {
|
||||
metaSpec.Children = plugin.Children
|
||||
}
|
||||
|
||||
metaSpec.Angular = &pluginsv0alpha1.MetaV0alpha1SpecAngular{
|
||||
Detected: plugin.Angular.Detected,
|
||||
}
|
||||
|
||||
if len(plugin.Translations) > 0 {
|
||||
metaSpec.Translations = plugin.Translations
|
||||
}
|
||||
|
||||
return metaSpec
|
||||
}
|
||||
|
||||
// convertSignatureStatus converts plugins.SignatureStatus to pluginsv0alpha1.MetaV0alpha1SpecSignatureStatus.
|
||||
func convertSignatureStatus(status plugins.SignatureStatus) pluginsv0alpha1.MetaV0alpha1SpecSignatureStatus {
|
||||
switch status {
|
||||
case plugins.SignatureStatusInternal:
|
||||
return pluginsv0alpha1.MetaV0alpha1SpecSignatureStatusInternal
|
||||
case plugins.SignatureStatusValid:
|
||||
return pluginsv0alpha1.MetaV0alpha1SpecSignatureStatusValid
|
||||
case plugins.SignatureStatusInvalid:
|
||||
return pluginsv0alpha1.MetaV0alpha1SpecSignatureStatusInvalid
|
||||
case plugins.SignatureStatusModified:
|
||||
return pluginsv0alpha1.MetaV0alpha1SpecSignatureStatusModified
|
||||
case plugins.SignatureStatusUnsigned:
|
||||
return pluginsv0alpha1.MetaV0alpha1SpecSignatureStatusUnsigned
|
||||
default:
|
||||
return pluginsv0alpha1.MetaV0alpha1SpecSignatureStatusUnsigned
|
||||
}
|
||||
}
|
||||
|
||||
// convertSignatureType converts plugins.SignatureType to pluginsv0alpha1.MetaV0alpha1SpecSignatureType.
|
||||
func convertSignatureType(sigType plugins.SignatureType) pluginsv0alpha1.MetaV0alpha1SpecSignatureType {
|
||||
switch sigType {
|
||||
case plugins.SignatureTypeGrafana:
|
||||
return pluginsv0alpha1.MetaV0alpha1SpecSignatureTypeGrafana
|
||||
case plugins.SignatureTypeCommercial:
|
||||
return pluginsv0alpha1.MetaV0alpha1SpecSignatureTypeCommercial
|
||||
case plugins.SignatureTypeCommunity:
|
||||
return pluginsv0alpha1.MetaV0alpha1SpecSignatureTypeCommunity
|
||||
case plugins.SignatureTypePrivate:
|
||||
return pluginsv0alpha1.MetaV0alpha1SpecSignatureTypePrivate
|
||||
case plugins.SignatureTypePrivateGlob:
|
||||
return pluginsv0alpha1.MetaV0alpha1SpecSignatureTypePrivateGlob
|
||||
default:
|
||||
return pluginsv0alpha1.MetaV0alpha1SpecSignatureTypeGrafana
|
||||
}
|
||||
}
|
||||
|
||||
// pluginToMetaSpec converts a fully loaded *plugins.Plugin to a pluginsv0alpha1.MetaSpec.
|
||||
func pluginToMetaSpec(plugin *plugins.Plugin) pluginsv0alpha1.MetaSpec {
|
||||
metaSpec := pluginsv0alpha1.MetaSpec{
|
||||
PluginJson: jsonDataToMetaJSONData(plugin.JSONData),
|
||||
}
|
||||
|
||||
// Set module information
|
||||
if plugin.Module != "" {
|
||||
module := &pluginsv0alpha1.MetaV0alpha1SpecModule{
|
||||
Path: plugin.Module,
|
||||
}
|
||||
|
||||
loadingStrategy := pluginsv0alpha1.MetaV0alpha1SpecModuleLoadingStrategyScript
|
||||
module.LoadingStrategy = &loadingStrategy
|
||||
|
||||
metaSpec.Module = module
|
||||
}
|
||||
|
||||
// Set BaseURL
|
||||
if plugin.BaseURL != "" {
|
||||
metaSpec.BaseURL = &plugin.BaseURL
|
||||
}
|
||||
|
||||
// Set signature information
|
||||
signature := &pluginsv0alpha1.MetaV0alpha1SpecSignature{
|
||||
Status: convertSignatureStatus(plugin.Signature),
|
||||
}
|
||||
|
||||
if plugin.SignatureType != "" {
|
||||
sigType := convertSignatureType(plugin.SignatureType)
|
||||
signature.Type = &sigType
|
||||
}
|
||||
|
||||
if plugin.SignatureOrg != "" {
|
||||
signature.Org = &plugin.SignatureOrg
|
||||
}
|
||||
|
||||
metaSpec.Signature = signature
|
||||
|
||||
if len(plugin.Children) > 0 {
|
||||
children := make([]string, 0, len(plugin.Children))
|
||||
for _, child := range plugin.Children {
|
||||
children = append(children, child.ID)
|
||||
}
|
||||
metaSpec.Children = children
|
||||
}
|
||||
|
||||
metaSpec.Angular = &pluginsv0alpha1.MetaV0alpha1SpecAngular{
|
||||
Detected: plugin.Angular.Detected,
|
||||
}
|
||||
|
||||
if len(plugin.Translations) > 0 {
|
||||
metaSpec.Translations = plugin.Translations
|
||||
}
|
||||
|
||||
return metaSpec
|
||||
}
|
||||
|
||||
// grafanaComPluginVersionMeta represents the response from grafana.com API
|
||||
// GET /api/plugins/{pluginId}/versions/{version}
|
||||
type grafanaComPluginVersionMeta struct {
|
||||
PluginID string `json:"pluginSlug"`
|
||||
Version string `json:"version"`
|
||||
URL string `json:"url"`
|
||||
Commit string `json:"commit"`
|
||||
Description string `json:"description"`
|
||||
Keywords []string `json:"keywords"`
|
||||
CreatedAt time.Time `json:"createdAt"`
|
||||
UpdatedAt time.Time `json:"updatedAt"`
|
||||
JSON pluginsv0alpha1.MetaJSONData `json:"json"`
|
||||
Readme string `json:"readme"`
|
||||
Downloads int `json:"downloads"`
|
||||
Verified bool `json:"verified"`
|
||||
Status string `json:"status"`
|
||||
StatusContext string `json:"statusContext"`
|
||||
DownloadSlug string `json:"downloadSlug"`
|
||||
SignatureType string `json:"signatureType"`
|
||||
SignedByOrg string `json:"signedByOrg"`
|
||||
SignedByOrgName string `json:"signedByOrgName"`
|
||||
Packages struct {
|
||||
Any struct {
|
||||
Md5 string `json:"md5"`
|
||||
Sha256 string `json:"sha256"`
|
||||
PackageName string `json:"packageName"`
|
||||
DownloadURL string `json:"downloadUrl"`
|
||||
} `json:"any"`
|
||||
} `json:"packages"`
|
||||
Links []struct {
|
||||
Rel string `json:"rel"`
|
||||
Href string `json:"href"`
|
||||
} `json:"links"`
|
||||
AngularDetected bool `json:"angularDetected"`
|
||||
Scopes []string `json:"scopes"`
|
||||
}
|
||||
|
||||
// grafanaComPluginVersionMetaToMetaSpec converts a grafanaComPluginVersionMeta to a pluginsv0alpha1.MetaSpec.
|
||||
func grafanaComPluginVersionMetaToMetaSpec(gcomMeta grafanaComPluginVersionMeta) pluginsv0alpha1.MetaSpec {
|
||||
metaSpec := pluginsv0alpha1.MetaSpec{
|
||||
PluginJson: gcomMeta.JSON,
|
||||
}
|
||||
|
||||
if gcomMeta.SignatureType != "" {
|
||||
signature := &pluginsv0alpha1.MetaV0alpha1SpecSignature{
|
||||
Status: pluginsv0alpha1.MetaV0alpha1SpecSignatureStatusValid,
|
||||
}
|
||||
|
||||
switch gcomMeta.SignatureType {
|
||||
case "grafana":
|
||||
sigType := pluginsv0alpha1.MetaV0alpha1SpecSignatureTypeGrafana
|
||||
signature.Type = &sigType
|
||||
case "commercial":
|
||||
sigType := pluginsv0alpha1.MetaV0alpha1SpecSignatureTypeCommercial
|
||||
signature.Type = &sigType
|
||||
case "community":
|
||||
sigType := pluginsv0alpha1.MetaV0alpha1SpecSignatureTypeCommunity
|
||||
signature.Type = &sigType
|
||||
case "private":
|
||||
sigType := pluginsv0alpha1.MetaV0alpha1SpecSignatureTypePrivate
|
||||
signature.Type = &sigType
|
||||
case "private-glob":
|
||||
sigType := pluginsv0alpha1.MetaV0alpha1SpecSignatureTypePrivateGlob
|
||||
signature.Type = &sigType
|
||||
}
|
||||
|
||||
if gcomMeta.SignedByOrg != "" {
|
||||
signature.Org = &gcomMeta.SignedByOrg
|
||||
}
|
||||
|
||||
metaSpec.Signature = signature
|
||||
}
|
||||
|
||||
// Set angular info
|
||||
metaSpec.Angular = &pluginsv0alpha1.MetaV0alpha1SpecAngular{
|
||||
Detected: gcomMeta.AngularDetected,
|
||||
}
|
||||
|
||||
return metaSpec
|
||||
}
|
||||
@@ -2,7 +2,6 @@ package meta
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -13,7 +12,15 @@ import (
|
||||
|
||||
pluginsv0alpha1 "github.com/grafana/grafana/apps/plugins/pkg/apis/plugins/v0alpha1"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/plugins/config"
|
||||
pluginsLoader "github.com/grafana/grafana/pkg/plugins/manager/loader"
|
||||
"github.com/grafana/grafana/pkg/plugins/manager/pipeline/bootstrap"
|
||||
"github.com/grafana/grafana/pkg/plugins/manager/pipeline/discovery"
|
||||
"github.com/grafana/grafana/pkg/plugins/manager/pipeline/initialization"
|
||||
"github.com/grafana/grafana/pkg/plugins/manager/pipeline/termination"
|
||||
"github.com/grafana/grafana/pkg/plugins/manager/pipeline/validation"
|
||||
"github.com/grafana/grafana/pkg/plugins/manager/sources"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginerrs"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -23,9 +30,10 @@ const (
|
||||
// CoreProvider retrieves plugin metadata for core plugins.
|
||||
type CoreProvider struct {
|
||||
mu sync.RWMutex
|
||||
loadedPlugins map[string]pluginsv0alpha1.MetaJSONData
|
||||
loadedPlugins map[string]pluginsv0alpha1.MetaSpec
|
||||
initialized bool
|
||||
ttl time.Duration
|
||||
loader pluginsLoader.Service
|
||||
}
|
||||
|
||||
// NewCoreProvider creates a new CoreProvider for core plugins.
|
||||
@@ -35,9 +43,13 @@ func NewCoreProvider() *CoreProvider {
|
||||
|
||||
// NewCoreProviderWithTTL creates a new CoreProvider with a custom TTL.
|
||||
func NewCoreProviderWithTTL(ttl time.Duration) *CoreProvider {
|
||||
cfg := &config.PluginManagementCfg{
|
||||
Features: config.Features{},
|
||||
}
|
||||
return &CoreProvider{
|
||||
loadedPlugins: make(map[string]pluginsv0alpha1.MetaJSONData),
|
||||
loadedPlugins: make(map[string]pluginsv0alpha1.MetaSpec),
|
||||
ttl: ttl,
|
||||
loader: createLoader(cfg),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -76,9 +88,9 @@ func (p *CoreProvider) GetMeta(ctx context.Context, pluginID, _ string) (*Result
|
||||
p.initialized = true
|
||||
}
|
||||
|
||||
if meta, found := p.loadedPlugins[pluginID]; found {
|
||||
if spec, found := p.loadedPlugins[pluginID]; found {
|
||||
return &Result{
|
||||
Meta: meta,
|
||||
Meta: spec,
|
||||
TTL: p.ttl,
|
||||
}, nil
|
||||
}
|
||||
@@ -86,8 +98,8 @@ func (p *CoreProvider) GetMeta(ctx context.Context, pluginID, _ string) (*Result
|
||||
return nil, ErrMetaNotFound
|
||||
}
|
||||
|
||||
// loadPlugins discovers and caches all core plugins.
|
||||
// Returns an error if the static root path cannot be found or if plugin discovery fails.
|
||||
// loadPlugins discovers and caches all core plugins by fully loading them.
|
||||
// Returns an error if the static root path cannot be found or if plugin loading fails.
|
||||
// This error will be handled gracefully by GetMeta, which will return ErrMetaNotFound
|
||||
// to allow other providers to handle the request.
|
||||
func (p *CoreProvider) loadPlugins(ctx context.Context) error {
|
||||
@@ -108,496 +120,51 @@ func (p *CoreProvider) loadPlugins(ctx context.Context) error {
|
||||
panelPath := filepath.Join(staticRootPath, "app", "plugins", "panel")
|
||||
|
||||
src := sources.NewLocalSource(plugins.ClassCore, []string{datasourcePath, panelPath})
|
||||
ps, err := src.Discover(ctx)
|
||||
loadedPlugins, err := p.loader.Load(ctx, src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(ps) == 0 {
|
||||
logging.DefaultLogger.Warn("CoreProvider: no core plugins found during discovery")
|
||||
if len(loadedPlugins) == 0 {
|
||||
logging.DefaultLogger.Warn("CoreProvider: no core plugins found during loading")
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, bundle := range ps {
|
||||
meta := jsonDataToMetaJSONData(bundle.Primary.JSONData)
|
||||
p.loadedPlugins[bundle.Primary.JSONData.ID] = meta
|
||||
for _, plugin := range loadedPlugins {
|
||||
metaSpec := pluginToMetaSpec(plugin)
|
||||
p.loadedPlugins[plugin.ID] = metaSpec
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// jsonDataToMetaJSONData converts a plugins.JSONData to a pluginsv0alpha1.MetaJSONData.
|
||||
// nolint:gocyclo
|
||||
func jsonDataToMetaJSONData(jsonData plugins.JSONData) pluginsv0alpha1.MetaJSONData {
|
||||
meta := pluginsv0alpha1.MetaJSONData{
|
||||
Id: jsonData.ID,
|
||||
Name: jsonData.Name,
|
||||
}
|
||||
|
||||
// Map plugin type
|
||||
switch jsonData.Type {
|
||||
case plugins.TypeApp:
|
||||
meta.Type = pluginsv0alpha1.MetaJSONDataTypeApp
|
||||
case plugins.TypeDataSource:
|
||||
meta.Type = pluginsv0alpha1.MetaJSONDataTypeDatasource
|
||||
case plugins.TypePanel:
|
||||
meta.Type = pluginsv0alpha1.MetaJSONDataTypePanel
|
||||
case plugins.TypeRenderer:
|
||||
meta.Type = pluginsv0alpha1.MetaJSONDataTypeRenderer
|
||||
}
|
||||
|
||||
// Map Info
|
||||
meta.Info = pluginsv0alpha1.MetaInfo{
|
||||
Keywords: jsonData.Info.Keywords,
|
||||
Logos: pluginsv0alpha1.MetaV0alpha1InfoLogos{
|
||||
Small: jsonData.Info.Logos.Small,
|
||||
Large: jsonData.Info.Logos.Large,
|
||||
// createLoader creates a loader service configured for core plugins.
|
||||
func createLoader(cfg *config.PluginManagementCfg) pluginsLoader.Service {
|
||||
d := discovery.New(cfg, discovery.Opts{
|
||||
FilterFuncs: []discovery.FilterFunc{
|
||||
// Allow all plugin types for core plugins
|
||||
},
|
||||
Updated: jsonData.Info.Updated,
|
||||
Version: jsonData.Info.Version,
|
||||
}
|
||||
})
|
||||
b := bootstrap.New(cfg, bootstrap.Opts{
|
||||
DecorateFuncs: []bootstrap.DecorateFunc{}, // no decoration required for metadata
|
||||
})
|
||||
v := validation.New(cfg, validation.Opts{
|
||||
ValidateFuncs: []validation.ValidateFunc{
|
||||
// Skip validation for core plugins - they're trusted
|
||||
},
|
||||
})
|
||||
i := initialization.New(cfg, initialization.Opts{
|
||||
InitializeFuncs: []initialization.InitializeFunc{
|
||||
// Skip initialization - we only need metadata, not running plugins
|
||||
},
|
||||
})
|
||||
t, _ := termination.New(cfg, termination.Opts{
|
||||
TerminateFuncs: []termination.TerminateFunc{
|
||||
// No termination needed for metadata-only loading
|
||||
},
|
||||
})
|
||||
|
||||
if jsonData.Info.Description != "" {
|
||||
meta.Info.Description = &jsonData.Info.Description
|
||||
}
|
||||
et := pluginerrs.ProvideErrorTracker()
|
||||
|
||||
if jsonData.Info.Author.Name != "" || jsonData.Info.Author.URL != "" {
|
||||
author := &pluginsv0alpha1.MetaV0alpha1InfoAuthor{}
|
||||
if jsonData.Info.Author.Name != "" {
|
||||
author.Name = &jsonData.Info.Author.Name
|
||||
}
|
||||
if jsonData.Info.Author.URL != "" {
|
||||
author.Url = &jsonData.Info.Author.URL
|
||||
}
|
||||
meta.Info.Author = author
|
||||
}
|
||||
|
||||
if len(jsonData.Info.Links) > 0 {
|
||||
meta.Info.Links = make([]pluginsv0alpha1.MetaV0alpha1InfoLinks, 0, len(jsonData.Info.Links))
|
||||
for _, link := range jsonData.Info.Links {
|
||||
v0Link := pluginsv0alpha1.MetaV0alpha1InfoLinks{}
|
||||
if link.Name != "" {
|
||||
v0Link.Name = &link.Name
|
||||
}
|
||||
if link.URL != "" {
|
||||
v0Link.Url = &link.URL
|
||||
}
|
||||
meta.Info.Links = append(meta.Info.Links, v0Link)
|
||||
}
|
||||
}
|
||||
|
||||
if len(jsonData.Info.Screenshots) > 0 {
|
||||
meta.Info.Screenshots = make([]pluginsv0alpha1.MetaV0alpha1InfoScreenshots, 0, len(jsonData.Info.Screenshots))
|
||||
for _, screenshot := range jsonData.Info.Screenshots {
|
||||
v0Screenshot := pluginsv0alpha1.MetaV0alpha1InfoScreenshots{}
|
||||
if screenshot.Name != "" {
|
||||
v0Screenshot.Name = &screenshot.Name
|
||||
}
|
||||
if screenshot.Path != "" {
|
||||
v0Screenshot.Path = &screenshot.Path
|
||||
}
|
||||
meta.Info.Screenshots = append(meta.Info.Screenshots, v0Screenshot)
|
||||
}
|
||||
}
|
||||
|
||||
// Map Dependencies
|
||||
meta.Dependencies = pluginsv0alpha1.MetaDependencies{
|
||||
GrafanaDependency: jsonData.Dependencies.GrafanaDependency,
|
||||
}
|
||||
|
||||
if jsonData.Dependencies.GrafanaVersion != "" {
|
||||
meta.Dependencies.GrafanaVersion = &jsonData.Dependencies.GrafanaVersion
|
||||
}
|
||||
|
||||
if len(jsonData.Dependencies.Plugins) > 0 {
|
||||
meta.Dependencies.Plugins = make([]pluginsv0alpha1.MetaV0alpha1DependenciesPlugins, 0, len(jsonData.Dependencies.Plugins))
|
||||
for _, dep := range jsonData.Dependencies.Plugins {
|
||||
var depType pluginsv0alpha1.MetaV0alpha1DependenciesPluginsType
|
||||
switch dep.Type {
|
||||
case "app":
|
||||
depType = pluginsv0alpha1.MetaV0alpha1DependenciesPluginsTypeApp
|
||||
case "datasource":
|
||||
depType = pluginsv0alpha1.MetaV0alpha1DependenciesPluginsTypeDatasource
|
||||
case "panel":
|
||||
depType = pluginsv0alpha1.MetaV0alpha1DependenciesPluginsTypePanel
|
||||
}
|
||||
meta.Dependencies.Plugins = append(meta.Dependencies.Plugins, pluginsv0alpha1.MetaV0alpha1DependenciesPlugins{
|
||||
Id: dep.ID,
|
||||
Type: depType,
|
||||
Name: dep.Name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(jsonData.Dependencies.Extensions.ExposedComponents) > 0 {
|
||||
meta.Dependencies.Extensions = &pluginsv0alpha1.MetaV0alpha1DependenciesExtensions{
|
||||
ExposedComponents: jsonData.Dependencies.Extensions.ExposedComponents,
|
||||
}
|
||||
}
|
||||
|
||||
// Map optional boolean fields
|
||||
if jsonData.Alerting {
|
||||
meta.Alerting = &jsonData.Alerting
|
||||
}
|
||||
if jsonData.Annotations {
|
||||
meta.Annotations = &jsonData.Annotations
|
||||
}
|
||||
if jsonData.AutoEnabled {
|
||||
meta.AutoEnabled = &jsonData.AutoEnabled
|
||||
}
|
||||
if jsonData.Backend {
|
||||
meta.Backend = &jsonData.Backend
|
||||
}
|
||||
if jsonData.BuiltIn {
|
||||
meta.BuiltIn = &jsonData.BuiltIn
|
||||
}
|
||||
if jsonData.HideFromList {
|
||||
meta.HideFromList = &jsonData.HideFromList
|
||||
}
|
||||
if jsonData.Logs {
|
||||
meta.Logs = &jsonData.Logs
|
||||
}
|
||||
if jsonData.Metrics {
|
||||
meta.Metrics = &jsonData.Metrics
|
||||
}
|
||||
if jsonData.MultiValueFilterOperators {
|
||||
meta.MultiValueFilterOperators = &jsonData.MultiValueFilterOperators
|
||||
}
|
||||
if jsonData.Preload {
|
||||
meta.Preload = &jsonData.Preload
|
||||
}
|
||||
if jsonData.SkipDataQuery {
|
||||
meta.SkipDataQuery = &jsonData.SkipDataQuery
|
||||
}
|
||||
if jsonData.Streaming {
|
||||
meta.Streaming = &jsonData.Streaming
|
||||
}
|
||||
if jsonData.Tracing {
|
||||
meta.Tracing = &jsonData.Tracing
|
||||
}
|
||||
|
||||
// Map category
|
||||
if jsonData.Category != "" {
|
||||
var category pluginsv0alpha1.MetaJSONDataCategory
|
||||
switch jsonData.Category {
|
||||
case "tsdb":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryTsdb
|
||||
case "logging":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryLogging
|
||||
case "cloud":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryCloud
|
||||
case "tracing":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryTracing
|
||||
case "profiling":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryProfiling
|
||||
case "sql":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategorySql
|
||||
case "enterprise":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryEnterprise
|
||||
case "iot":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryIot
|
||||
case "other":
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryOther
|
||||
default:
|
||||
category = pluginsv0alpha1.MetaJSONDataCategoryOther
|
||||
}
|
||||
meta.Category = &category
|
||||
}
|
||||
|
||||
// Map state
|
||||
if jsonData.State != "" {
|
||||
var state pluginsv0alpha1.MetaJSONDataState
|
||||
switch jsonData.State {
|
||||
case plugins.ReleaseStateAlpha:
|
||||
state = pluginsv0alpha1.MetaJSONDataStateAlpha
|
||||
case plugins.ReleaseStateBeta:
|
||||
state = pluginsv0alpha1.MetaJSONDataStateBeta
|
||||
default:
|
||||
}
|
||||
if state != "" {
|
||||
meta.State = &state
|
||||
}
|
||||
}
|
||||
|
||||
// Map executable
|
||||
if jsonData.Executable != "" {
|
||||
meta.Executable = &jsonData.Executable
|
||||
}
|
||||
|
||||
// Map QueryOptions
|
||||
if len(jsonData.QueryOptions) > 0 {
|
||||
queryOptions := &pluginsv0alpha1.MetaQueryOptions{}
|
||||
if val, ok := jsonData.QueryOptions["maxDataPoints"]; ok {
|
||||
queryOptions.MaxDataPoints = &val
|
||||
}
|
||||
if val, ok := jsonData.QueryOptions["minInterval"]; ok {
|
||||
queryOptions.MinInterval = &val
|
||||
}
|
||||
if val, ok := jsonData.QueryOptions["cacheTimeout"]; ok {
|
||||
queryOptions.CacheTimeout = &val
|
||||
}
|
||||
meta.QueryOptions = queryOptions
|
||||
}
|
||||
|
||||
// Map Includes
|
||||
if len(jsonData.Includes) > 0 {
|
||||
meta.Includes = make([]pluginsv0alpha1.MetaInclude, 0, len(jsonData.Includes))
|
||||
for _, include := range jsonData.Includes {
|
||||
v0Include := pluginsv0alpha1.MetaInclude{}
|
||||
if include.UID != "" {
|
||||
v0Include.Uid = &include.UID
|
||||
}
|
||||
if include.Type != "" {
|
||||
var includeType pluginsv0alpha1.MetaIncludeType
|
||||
switch include.Type {
|
||||
case "dashboard":
|
||||
includeType = pluginsv0alpha1.MetaIncludeTypeDashboard
|
||||
case "page":
|
||||
includeType = pluginsv0alpha1.MetaIncludeTypePage
|
||||
case "panel":
|
||||
includeType = pluginsv0alpha1.MetaIncludeTypePanel
|
||||
case "datasource":
|
||||
includeType = pluginsv0alpha1.MetaIncludeTypeDatasource
|
||||
}
|
||||
v0Include.Type = &includeType
|
||||
}
|
||||
if include.Name != "" {
|
||||
v0Include.Name = &include.Name
|
||||
}
|
||||
if include.Component != "" {
|
||||
v0Include.Component = &include.Component
|
||||
}
|
||||
if include.Role != "" {
|
||||
var role pluginsv0alpha1.MetaIncludeRole
|
||||
switch include.Role {
|
||||
case "Admin":
|
||||
role = pluginsv0alpha1.MetaIncludeRoleAdmin
|
||||
case "Editor":
|
||||
role = pluginsv0alpha1.MetaIncludeRoleEditor
|
||||
case "Viewer":
|
||||
role = pluginsv0alpha1.MetaIncludeRoleViewer
|
||||
}
|
||||
v0Include.Role = &role
|
||||
}
|
||||
if include.Action != "" {
|
||||
v0Include.Action = &include.Action
|
||||
}
|
||||
if include.Path != "" {
|
||||
v0Include.Path = &include.Path
|
||||
}
|
||||
if include.AddToNav {
|
||||
v0Include.AddToNav = &include.AddToNav
|
||||
}
|
||||
if include.DefaultNav {
|
||||
v0Include.DefaultNav = &include.DefaultNav
|
||||
}
|
||||
if include.Icon != "" {
|
||||
v0Include.Icon = &include.Icon
|
||||
}
|
||||
meta.Includes = append(meta.Includes, v0Include)
|
||||
}
|
||||
}
|
||||
|
||||
// Map Routes
|
||||
if len(jsonData.Routes) > 0 {
|
||||
meta.Routes = make([]pluginsv0alpha1.MetaRoute, 0, len(jsonData.Routes))
|
||||
for _, route := range jsonData.Routes {
|
||||
v0Route := pluginsv0alpha1.MetaRoute{}
|
||||
if route.Path != "" {
|
||||
v0Route.Path = &route.Path
|
||||
}
|
||||
if route.Method != "" {
|
||||
v0Route.Method = &route.Method
|
||||
}
|
||||
if route.URL != "" {
|
||||
v0Route.Url = &route.URL
|
||||
}
|
||||
if route.ReqRole != "" {
|
||||
reqRole := string(route.ReqRole)
|
||||
v0Route.ReqRole = &reqRole
|
||||
}
|
||||
if route.ReqAction != "" {
|
||||
v0Route.ReqAction = &route.ReqAction
|
||||
}
|
||||
if len(route.Headers) > 0 {
|
||||
headers := make([]string, 0, len(route.Headers))
|
||||
for _, header := range route.Headers {
|
||||
headers = append(headers, header.Name+": "+header.Content)
|
||||
}
|
||||
v0Route.Headers = headers
|
||||
}
|
||||
if len(route.URLParams) > 0 {
|
||||
v0Route.UrlParams = make([]pluginsv0alpha1.MetaV0alpha1RouteUrlParams, 0, len(route.URLParams))
|
||||
for _, param := range route.URLParams {
|
||||
v0Param := pluginsv0alpha1.MetaV0alpha1RouteUrlParams{}
|
||||
if param.Name != "" {
|
||||
v0Param.Name = ¶m.Name
|
||||
}
|
||||
if param.Content != "" {
|
||||
v0Param.Content = ¶m.Content
|
||||
}
|
||||
v0Route.UrlParams = append(v0Route.UrlParams, v0Param)
|
||||
}
|
||||
}
|
||||
if route.TokenAuth != nil {
|
||||
v0Route.TokenAuth = &pluginsv0alpha1.MetaV0alpha1RouteTokenAuth{}
|
||||
if route.TokenAuth.Url != "" {
|
||||
v0Route.TokenAuth.Url = &route.TokenAuth.Url
|
||||
}
|
||||
if len(route.TokenAuth.Scopes) > 0 {
|
||||
v0Route.TokenAuth.Scopes = route.TokenAuth.Scopes
|
||||
}
|
||||
if len(route.TokenAuth.Params) > 0 {
|
||||
v0Route.TokenAuth.Params = make(map[string]interface{})
|
||||
for k, v := range route.TokenAuth.Params {
|
||||
v0Route.TokenAuth.Params[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
if route.JwtTokenAuth != nil {
|
||||
v0Route.JwtTokenAuth = &pluginsv0alpha1.MetaV0alpha1RouteJwtTokenAuth{}
|
||||
if route.JwtTokenAuth.Url != "" {
|
||||
v0Route.JwtTokenAuth.Url = &route.JwtTokenAuth.Url
|
||||
}
|
||||
if len(route.JwtTokenAuth.Scopes) > 0 {
|
||||
v0Route.JwtTokenAuth.Scopes = route.JwtTokenAuth.Scopes
|
||||
}
|
||||
if len(route.JwtTokenAuth.Params) > 0 {
|
||||
v0Route.JwtTokenAuth.Params = make(map[string]interface{})
|
||||
for k, v := range route.JwtTokenAuth.Params {
|
||||
v0Route.JwtTokenAuth.Params[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(route.Body) > 0 {
|
||||
var bodyMap map[string]interface{}
|
||||
if err := json.Unmarshal(route.Body, &bodyMap); err == nil {
|
||||
v0Route.Body = bodyMap
|
||||
}
|
||||
}
|
||||
meta.Routes = append(meta.Routes, v0Route)
|
||||
}
|
||||
}
|
||||
|
||||
// Map Extensions
|
||||
if len(jsonData.Extensions.AddedLinks) > 0 || len(jsonData.Extensions.AddedComponents) > 0 ||
|
||||
len(jsonData.Extensions.ExposedComponents) > 0 || len(jsonData.Extensions.ExtensionPoints) > 0 {
|
||||
extensions := &pluginsv0alpha1.MetaExtensions{}
|
||||
|
||||
if len(jsonData.Extensions.AddedLinks) > 0 {
|
||||
extensions.AddedLinks = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsAddedLinks, 0, len(jsonData.Extensions.AddedLinks))
|
||||
for _, link := range jsonData.Extensions.AddedLinks {
|
||||
v0Link := pluginsv0alpha1.MetaV0alpha1ExtensionsAddedLinks{
|
||||
Targets: link.Targets,
|
||||
Title: link.Title,
|
||||
}
|
||||
if link.Description != "" {
|
||||
v0Link.Description = &link.Description
|
||||
}
|
||||
extensions.AddedLinks = append(extensions.AddedLinks, v0Link)
|
||||
}
|
||||
}
|
||||
|
||||
if len(jsonData.Extensions.AddedComponents) > 0 {
|
||||
extensions.AddedComponents = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsAddedComponents, 0, len(jsonData.Extensions.AddedComponents))
|
||||
for _, comp := range jsonData.Extensions.AddedComponents {
|
||||
v0Comp := pluginsv0alpha1.MetaV0alpha1ExtensionsAddedComponents{
|
||||
Targets: comp.Targets,
|
||||
Title: comp.Title,
|
||||
}
|
||||
if comp.Description != "" {
|
||||
v0Comp.Description = &comp.Description
|
||||
}
|
||||
extensions.AddedComponents = append(extensions.AddedComponents, v0Comp)
|
||||
}
|
||||
}
|
||||
|
||||
if len(jsonData.Extensions.ExposedComponents) > 0 {
|
||||
extensions.ExposedComponents = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsExposedComponents, 0, len(jsonData.Extensions.ExposedComponents))
|
||||
for _, comp := range jsonData.Extensions.ExposedComponents {
|
||||
v0Comp := pluginsv0alpha1.MetaV0alpha1ExtensionsExposedComponents{
|
||||
Id: comp.Id,
|
||||
}
|
||||
if comp.Title != "" {
|
||||
v0Comp.Title = &comp.Title
|
||||
}
|
||||
if comp.Description != "" {
|
||||
v0Comp.Description = &comp.Description
|
||||
}
|
||||
extensions.ExposedComponents = append(extensions.ExposedComponents, v0Comp)
|
||||
}
|
||||
}
|
||||
|
||||
if len(jsonData.Extensions.ExtensionPoints) > 0 {
|
||||
extensions.ExtensionPoints = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsExtensionPoints, 0, len(jsonData.Extensions.ExtensionPoints))
|
||||
for _, point := range jsonData.Extensions.ExtensionPoints {
|
||||
v0Point := pluginsv0alpha1.MetaV0alpha1ExtensionsExtensionPoints{
|
||||
Id: point.Id,
|
||||
}
|
||||
if point.Title != "" {
|
||||
v0Point.Title = &point.Title
|
||||
}
|
||||
if point.Description != "" {
|
||||
v0Point.Description = &point.Description
|
||||
}
|
||||
extensions.ExtensionPoints = append(extensions.ExtensionPoints, v0Point)
|
||||
}
|
||||
}
|
||||
|
||||
meta.Extensions = extensions
|
||||
}
|
||||
|
||||
// Map Roles
|
||||
if len(jsonData.Roles) > 0 {
|
||||
meta.Roles = make([]pluginsv0alpha1.MetaRole, 0, len(jsonData.Roles))
|
||||
for _, role := range jsonData.Roles {
|
||||
v0Role := pluginsv0alpha1.MetaRole{
|
||||
Grants: role.Grants,
|
||||
}
|
||||
if role.Role.Name != "" || role.Role.Description != "" || len(role.Role.Permissions) > 0 {
|
||||
v0RoleRole := &pluginsv0alpha1.MetaV0alpha1RoleRole{}
|
||||
if role.Role.Name != "" {
|
||||
v0RoleRole.Name = &role.Role.Name
|
||||
}
|
||||
if role.Role.Description != "" {
|
||||
v0RoleRole.Description = &role.Role.Description
|
||||
}
|
||||
if len(role.Role.Permissions) > 0 {
|
||||
v0RoleRole.Permissions = make([]pluginsv0alpha1.MetaV0alpha1RoleRolePermissions, 0, len(role.Role.Permissions))
|
||||
for _, perm := range role.Role.Permissions {
|
||||
v0Perm := pluginsv0alpha1.MetaV0alpha1RoleRolePermissions{}
|
||||
if perm.Action != "" {
|
||||
v0Perm.Action = &perm.Action
|
||||
}
|
||||
if perm.Scope != "" {
|
||||
v0Perm.Scope = &perm.Scope
|
||||
}
|
||||
v0RoleRole.Permissions = append(v0RoleRole.Permissions, v0Perm)
|
||||
}
|
||||
}
|
||||
v0Role.Role = v0RoleRole
|
||||
}
|
||||
meta.Roles = append(meta.Roles, v0Role)
|
||||
}
|
||||
}
|
||||
|
||||
// Map IAM
|
||||
if jsonData.IAM != nil && len(jsonData.IAM.Permissions) > 0 {
|
||||
iam := &pluginsv0alpha1.MetaIAM{
|
||||
Permissions: make([]pluginsv0alpha1.MetaV0alpha1IAMPermissions, 0, len(jsonData.IAM.Permissions)),
|
||||
}
|
||||
for _, perm := range jsonData.IAM.Permissions {
|
||||
v0Perm := pluginsv0alpha1.MetaV0alpha1IAMPermissions{}
|
||||
if perm.Action != "" {
|
||||
v0Perm.Action = &perm.Action
|
||||
}
|
||||
if perm.Scope != "" {
|
||||
v0Perm.Scope = &perm.Scope
|
||||
}
|
||||
iam.Permissions = append(iam.Permissions, v0Perm)
|
||||
}
|
||||
meta.Iam = iam
|
||||
}
|
||||
|
||||
return meta
|
||||
return pluginsLoader.New(cfg, d, b, v, i, t, et)
|
||||
}
|
||||
|
||||
@@ -22,10 +22,12 @@ func TestCoreProvider_GetMeta(t *testing.T) {
|
||||
t.Run("returns cached plugin when available", func(t *testing.T) {
|
||||
provider := NewCoreProvider()
|
||||
|
||||
expectedMeta := pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
expectedMeta := pluginsv0alpha1.MetaSpec{
|
||||
PluginJson: pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
},
|
||||
}
|
||||
|
||||
provider.mu.Lock()
|
||||
@@ -58,10 +60,12 @@ func TestCoreProvider_GetMeta(t *testing.T) {
|
||||
t.Run("ignores version parameter", func(t *testing.T) {
|
||||
provider := NewCoreProvider()
|
||||
|
||||
expectedMeta := pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
expectedMeta := pluginsv0alpha1.MetaSpec{
|
||||
PluginJson: pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
},
|
||||
}
|
||||
|
||||
provider.mu.Lock()
|
||||
@@ -81,10 +85,12 @@ func TestCoreProvider_GetMeta(t *testing.T) {
|
||||
customTTL := 2 * time.Hour
|
||||
provider := NewCoreProviderWithTTL(customTTL)
|
||||
|
||||
expectedMeta := pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
expectedMeta := pluginsv0alpha1.MetaSpec{
|
||||
PluginJson: pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
},
|
||||
}
|
||||
|
||||
provider.mu.Lock()
|
||||
@@ -226,8 +232,8 @@ func TestCoreProvider_loadPlugins(t *testing.T) {
|
||||
if loaded {
|
||||
result, err := provider.GetMeta(ctx, "test-datasource", "1.0.0")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "test-datasource", result.Meta.Id)
|
||||
assert.Equal(t, "Test Datasource", result.Meta.Name)
|
||||
assert.Equal(t, "test-datasource", result.Meta.PluginJson.Id)
|
||||
assert.Equal(t, "Test Datasource", result.Meta.PluginJson.Name)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
53
apps/plugins/pkg/app/meta/local.go
Normal file
53
apps/plugins/pkg/app/meta/local.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package meta
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultLocalTTL = 1 * time.Hour
|
||||
)
|
||||
|
||||
// PluginAssetsCalculator is an interface for calculating plugin asset information.
|
||||
// LocalProvider requires this to calculate loading strategy and module hash.
|
||||
type PluginAssetsCalculator interface {
|
||||
LoadingStrategy(ctx context.Context, p pluginstore.Plugin) plugins.LoadingStrategy
|
||||
ModuleHash(ctx context.Context, p pluginstore.Plugin) string
|
||||
}
|
||||
|
||||
// LocalProvider retrieves plugin metadata for locally installed plugins.
|
||||
// It uses the plugin store to access plugins that have already been loaded.
|
||||
type LocalProvider struct {
|
||||
store pluginstore.Store
|
||||
pluginAssets PluginAssetsCalculator
|
||||
}
|
||||
|
||||
// NewLocalProvider creates a new LocalProvider for locally installed plugins.
|
||||
// pluginAssets is required for calculating loading strategy and module hash.
|
||||
func NewLocalProvider(pluginStore pluginstore.Store, pluginAssets PluginAssetsCalculator) *LocalProvider {
|
||||
return &LocalProvider{
|
||||
store: pluginStore,
|
||||
pluginAssets: pluginAssets,
|
||||
}
|
||||
}
|
||||
|
||||
// GetMeta retrieves plugin metadata for locally installed plugins.
|
||||
func (p *LocalProvider) GetMeta(ctx context.Context, pluginID, version string) (*Result, error) {
|
||||
plugin, exists := p.store.Plugin(ctx, pluginID)
|
||||
if !exists {
|
||||
return nil, ErrMetaNotFound
|
||||
}
|
||||
|
||||
loadingStrategy := p.pluginAssets.LoadingStrategy(ctx, plugin)
|
||||
moduleHash := p.pluginAssets.ModuleHash(ctx, plugin)
|
||||
|
||||
spec := pluginStorePluginToMeta(plugin, loadingStrategy, moduleHash)
|
||||
return &Result{
|
||||
Meta: spec,
|
||||
TTL: defaultLocalTTL,
|
||||
}, nil
|
||||
}
|
||||
@@ -16,7 +16,7 @@ const (
|
||||
|
||||
// cachedMeta represents a cached metadata entry with expiration time
|
||||
type cachedMeta struct {
|
||||
meta pluginsv0alpha1.MetaJSONData
|
||||
meta pluginsv0alpha1.MetaSpec
|
||||
ttl time.Duration
|
||||
expiresAt time.Time
|
||||
}
|
||||
@@ -84,7 +84,7 @@ func (pm *ProviderManager) GetMeta(ctx context.Context, pluginID, version string
|
||||
if err == nil {
|
||||
// Don't cache results with a zero TTL
|
||||
if result.TTL == 0 {
|
||||
continue
|
||||
return result, nil
|
||||
}
|
||||
|
||||
pm.cacheMu.Lock()
|
||||
|
||||
@@ -35,10 +35,12 @@ func TestProviderManager_GetMeta(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("returns cached result when available and not expired", func(t *testing.T) {
|
||||
cachedMeta := pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
cachedMeta := pluginsv0alpha1.MetaSpec{
|
||||
PluginJson: pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
},
|
||||
}
|
||||
|
||||
provider := &mockProvider{
|
||||
@@ -60,8 +62,10 @@ func TestProviderManager_GetMeta(t *testing.T) {
|
||||
|
||||
provider.getMetaFunc = func(ctx context.Context, pluginID, version string) (*Result, error) {
|
||||
return &Result{
|
||||
Meta: pluginsv0alpha1.MetaJSONData{Id: "different"},
|
||||
TTL: time.Hour,
|
||||
Meta: pluginsv0alpha1.MetaSpec{
|
||||
PluginJson: pluginsv0alpha1.MetaJSONData{Id: "different"},
|
||||
},
|
||||
TTL: time.Hour,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -73,10 +77,12 @@ func TestProviderManager_GetMeta(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("fetches from provider when not cached", func(t *testing.T) {
|
||||
expectedMeta := pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
expectedMeta := pluginsv0alpha1.MetaSpec{
|
||||
PluginJson: pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
},
|
||||
}
|
||||
expectedTTL := 2 * time.Hour
|
||||
|
||||
@@ -107,19 +113,16 @@ func TestProviderManager_GetMeta(t *testing.T) {
|
||||
assert.Equal(t, expectedTTL, cached.ttl)
|
||||
})
|
||||
|
||||
t.Run("does not cache result with zero TTL and tries next provider", func(t *testing.T) {
|
||||
zeroTTLMeta := pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Zero TTL Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
}
|
||||
expectedMeta := pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
t.Run("does not cache result with zero TTL", func(t *testing.T) {
|
||||
zeroTTLMeta := pluginsv0alpha1.MetaSpec{
|
||||
PluginJson: pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Zero TTL Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
},
|
||||
}
|
||||
|
||||
provider1 := &mockProvider{
|
||||
provider := &mockProvider{
|
||||
getMetaFunc: func(ctx context.Context, pluginID, version string) (*Result, error) {
|
||||
return &Result{
|
||||
Meta: zeroTTLMeta,
|
||||
@@ -127,37 +130,30 @@ func TestProviderManager_GetMeta(t *testing.T) {
|
||||
}, nil
|
||||
},
|
||||
}
|
||||
provider2 := &mockProvider{
|
||||
getMetaFunc: func(ctx context.Context, pluginID, version string) (*Result, error) {
|
||||
return &Result{
|
||||
Meta: expectedMeta,
|
||||
TTL: time.Hour,
|
||||
}, nil
|
||||
},
|
||||
}
|
||||
|
||||
pm := NewProviderManager(provider1, provider2)
|
||||
pm := NewProviderManager(provider)
|
||||
|
||||
result, err := pm.GetMeta(ctx, "test-plugin", "1.0.0")
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, result)
|
||||
assert.Equal(t, expectedMeta, result.Meta)
|
||||
assert.Equal(t, zeroTTLMeta, result.Meta)
|
||||
assert.Equal(t, time.Duration(0), result.TTL)
|
||||
|
||||
pm.cacheMu.RLock()
|
||||
cached, exists := pm.cache["test-plugin:1.0.0"]
|
||||
_, exists := pm.cache["test-plugin:1.0.0"]
|
||||
pm.cacheMu.RUnlock()
|
||||
|
||||
assert.True(t, exists)
|
||||
assert.Equal(t, expectedMeta, cached.meta)
|
||||
assert.Equal(t, time.Hour, cached.ttl)
|
||||
assert.False(t, exists, "zero TTL results should not be cached")
|
||||
})
|
||||
|
||||
t.Run("tries next provider when first returns ErrMetaNotFound", func(t *testing.T) {
|
||||
expectedMeta := pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
expectedMeta := pluginsv0alpha1.MetaSpec{
|
||||
PluginJson: pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
},
|
||||
}
|
||||
|
||||
provider1 := &mockProvider{
|
||||
@@ -229,15 +225,19 @@ func TestProviderManager_GetMeta(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("skips expired cache entries", func(t *testing.T) {
|
||||
expiredMeta := pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Expired Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
expiredMeta := pluginsv0alpha1.MetaSpec{
|
||||
PluginJson: pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Expired Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
},
|
||||
}
|
||||
expectedMeta := pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
expectedMeta := pluginsv0alpha1.MetaSpec{
|
||||
PluginJson: pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
},
|
||||
}
|
||||
|
||||
callCount := 0
|
||||
@@ -272,15 +272,19 @@ func TestProviderManager_GetMeta(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("uses first successful provider", func(t *testing.T) {
|
||||
expectedMeta1 := pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Provider 1 Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
expectedMeta1 := pluginsv0alpha1.MetaSpec{
|
||||
PluginJson: pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Provider 1 Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
},
|
||||
}
|
||||
expectedMeta2 := pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Provider 2 Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
expectedMeta2 := pluginsv0alpha1.MetaSpec{
|
||||
PluginJson: pluginsv0alpha1.MetaJSONData{
|
||||
Id: "test-plugin",
|
||||
Name: "Provider 2 Plugin",
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
},
|
||||
}
|
||||
|
||||
provider1 := &mockProvider{
|
||||
@@ -331,9 +335,9 @@ func TestProviderManager_Run(t *testing.T) {
|
||||
|
||||
func TestProviderManager_cleanupExpired(t *testing.T) {
|
||||
t.Run("removes expired entries", func(t *testing.T) {
|
||||
validMeta := pluginsv0alpha1.MetaJSONData{Id: "valid"}
|
||||
expiredMeta1 := pluginsv0alpha1.MetaJSONData{Id: "expired1"}
|
||||
expiredMeta2 := pluginsv0alpha1.MetaJSONData{Id: "expired2"}
|
||||
validMeta := pluginsv0alpha1.MetaSpec{PluginJson: pluginsv0alpha1.MetaJSONData{Id: "valid"}}
|
||||
expiredMeta1 := pluginsv0alpha1.MetaSpec{PluginJson: pluginsv0alpha1.MetaJSONData{Id: "expired1"}}
|
||||
expiredMeta2 := pluginsv0alpha1.MetaSpec{PluginJson: pluginsv0alpha1.MetaJSONData{Id: "expired2"}}
|
||||
|
||||
provider := &mockProvider{
|
||||
getMetaFunc: func(ctx context.Context, pluginID, version string) (*Result, error) {
|
||||
|
||||
@@ -14,7 +14,7 @@ var (
|
||||
|
||||
// Result contains plugin metadata along with its recommended TTL.
|
||||
type Result struct {
|
||||
Meta pluginsv0alpha1.MetaJSONData
|
||||
Meta pluginsv0alpha1.MetaSpec
|
||||
TTL time.Duration
|
||||
}
|
||||
|
||||
|
||||
@@ -121,8 +121,19 @@ func (s *MetaStorage) List(ctx context.Context, options *internalversion.ListOpt
|
||||
continue
|
||||
}
|
||||
|
||||
pluginMeta := createMetaFromMetaJSONData(result.Meta, plugin.Name, plugin.Namespace)
|
||||
metaItems = append(metaItems, *pluginMeta)
|
||||
pluginMeta := pluginsv0alpha1.Meta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: plugin.Name,
|
||||
Namespace: plugin.Namespace,
|
||||
},
|
||||
Spec: result.Meta,
|
||||
}
|
||||
pluginMeta.SetGroupVersionKind(schema.GroupVersionKind{
|
||||
Group: pluginsv0alpha1.APIGroup,
|
||||
Version: pluginsv0alpha1.APIVersion,
|
||||
Kind: pluginsv0alpha1.MetaKind().Kind(),
|
||||
})
|
||||
metaItems = append(metaItems, pluginMeta)
|
||||
}
|
||||
|
||||
list := &pluginsv0alpha1.MetaList{
|
||||
@@ -169,27 +180,18 @@ func (s *MetaStorage) Get(ctx context.Context, name string, options *metav1.GetO
|
||||
return nil, apierrors.NewInternalError(fmt.Errorf("failed to fetch plugin metadata: %w", err))
|
||||
}
|
||||
|
||||
return createMetaFromMetaJSONData(result.Meta, name, ns.Value), nil
|
||||
}
|
||||
|
||||
// createMetaFromMetaJSONData creates a Meta k8s object from MetaJSONData and plugin metadata.
|
||||
func createMetaFromMetaJSONData(pluginJSON pluginsv0alpha1.MetaJSONData, name, namespace string) *pluginsv0alpha1.Meta {
|
||||
pluginMeta := &pluginsv0alpha1.Meta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: pluginsv0alpha1.MetaSpec{
|
||||
PluginJSON: pluginJSON,
|
||||
Name: plugin.Name,
|
||||
Namespace: plugin.Namespace,
|
||||
},
|
||||
Spec: result.Meta,
|
||||
}
|
||||
|
||||
// Set the GroupVersionKind
|
||||
pluginMeta.SetGroupVersionKind(schema.GroupVersionKind{
|
||||
Group: pluginsv0alpha1.APIGroup,
|
||||
Version: pluginsv0alpha1.APIVersion,
|
||||
Kind: pluginsv0alpha1.MetaKind().Kind(),
|
||||
})
|
||||
|
||||
return pluginMeta
|
||||
return pluginMeta, nil
|
||||
}
|
||||
|
||||
249
apps/plugins/pkg/app/storage_test.go
Normal file
249
apps/plugins/pkg/app/storage_test.go
Normal file
@@ -0,0 +1,249 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/grafana/grafana-app-sdk/resource"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apiserver/pkg/endpoints/request"
|
||||
|
||||
pluginsv0alpha1 "github.com/grafana/grafana/apps/plugins/pkg/apis/plugins/v0alpha1"
|
||||
"github.com/grafana/grafana/apps/plugins/pkg/app/meta"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore"
|
||||
)
|
||||
|
||||
func TestMetaStorageListPreload(t *testing.T) {
|
||||
ctx := request.WithNamespace(context.Background(), "default")
|
||||
|
||||
preloadPlugin := pluginstore.Plugin{
|
||||
JSONData: plugins.JSONData{
|
||||
ID: "test-plugin",
|
||||
Name: "Test Plugin",
|
||||
Type: plugins.TypeDataSource,
|
||||
Info: plugins.Info{Version: "1.0.0"},
|
||||
Preload: true,
|
||||
},
|
||||
}
|
||||
nonPreloadPlugin := pluginstore.Plugin{
|
||||
JSONData: plugins.JSONData{
|
||||
ID: "test-plugin-2",
|
||||
Name: "Test Plugin 2",
|
||||
Type: plugins.TypeDataSource,
|
||||
Info: plugins.Info{Version: "1.0.0"},
|
||||
Preload: false,
|
||||
},
|
||||
}
|
||||
|
||||
store := &mockPluginStore{plugins: map[string]pluginstore.Plugin{
|
||||
"test-plugin": preloadPlugin,
|
||||
}}
|
||||
store2 := &mockPluginStore{plugins: map[string]pluginstore.Plugin{
|
||||
"test-plugin-2": nonPreloadPlugin,
|
||||
}}
|
||||
catalogServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
require.Equal(t, http.MethodGet, r.Method)
|
||||
require.Equal(t, "application/json", r.Header.Get("Accept"))
|
||||
require.Equal(t, "grafana-plugins-app", r.Header.Get("User-Agent"))
|
||||
|
||||
segments := strings.Split(strings.Trim(r.URL.Path, "/"), "/")
|
||||
require.Len(t, segments, 5)
|
||||
require.Equal(t, "api", segments[0])
|
||||
require.Equal(t, "plugins", segments[1])
|
||||
require.Equal(t, "versions", segments[3])
|
||||
|
||||
preload := true
|
||||
response := struct {
|
||||
PluginID string `json:"pluginSlug"`
|
||||
Version string `json:"version"`
|
||||
JSON pluginsv0alpha1.MetaJSONData `json:"json"`
|
||||
}{
|
||||
PluginID: segments[2],
|
||||
Version: segments[4],
|
||||
JSON: pluginsv0alpha1.MetaJSONData{
|
||||
Id: segments[2],
|
||||
Name: segments[2],
|
||||
Type: pluginsv0alpha1.MetaJSONDataTypeDatasource,
|
||||
Preload: &preload,
|
||||
},
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
require.NoError(t, json.NewEncoder(w).Encode(response))
|
||||
}))
|
||||
defer catalogServer.Close()
|
||||
provider := meta.NewLocalProvider(store, mockPluginAssets{})
|
||||
provider2 := meta.NewLocalProvider(store2, mockPluginAssets{})
|
||||
catalogProvider := meta.NewCatalogProvider(catalogServer.URL + "/api/plugins")
|
||||
metaManager := meta.NewProviderManager(provider2, provider, catalogProvider)
|
||||
|
||||
pluginClient := pluginsv0alpha1.NewPluginClient(&mockResourceClient{
|
||||
listFunc: func(ctx context.Context, namespace string, opts resource.ListOptions) (resource.ListObject, error) {
|
||||
return newPluginList(), nil
|
||||
},
|
||||
})
|
||||
|
||||
storage := NewMetaStorage(metaManager, func(ctx context.Context) (*pluginsv0alpha1.PluginClient, error) {
|
||||
return pluginClient, nil
|
||||
})
|
||||
|
||||
obj, err := storage.List(ctx, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
metaList, ok := obj.(*pluginsv0alpha1.MetaList)
|
||||
require.True(t, ok)
|
||||
require.Len(t, metaList.Items, 3)
|
||||
|
||||
require.NotNil(t, metaList.Items[0].Spec.PluginJson.Preload)
|
||||
require.True(t, *metaList.Items[0].Spec.PluginJson.Preload)
|
||||
require.NotNil(t, metaList.Items[1].Spec.PluginJson.Preload)
|
||||
require.True(t, *metaList.Items[1].Spec.PluginJson.Preload)
|
||||
require.Nil(t, metaList.Items[2].Spec.PluginJson.Preload)
|
||||
|
||||
obj, err = storage.List(ctx, nil)
|
||||
require.NoError(t, err)
|
||||
metaList, ok = obj.(*pluginsv0alpha1.MetaList)
|
||||
require.True(t, ok)
|
||||
require.Len(t, metaList.Items, 3)
|
||||
require.NotNil(t, metaList.Items[0].Spec.PluginJson.Preload)
|
||||
require.True(t, *metaList.Items[0].Spec.PluginJson.Preload)
|
||||
require.NotNil(t, metaList.Items[1].Spec.PluginJson.Preload)
|
||||
require.True(t, *metaList.Items[1].Spec.PluginJson.Preload)
|
||||
require.Nil(t, metaList.Items[2].Spec.PluginJson.Preload)
|
||||
}
|
||||
|
||||
type mockPluginAssets struct{}
|
||||
|
||||
func (mockPluginAssets) LoadingStrategy(ctx context.Context, p pluginstore.Plugin) plugins.LoadingStrategy {
|
||||
return plugins.LoadingStrategyFetch
|
||||
}
|
||||
|
||||
func (mockPluginAssets) ModuleHash(ctx context.Context, p pluginstore.Plugin) string {
|
||||
return "hash"
|
||||
}
|
||||
|
||||
type mockPluginStore struct {
|
||||
plugins map[string]pluginstore.Plugin
|
||||
}
|
||||
|
||||
func (m *mockPluginStore) Plugin(ctx context.Context, pluginID string) (pluginstore.Plugin, bool) {
|
||||
if m.plugins[pluginID].ID != pluginID {
|
||||
return pluginstore.Plugin{}, false
|
||||
}
|
||||
return m.plugins[pluginID], true
|
||||
}
|
||||
|
||||
func (m *mockPluginStore) Plugins(ctx context.Context, pluginTypes ...plugins.Type) []pluginstore.Plugin {
|
||||
result := []pluginstore.Plugin{}
|
||||
for _, plugin := range m.plugins {
|
||||
if len(pluginTypes) == 0 || slices.Contains(pluginTypes, plugin.Type) {
|
||||
result = append(result, plugin)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func newPluginList() *pluginsv0alpha1.PluginList {
|
||||
return &pluginsv0alpha1.PluginList{
|
||||
Items: []pluginsv0alpha1.Plugin{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "grafana-plugins-app", Namespace: "org-1"},
|
||||
Spec: pluginsv0alpha1.PluginSpec{Id: "grafana-plugins-app", Version: "1.0.0"},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "test-plugin", Namespace: "org-1"},
|
||||
Spec: pluginsv0alpha1.PluginSpec{Id: "test-plugin", Version: "1.0.0"},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "test-plugin-2", Namespace: "org-1"},
|
||||
Spec: pluginsv0alpha1.PluginSpec{Id: "test-plugin-2", Version: "1.0.0"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type mockResourceClient struct {
|
||||
listFunc func(ctx context.Context, namespace string, opts resource.ListOptions) (resource.ListObject, error)
|
||||
}
|
||||
|
||||
func (m *mockResourceClient) List(ctx context.Context, namespace string, opts resource.ListOptions) (resource.ListObject, error) {
|
||||
if m.listFunc != nil {
|
||||
return m.listFunc(ctx, namespace, opts)
|
||||
}
|
||||
return &pluginsv0alpha1.PluginList{}, nil
|
||||
}
|
||||
|
||||
func (m *mockResourceClient) ListInto(ctx context.Context, namespace string, opts resource.ListOptions, into resource.ListObject) error {
|
||||
list, err := m.List(ctx, namespace, opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if src, ok := list.(*pluginsv0alpha1.PluginList); ok {
|
||||
if dst, ok := into.(*pluginsv0alpha1.PluginList); ok {
|
||||
*dst = *src
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockResourceClient) Get(ctx context.Context, identifier resource.Identifier) (resource.Object, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockResourceClient) GetInto(ctx context.Context, identifier resource.Identifier, into resource.Object) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockResourceClient) Create(ctx context.Context, identifier resource.Identifier, obj resource.Object, opts resource.CreateOptions) (resource.Object, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockResourceClient) CreateInto(ctx context.Context, identifier resource.Identifier, obj resource.Object, opts resource.CreateOptions, into resource.Object) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockResourceClient) Update(ctx context.Context, identifier resource.Identifier, obj resource.Object, opts resource.UpdateOptions) (resource.Object, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockResourceClient) UpdateInto(ctx context.Context, identifier resource.Identifier, obj resource.Object, opts resource.UpdateOptions, into resource.Object) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockResourceClient) Patch(ctx context.Context, identifier resource.Identifier, patch resource.PatchRequest, opts resource.PatchOptions) (resource.Object, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockResourceClient) PatchInto(ctx context.Context, identifier resource.Identifier, patch resource.PatchRequest, opts resource.PatchOptions, into resource.Object) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockResourceClient) Delete(ctx context.Context, identifier resource.Identifier, opts resource.DeleteOptions) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockResourceClient) SubresourceRequest(ctx context.Context, identifier resource.Identifier, req resource.CustomRouteRequestOptions) ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockResourceClient) Watch(ctx context.Context, namespace string, opts resource.WatchOptions) (resource.WatchResponse, error) {
|
||||
return &mockWatchResponse{}, nil
|
||||
}
|
||||
|
||||
type mockWatchResponse struct{}
|
||||
|
||||
func (m *mockWatchResponse) Stop() {}
|
||||
|
||||
func (m *mockWatchResponse) WatchEvents() <-chan resource.WatchEvent {
|
||||
ch := make(chan resource.WatchEvent)
|
||||
close(ch)
|
||||
return ch
|
||||
}
|
||||
@@ -198,7 +198,6 @@ type JobStatus struct {
|
||||
Finished int64 `json:"finished,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
Warnings []string `json:"warnings,omitempty"`
|
||||
|
||||
// Optional value 0-100 that can be set while running
|
||||
Progress float64 `json:"progress,omitempty"`
|
||||
@@ -226,20 +225,18 @@ type JobResourceSummary struct {
|
||||
Kind string `json:"kind,omitempty"`
|
||||
Total int64 `json:"total,omitempty"` // the count (if known)
|
||||
|
||||
Create int64 `json:"create,omitempty"`
|
||||
Update int64 `json:"update,omitempty"`
|
||||
Delete int64 `json:"delete,omitempty"`
|
||||
Write int64 `json:"write,omitempty"` // Create or update (export)
|
||||
Error int64 `json:"error,omitempty"` // The error count
|
||||
Warning int64 `json:"warning,omitempty"` // The warning count
|
||||
Create int64 `json:"create,omitempty"`
|
||||
Update int64 `json:"update,omitempty"`
|
||||
Delete int64 `json:"delete,omitempty"`
|
||||
Write int64 `json:"write,omitempty"` // Create or update (export)
|
||||
Error int64 `json:"error,omitempty"` // The error count
|
||||
|
||||
// No action required (useful for sync)
|
||||
Noop int64 `json:"noop,omitempty"`
|
||||
|
||||
// Report errors/warnings for this resource type
|
||||
// Report errors for this resource type
|
||||
// This may not be an exhaustive list and recommend looking at the logs for more info
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
Warnings []string `json:"warnings,omitempty"`
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
}
|
||||
|
||||
// HistoricJob is an append only log, saving all jobs that have been processed.
|
||||
|
||||
@@ -401,11 +401,6 @@ func (in *JobResourceSummary) DeepCopyInto(out *JobResourceSummary) {
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Warnings != nil {
|
||||
in, out := &in.Warnings, &out.Warnings
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -473,11 +468,6 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) {
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Warnings != nil {
|
||||
in, out := &in.Warnings, &out.Warnings
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Summary != nil {
|
||||
in, out := &in.Summary, &out.Summary
|
||||
*out = make([]*JobResourceSummary, len(*in))
|
||||
|
||||
@@ -889,13 +889,6 @@ func schema_pkg_apis_provisioning_v0alpha1_JobResourceSummary(ref common.Referen
|
||||
Format: "int64",
|
||||
},
|
||||
},
|
||||
"warning": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "The error count",
|
||||
Type: []string{"integer"},
|
||||
Format: "int64",
|
||||
},
|
||||
},
|
||||
"noop": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "No action required (useful for sync)",
|
||||
@@ -905,7 +898,7 @@ func schema_pkg_apis_provisioning_v0alpha1_JobResourceSummary(ref common.Referen
|
||||
},
|
||||
"errors": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Report errors/warnings for this resource type This may not be an exhaustive list and recommend looking at the logs for more info",
|
||||
Description: "Report errors for this resource type This may not be an exhaustive list and recommend looking at the logs for more info",
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
@@ -918,20 +911,6 @@ func schema_pkg_apis_provisioning_v0alpha1_JobResourceSummary(ref common.Referen
|
||||
},
|
||||
},
|
||||
},
|
||||
"warnings": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1050,20 +1029,6 @@ func schema_pkg_apis_provisioning_v0alpha1_JobStatus(ref common.ReferenceCallbac
|
||||
},
|
||||
},
|
||||
},
|
||||
"warnings": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"progress": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Optional value 0-100 that can be set while running",
|
||||
|
||||
@@ -3,10 +3,8 @@ API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioni
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,FileList,Items
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,HistoryList,Items
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobResourceSummary,Errors
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobResourceSummary,Warnings
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobStatus,Errors
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobStatus,Summary
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobStatus,Warnings
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,ManagerStats,Stats
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,MoveJobOptions,Paths
|
||||
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,MoveJobOptions,Resources
|
||||
|
||||
@@ -7,18 +7,16 @@ package v0alpha1
|
||||
// JobResourceSummaryApplyConfiguration represents a declarative configuration of the JobResourceSummary type for use
|
||||
// with apply.
|
||||
type JobResourceSummaryApplyConfiguration struct {
|
||||
Group *string `json:"group,omitempty"`
|
||||
Kind *string `json:"kind,omitempty"`
|
||||
Total *int64 `json:"total,omitempty"`
|
||||
Create *int64 `json:"create,omitempty"`
|
||||
Update *int64 `json:"update,omitempty"`
|
||||
Delete *int64 `json:"delete,omitempty"`
|
||||
Write *int64 `json:"write,omitempty"`
|
||||
Error *int64 `json:"error,omitempty"`
|
||||
Warning *int64 `json:"warning,omitempty"`
|
||||
Noop *int64 `json:"noop,omitempty"`
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
Warnings []string `json:"warnings,omitempty"`
|
||||
Group *string `json:"group,omitempty"`
|
||||
Kind *string `json:"kind,omitempty"`
|
||||
Total *int64 `json:"total,omitempty"`
|
||||
Create *int64 `json:"create,omitempty"`
|
||||
Update *int64 `json:"update,omitempty"`
|
||||
Delete *int64 `json:"delete,omitempty"`
|
||||
Write *int64 `json:"write,omitempty"`
|
||||
Error *int64 `json:"error,omitempty"`
|
||||
Noop *int64 `json:"noop,omitempty"`
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
}
|
||||
|
||||
// JobResourceSummaryApplyConfiguration constructs a declarative configuration of the JobResourceSummary type for use with
|
||||
@@ -91,14 +89,6 @@ func (b *JobResourceSummaryApplyConfiguration) WithError(value int64) *JobResour
|
||||
return b
|
||||
}
|
||||
|
||||
// WithWarning sets the Warning field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Warning field is set to the value of the last call.
|
||||
func (b *JobResourceSummaryApplyConfiguration) WithWarning(value int64) *JobResourceSummaryApplyConfiguration {
|
||||
b.Warning = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithNoop sets the Noop field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Noop field is set to the value of the last call.
|
||||
@@ -116,13 +106,3 @@ func (b *JobResourceSummaryApplyConfiguration) WithErrors(values ...string) *Job
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// WithWarnings adds the given value to the Warnings field in the declarative configuration
|
||||
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||
// If called multiple times, values provided by each call will be appended to the Warnings field.
|
||||
func (b *JobResourceSummaryApplyConfiguration) WithWarnings(values ...string) *JobResourceSummaryApplyConfiguration {
|
||||
for i := range values {
|
||||
b.Warnings = append(b.Warnings, values[i])
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
@@ -16,7 +16,6 @@ type JobStatusApplyConfiguration struct {
|
||||
Finished *int64 `json:"finished,omitempty"`
|
||||
Message *string `json:"message,omitempty"`
|
||||
Errors []string `json:"errors,omitempty"`
|
||||
Warnings []string `json:"warnings,omitempty"`
|
||||
Progress *float64 `json:"progress,omitempty"`
|
||||
Summary []*provisioningv0alpha1.JobResourceSummary `json:"summary,omitempty"`
|
||||
URLs *RepositoryURLsApplyConfiguration `json:"url,omitempty"`
|
||||
@@ -70,16 +69,6 @@ func (b *JobStatusApplyConfiguration) WithErrors(values ...string) *JobStatusApp
|
||||
return b
|
||||
}
|
||||
|
||||
// WithWarnings adds the given value to the Warnings field in the declarative configuration
|
||||
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||
// If called multiple times, values provided by each call will be appended to the Warnings field.
|
||||
func (b *JobStatusApplyConfiguration) WithWarnings(values ...string) *JobStatusApplyConfiguration {
|
||||
for i := range values {
|
||||
b.Warnings = append(b.Warnings, values[i])
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// WithProgress sets the Progress field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Progress field is set to the value of the last call.
|
||||
|
||||
@@ -75,9 +75,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": false
|
||||
"spotlight": false,
|
||||
"gradient": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -152,9 +152,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": false
|
||||
"spotlight": false,
|
||||
"gradient": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -229,9 +229,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": false
|
||||
"spotlight": false,
|
||||
"gradient": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -306,9 +306,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": true
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -383,9 +383,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": true
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -460,9 +460,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": false,
|
||||
"spotlight": true
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -537,9 +537,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": false,
|
||||
"spotlight": true
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -627,9 +627,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": true
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -704,9 +704,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": true
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -781,9 +781,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": true
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -858,9 +858,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": false,
|
||||
"rounded": true,
|
||||
"spotlight": true
|
||||
"spotlight": true,
|
||||
"gradient": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -952,9 +952,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": false,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
"spotlight": false,
|
||||
"gradient": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1029,9 +1029,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": false,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
"spotlight": false,
|
||||
"gradient": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1106,9 +1106,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
"spotlight": false,
|
||||
"gradient": true
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1183,9 +1183,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": false,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
"spotlight": false,
|
||||
"gradient": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1260,9 +1260,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": false,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
"spotlight": false,
|
||||
"gradient": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1354,9 +1354,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
"spotlight": false,
|
||||
"gradient": true
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1435,9 +1435,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
"spotlight": false,
|
||||
"gradient": true
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1516,9 +1516,9 @@
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
"spotlight": false,
|
||||
"gradient": true
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
@@ -1565,6 +1565,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1605,9 +1606,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": true,
|
||||
"rounded": true,
|
||||
"spotlight": true
|
||||
"spotlight": true,
|
||||
"gradient": true
|
||||
},
|
||||
"glow": "both",
|
||||
"orientation": "auto",
|
||||
@@ -1630,6 +1631,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 98,
|
||||
"min": 5,
|
||||
"noise": 22,
|
||||
@@ -1647,6 +1649,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1687,9 +1690,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": true,
|
||||
"rounded": true,
|
||||
"spotlight": true
|
||||
"spotlight": true,
|
||||
"gradient": true
|
||||
},
|
||||
"glow": "both",
|
||||
"orientation": "auto",
|
||||
@@ -1712,6 +1715,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 98,
|
||||
"min": 5,
|
||||
"noise": 22,
|
||||
@@ -1742,6 +1746,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1783,9 +1788,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": true,
|
||||
"rounded": true,
|
||||
"spotlight": true
|
||||
"spotlight": true,
|
||||
"gradient": true
|
||||
},
|
||||
"glow": "both",
|
||||
"orientation": "auto",
|
||||
@@ -1808,6 +1813,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 8,
|
||||
"min": 1,
|
||||
"noise": 2,
|
||||
@@ -1825,6 +1831,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1866,9 +1873,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": true,
|
||||
"rounded": true,
|
||||
"spotlight": true
|
||||
"spotlight": true,
|
||||
"gradient": true
|
||||
},
|
||||
"glow": "both",
|
||||
"orientation": "auto",
|
||||
@@ -1891,6 +1898,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 12,
|
||||
"min": 1,
|
||||
"noise": 2,
|
||||
@@ -1908,6 +1916,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -1948,9 +1957,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": true,
|
||||
"rounded": true,
|
||||
"spotlight": true
|
||||
"spotlight": true,
|
||||
"gradient": true
|
||||
},
|
||||
"glow": "both",
|
||||
"orientation": "auto",
|
||||
@@ -1973,6 +1982,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 100,
|
||||
"min": 10,
|
||||
"noise": 22,
|
||||
@@ -1990,6 +2000,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"description": "",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@@ -2030,9 +2041,9 @@
|
||||
"effects": {
|
||||
"barGlow": true,
|
||||
"centerGlow": true,
|
||||
"gradient": true,
|
||||
"rounded": true,
|
||||
"spotlight": true
|
||||
"spotlight": true,
|
||||
"gradient": true
|
||||
},
|
||||
"glow": "both",
|
||||
"orientation": "auto",
|
||||
@@ -2055,6 +2066,7 @@
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"hide": false,
|
||||
"max": 100,
|
||||
"min": 10,
|
||||
"noise": 22,
|
||||
@@ -2067,147 +2079,6 @@
|
||||
],
|
||||
"title": "Backend",
|
||||
"type": "radialbar"
|
||||
},
|
||||
{
|
||||
"collapsed": false,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 66
|
||||
},
|
||||
"id": 35,
|
||||
"panels": [],
|
||||
"title": "Empty data",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": 0
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 6,
|
||||
"x": 0,
|
||||
"y": 67
|
||||
},
|
||||
"id": 36,
|
||||
"options": {
|
||||
"barWidthFactor": 0.5,
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"segmentCount": 1,
|
||||
"segmentSpacing": 0.3,
|
||||
"shape": "gauge",
|
||||
"showThresholdLabels": false,
|
||||
"showThresholdMarkers": true,
|
||||
"sparkline": true
|
||||
},
|
||||
"pluginVersion": "13.0.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"refId": "A",
|
||||
"scenarioId": "random_walk",
|
||||
"seriesCount": 0
|
||||
}
|
||||
],
|
||||
"title": "Numeric, no series",
|
||||
"type": "gauge"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "grafana-testdata-datasource"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": 0
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 6,
|
||||
"x": 6,
|
||||
"y": 67
|
||||
},
|
||||
"id": 37,
|
||||
"options": {
|
||||
"barWidthFactor": 0.5,
|
||||
"effects": {
|
||||
"barGlow": false,
|
||||
"centerGlow": false,
|
||||
"gradient": true,
|
||||
"rounded": false,
|
||||
"spotlight": false
|
||||
},
|
||||
"orientation": "auto",
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"segmentCount": 1,
|
||||
"segmentSpacing": 0.3,
|
||||
"shape": "gauge",
|
||||
"showThresholdLabels": false,
|
||||
"showThresholdMarkers": true,
|
||||
"sparkline": true
|
||||
},
|
||||
"pluginVersion": "13.0.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"refId": "A",
|
||||
"scenarioId": "logs"
|
||||
}
|
||||
],
|
||||
"title": "Non-numeric",
|
||||
"type": "gauge"
|
||||
}
|
||||
],
|
||||
"preload": false,
|
||||
@@ -2224,5 +2095,5 @@
|
||||
"timezone": "browser",
|
||||
"title": "Panel tests - Gauge (new)",
|
||||
"uid": "panel-tests-gauge-new",
|
||||
"version": 9
|
||||
"version": 6
|
||||
}
|
||||
|
||||
@@ -7,8 +7,8 @@ MAKEFLAGS += --no-builtin-rule
|
||||
|
||||
include docs.mk
|
||||
|
||||
.PHONY: sources/visualizations/panels-visualizations/query-transform-data/transform-data/index.md
|
||||
sources/visualizations/panels-visualizations/query-transform-data/transform-data/index.md: ## Generate the Transform Data page source.
|
||||
.PHONY: sources/panels-visualizations/query-transform-data/transform-data/index.md
|
||||
sources/panels-visualizations/query-transform-data/transform-data/index.md: ## Generate the Transform Data page source.
|
||||
cd $(CURDIR)/.. && \
|
||||
npx tsx ./scripts/docs/generate-transformations.ts && \
|
||||
npx prettier -w $(CURDIR)/$@
|
||||
|
||||
@@ -59,9 +59,9 @@ For more details on contact points, including how to test them and enable notifi
|
||||
|
||||
## Alertmanager settings
|
||||
|
||||
| Option | Description |
|
||||
| ------ | ----------------------------------------------------------------------------------------------------------------- |
|
||||
| URL | The Alertmanager URL. This field is [protected](ref:configure-contact-points) from modification in Grafana Cloud. |
|
||||
| Option | Description |
|
||||
| ------ | ---------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| URL | The Alertmanager URL. This field is [protected](ref:configure-contact-points#protected-fields) from modification in Grafana Cloud. |
|
||||
|
||||
#### Optional settings
|
||||
|
||||
|
||||
@@ -49,14 +49,14 @@ For more details on contact points, including how to test them and enable notifi
|
||||
|
||||
### Required Settings
|
||||
|
||||
| Key | Description |
|
||||
| ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| URL | The URL of the REST API of your Jira instance. Supported versions: `2` and `3` (e.g., `https://your-domain.atlassian.net/rest/api/3`). This field is [protected](ref:configure-contact-points) from modification in Grafana Cloud. |
|
||||
| Basic Auth User | Username for authentication. For Jira Cloud, use your email address. |
|
||||
| Basic Auth Password | Password or personal token. For Jira Cloud, you need to obtain a personal token [here](https://id.atlassian.com/manage-profile/security/api-tokens) and use it as the password. |
|
||||
| API Token | An alternative to basic authentication, a bearer token is used to authorize the API requests. See [Jira documentation](https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html) for more information. |
|
||||
| Project Key | The project key identifying the project where issues will be created. Project keys are unique identifiers for a project. |
|
||||
| Issue Type | The type of issue to create (e.g., `Task`, `Bug`, `Incident`). Make sure that you specify a type that is available in your project. |
|
||||
| Key | Description |
|
||||
| ------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| URL | The URL of the REST API of your Jira instance. Supported versions: `2` and `3` (e.g., `https://your-domain.atlassian.net/rest/api/3`). This field is [protected](ref:configure-contact-points#protected-fields) from modification in Grafana Cloud. |
|
||||
| Basic Auth User | Username for authentication. For Jira Cloud, use your email address. |
|
||||
| Basic Auth Password | Password or personal token. For Jira Cloud, you need to obtain a personal token [here](https://id.atlassian.com/manage-profile/security/api-tokens) and use it as the password. |
|
||||
| API Token | An alternative to basic authentication, a bearer token is used to authorize the API requests. See [Jira documentation](https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html) for more information. |
|
||||
| Project Key | The project key identifying the project where issues will be created. Project keys are unique identifiers for a project. |
|
||||
| Issue Type | The type of issue to create (e.g., `Task`, `Bug`, `Incident`). Make sure that you specify a type that is available in your project. |
|
||||
|
||||
### Optional Settings
|
||||
|
||||
|
||||
@@ -54,10 +54,10 @@ For more details on contact points, including how to test them and enable notifi
|
||||
|
||||
### Required Settings
|
||||
|
||||
| Option | Description |
|
||||
| ---------- | ----------------------------------------------------------------------------------------------------------------------- |
|
||||
| Broker URL | The URL of the MQTT broker. This field is [protected](ref:configure-contact-points) from modification in Grafana Cloud. |
|
||||
| Topic | The topic to which the message will be sent. |
|
||||
| Option | Description |
|
||||
| ---------- | ---------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Broker URL | The URL of the MQTT broker. This field is [protected](ref:configure-contact-points#protected-fields) from modification in Grafana Cloud. |
|
||||
| Topic | The topic to which the message will be sent. |
|
||||
|
||||
### Optional Settings
|
||||
|
||||
|
||||
@@ -51,8 +51,8 @@ You can customize the `title` and `body` of the Slack message using [notificatio
|
||||
|
||||
If you are using a Slack API Token, complete the following steps.
|
||||
|
||||
1. Follow step 1 of the [Slack API Quickstart](https://docs.slack.dev/app-management/quickstart-app-settings/#creating) to create the app.
|
||||
1. Continue onto the second step of the [Slack API Quickstart](https://docs.slack.dev/app-management/quickstart-app-settings/#scopes) and add the [chat:write.public](https://api.slack.com/scopes/chat:write.public) scope as described to give your app the ability to post in all public channels without joining.
|
||||
1. Follow steps 1 and 2 of the [Slack API Quickstart](https://api.slack.com/start/quickstart).
|
||||
1. Add the [chat:write.public](https://api.slack.com/scopes/chat:write.public) scope to give your app the ability to post in all public channels without joining.
|
||||
1. In OAuth Tokens for Your Workspace, copy the Bot User OAuth Token.
|
||||
1. Open your Slack workplace.
|
||||
1. Right click the channel you want to receive notifications in.
|
||||
|
||||
@@ -62,9 +62,9 @@ For more details on contact points, including how to test them and enable notifi
|
||||
|
||||
## Webhook settings
|
||||
|
||||
| Option | Description |
|
||||
| ------ | ------------------------------------------------------------------------------------------------------------ |
|
||||
| URL | The Webhook URL. This field is [protected](ref:configure-contact-points) from modification in Grafana Cloud. |
|
||||
| Option | Description |
|
||||
| ------ | ----------------------------------------------------------------------------------------------------------------------------- |
|
||||
| URL | The Webhook URL. This field is [protected](ref:configure-contact-points#protected-fields) from modification in Grafana Cloud. |
|
||||
|
||||
#### Optional settings
|
||||
|
||||
|
||||
@@ -81,7 +81,7 @@ Replace the placeholders with your values:
|
||||
|
||||
In your `grafana` directory, create a sub-folder called `dashboards`.
|
||||
|
||||
This guide shows you how to create three separate dashboards. For all dashboard configurations, replace the placeholders with your values:
|
||||
This guide shows you how to creates three separate dashboards. For all dashboard configurations, replace the placeholders with your values:
|
||||
|
||||
- _`<GRAFANA_CLOUD_STACK_NAME>`_: Name of your Grafana Cloud Stack
|
||||
- _`<GRAFANA_OPERATOR_NAMESPACE>`_: Namespace where the `grafana-operator` is deployed in your Kubernetes cluster
|
||||
|
||||
@@ -1,147 +0,0 @@
|
||||
---
|
||||
title: Git Sync deployment scenarios
|
||||
menuTitle: Deployment scenarios
|
||||
description: Learn about common Git Sync deployment patterns and configurations for different organizational needs
|
||||
weight: 450
|
||||
keywords:
|
||||
- git sync
|
||||
- deployment patterns
|
||||
- scenarios
|
||||
- multi-environment
|
||||
- teams
|
||||
---
|
||||
|
||||
# Git Sync deployment scenarios
|
||||
|
||||
This guide shows practical deployment scenarios for Grafana’s Git Sync. Learn how to configure bidirectional synchronization between Grafana and Git repositories for teams, environments, and regions.
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
Git Sync is an experimental feature. It reflects Grafana’s approach to Observability as Code and might include limitations or breaking changes. For current status and known limitations, refer to the [Git Sync introduction](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/intro-git-sync/).
|
||||
{{< /admonition >}}
|
||||
|
||||
## Understand the relationship between key Git Sync components
|
||||
|
||||
Before you explore the scenarios, understand how the key Git Sync components relate:
|
||||
|
||||
- [Grafana instance](#grafana-instance)
|
||||
- [Git repository structure](#git-repository-structure)
|
||||
- [Git Sync repository resource](#git-sync-repository-resource)
|
||||
|
||||
### Grafana instance
|
||||
|
||||
A Grafana instance is a running Grafana server. Multiple instances can:
|
||||
|
||||
- Connect to the same Git repository using different Repository configurations.
|
||||
- Sync from different branches of the same repository.
|
||||
- Sync from different paths within the same repository.
|
||||
- Sync from different repositories.
|
||||
|
||||
### Git repository structure
|
||||
|
||||
You can organize your Git repository in several ways:
|
||||
|
||||
- Single branch, multiple paths: Use different directories for different purposes (for example, `dev/`, `prod/`, `team-a/`).
|
||||
- Multiple branches: Use different branches for different environments or teams (for example, `main`, `develop`, `team-a`).
|
||||
- Multiple repositories: Use separate repositories for different teams or environments.
|
||||
|
||||
### Git Sync repository resource
|
||||
|
||||
A repository resource is a Grafana configuration object that defines:
|
||||
|
||||
- Which Git repository to sync with.
|
||||
- Which branch to use.
|
||||
- Which directory path to synchronize.
|
||||
- Sync behavior and workflows.
|
||||
|
||||
Each repository resource creates bidirectional synchronization between a Grafana instance and a specific location in Git.
|
||||
|
||||
## How does repository sync behave?
|
||||
|
||||
With Git Sync you configure a repository resource to sync with your Grafana instance:
|
||||
|
||||
1. Grafana monitors the specified Git location (repository, branch, and path).
|
||||
2. Grafana creates a folder in Dashboards (typically named after the repository).
|
||||
3. Grafana creates dashboards from dashboard JSON files in Git within this folder.
|
||||
4. Grafana commits dashboard changes made in the UI back to Git.
|
||||
5. Grafana pulls dashboard changes made in Git and updates dashboards in the UI.
|
||||
6. Synchronization occurs at regular intervals (configurable), or instantly if you use webhooks.
|
||||
|
||||
You can find the provisioned dashboards organized in folders under **Dashboards**.
|
||||
|
||||
## Example: Relationship between repository, branch, and path
|
||||
|
||||
Here's a concrete example showing how the three parameters work together:
|
||||
|
||||
**Configuration:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `team-platform/grafana/`
|
||||
|
||||
**In Git (on branch `main`):**
|
||||
|
||||
```
|
||||
your-org/grafana-manifests/
|
||||
├── .git/
|
||||
├── README.md
|
||||
├── team-platform/
|
||||
│ └── grafana/
|
||||
│ ├── cpu-metrics.json ← Synced
|
||||
│ ├── memory-usage.json ← Synced
|
||||
│ └── disk-io.json ← Synced
|
||||
├── team-data/
|
||||
│ └── grafana/
|
||||
│ └── pipeline-stats.json ← Not synced (different path)
|
||||
└── other-files.txt ← Not synced (outside path)
|
||||
```
|
||||
|
||||
**In Grafana Dashboards view:**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
└── 📁 grafana-manifests/
|
||||
├── CPU Metrics Dashboard
|
||||
├── Memory Usage Dashboard
|
||||
└── Disk I/O Dashboard
|
||||
```
|
||||
|
||||
**Key points:**
|
||||
|
||||
- Grafana only synchronizes files within the specified path (`team-platform/grafana/`).
|
||||
- Grafana ignores files in other paths or at the repository root.
|
||||
- The folder name in Grafana comes from the repository name.
|
||||
- Dashboard titles come from the JSON file content, not the filename.
|
||||
|
||||
## Repository configuration flexibility
|
||||
|
||||
Git Sync repositories support different combinations of repository URL, branch, and path:
|
||||
|
||||
- Different Git repositories: Each environment or team can use its own repository.
|
||||
- Instance A: `repository: your-org/grafana-prod`.
|
||||
- Instance B: `repository: your-org/grafana-dev`.
|
||||
- Different branches: Use separate branches within the same repository.
|
||||
- Instance A: `repository: your-org/grafana-manifests, branch: main`.
|
||||
- Instance B: `repository: your-org/grafana-manifests, branch: develop`.
|
||||
- Different paths: Use different directory paths within the same repository.
|
||||
- Instance A: `repository: your-org/grafana-manifests, branch: main, path: production/`.
|
||||
- Instance B: `repository: your-org/grafana-manifests, branch: main, path: development/`.
|
||||
- Any combination: Mix and match based on your workflow requirements.
|
||||
|
||||
## Scenarios
|
||||
|
||||
Use these deployment scenarios to plan your Git Sync setup:
|
||||
|
||||
- [Single instance](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios/single-instance/)
|
||||
- [Git Sync for development and production environments](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios/dev-prod/)
|
||||
- [Git Sync with regional replication](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios/multi-region/)
|
||||
- [High availability](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios/high-availability/)
|
||||
- [Git Sync in a shared Grafana instance](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios/multi-team/)
|
||||
|
||||
## Learn more
|
||||
|
||||
Refer to the following documents to learn more:
|
||||
|
||||
- [Git Sync introduction](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/intro-git-sync/)
|
||||
- [Git Sync setup guide](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-setup/)
|
||||
- [Dashboard provisioning](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/administration/provisioning/)
|
||||
- [Observability as Code](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/)
|
||||
@@ -1,147 +0,0 @@
|
||||
---
|
||||
title: Git Sync for development and production environments
|
||||
menuTitle: Across environments
|
||||
description: Use separate Grafana instances for development and production with Git-controlled promotion
|
||||
weight: 20
|
||||
---
|
||||
|
||||
# Git Sync for development and production environments
|
||||
|
||||
Use separate Grafana instances for development and production. Each syncs with different Git locations to test dashboards before production.
|
||||
|
||||
## Use it for
|
||||
|
||||
- **Staged deployments**: You need to test dashboard changes before production deployment.
|
||||
- **Change control**: You require approvals before dashboards reach production.
|
||||
- **Quality assurance**: You verify dashboard functionality in a non-production environment.
|
||||
- **Risk mitigation**: You minimize the risk of breaking production dashboards.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌────────────────────────────────────────────────────────────┐
|
||||
│ GitHub Repository │
|
||||
│ Repository: your-org/grafana-manifests │
|
||||
│ Branch: main │
|
||||
│ │
|
||||
│ grafana-manifests/ │
|
||||
│ ├── dev/ │
|
||||
│ │ ├── dashboard-new.json ← Development dashboards │
|
||||
│ │ └── dashboard-test.json │
|
||||
│ │ │
|
||||
│ └── prod/ │
|
||||
│ ├── dashboard-stable.json ← Production dashboards │
|
||||
│ └── dashboard-approved.json │
|
||||
└────────────────────────────────────────────────────────────┘
|
||||
↕ ↕
|
||||
Git Sync (dev/) Git Sync (prod/)
|
||||
↕ ↕
|
||||
┌─────────────────────┐ ┌─────────────────────┐
|
||||
│ Dev Grafana │ │ Prod Grafana │
|
||||
│ │ │ │
|
||||
│ Repository: │ │ Repository: │
|
||||
│ - path: dev/ │ │ - path: prod/ │
|
||||
│ │ │ │
|
||||
│ Creates folder: │ │ Creates folder: │
|
||||
│ "grafana-manifests"│ │ "grafana-manifests"│
|
||||
└─────────────────────┘ └─────────────────────┘
|
||||
```
|
||||
|
||||
## Repository structure
|
||||
|
||||
**In Git:**
|
||||
|
||||
```
|
||||
your-org/grafana-manifests
|
||||
├── dev/
|
||||
│ ├── dashboard-new.json
|
||||
│ └── dashboard-test.json
|
||||
└── prod/
|
||||
├── dashboard-stable.json
|
||||
└── dashboard-approved.json
|
||||
```
|
||||
|
||||
**In Grafana Dashboards view:**
|
||||
|
||||
**Dev instance:**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
└── 📁 grafana-manifests/
|
||||
├── New Dashboard
|
||||
└── Test Dashboard
|
||||
```
|
||||
|
||||
**Prod instance:**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
└── 📁 grafana-manifests/
|
||||
├── Stable Dashboard
|
||||
└── Approved Dashboard
|
||||
```
|
||||
|
||||
- Both instances create a folder named "grafana-manifests" (from repository name)
|
||||
- Each instance only shows dashboards from its configured path (`dev/` or `prod/`)
|
||||
- Dashboards appear with their titles from the JSON files
|
||||
|
||||
## Configuration parameters
|
||||
|
||||
Development:
|
||||
|
||||
- Repository: `your-org/grafana-manifests`
|
||||
- Branch: `main`
|
||||
- Path: `dev/`
|
||||
|
||||
Production:
|
||||
|
||||
- Repository: `your-org/grafana-manifests`
|
||||
- Branch: `main`
|
||||
- Path: `prod/`
|
||||
|
||||
## How it works
|
||||
|
||||
1. Developers create and modify dashboards in development.
|
||||
2. Git Sync commits changes to `dev/`.
|
||||
3. You review changes in Git.
|
||||
4. You promote approved dashboards from `dev/` to `prod/`.
|
||||
5. Production syncs from `prod/`.
|
||||
6. Production dashboards update.
|
||||
|
||||
## Alternative: Use branches
|
||||
|
||||
Instead of using different paths, you can configure instances to use different branches:
|
||||
|
||||
**Development instance:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `develop`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
**Production instance:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
With this approach:
|
||||
|
||||
- Development changes go to the `develop` branch
|
||||
- Use Git merge or pull request workflows to promote changes from `develop` to `main`
|
||||
- Production automatically syncs from the `main` branch
|
||||
|
||||
## Alternative: Use separate repositories for stricter isolation
|
||||
|
||||
For stricter isolation, use completely separate repositories:
|
||||
|
||||
**Development instance:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests-dev`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
**Production instance:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests-prod`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `grafana/`
|
||||
@@ -1,217 +0,0 @@
|
||||
---
|
||||
title: Git Sync for high availability environments
|
||||
menuTitle: High availability
|
||||
description: Run multiple Grafana instances serving traffic simultaneously, synchronized via Git Sync
|
||||
weight: 50
|
||||
---
|
||||
|
||||
# Git Sync for high availability environments
|
||||
|
||||
## Primary–replica scenario
|
||||
|
||||
Use a primary Grafana instance and one or more replicas synchronized with the same Git location to enable failover.
|
||||
|
||||
### Use it for
|
||||
|
||||
- **Automatic failover**: You need service continuity when the primary instance fails.
|
||||
- **High availability**: Your organization requires guaranteed dashboard availability.
|
||||
- **Simple HA setup**: You want high availability without the complexity of active–active.
|
||||
- **Maintenance windows**: You perform updates while another instance serves traffic.
|
||||
- **Business continuity**: Dashboard access can't tolerate downtime.
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ GitHub Repository │
|
||||
│ Repository: your-org/grafana-manifests │
|
||||
│ Branch: main │
|
||||
│ │
|
||||
│ grafana-manifests/ │
|
||||
│ └── shared/ │
|
||||
│ ├── dashboard-metrics.json │
|
||||
│ ├── dashboard-alerts.json │
|
||||
│ └── dashboard-logs.json │
|
||||
└─────────────────────────────────────────────────────┘
|
||||
↕ ↕
|
||||
Git Sync (shared/) Git Sync (shared/)
|
||||
↕ ↕
|
||||
┌────────────────────┐ ┌────────────────────┐
|
||||
│ Master Grafana │ │ Replica Grafana │
|
||||
│ (Active) │ │ (Standby) │
|
||||
│ │ │ │
|
||||
│ Repository: │ │ Repository: │
|
||||
│ - path: shared/ │ │ - path: shared/ │
|
||||
└────────────────────┘ └────────────────────┘
|
||||
│ │
|
||||
└───────────┬───────────────────┘
|
||||
↓
|
||||
┌──────────────────────┐
|
||||
│ Reverse Proxy │
|
||||
│ (Failover) │
|
||||
└──────────────────────┘
|
||||
```
|
||||
|
||||
### Repository structure
|
||||
|
||||
**In Git:**
|
||||
|
||||
```
|
||||
your-org/grafana-manifests
|
||||
└── shared/
|
||||
├── dashboard-metrics.json
|
||||
├── dashboard-alerts.json
|
||||
└── dashboard-logs.json
|
||||
```
|
||||
|
||||
**In Grafana Dashboards view (both instances):**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
└── 📁 grafana-manifests/
|
||||
├── Metrics Dashboard
|
||||
├── Alerts Dashboard
|
||||
└── Logs Dashboard
|
||||
```
|
||||
|
||||
- Master and replica instances show identical folder structure.
|
||||
- Both sync from the same `shared/` path.
|
||||
- Reverse proxy routes traffic to master (active) instance.
|
||||
- If master fails, proxy automatically fails over to replica (standby).
|
||||
- Users see the same dashboards regardless of which instance is serving traffic.
|
||||
|
||||
### Configuration parameters
|
||||
|
||||
Both master and replica instances use identical parameters:
|
||||
|
||||
**Master instance:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `shared/`
|
||||
|
||||
**Replica instance:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `shared/`
|
||||
|
||||
### How it works
|
||||
|
||||
1. Both instances stay synchronized through Git.
|
||||
2. Reverse proxy routes traffic to primary.
|
||||
3. Users edit on primary. Git Sync commits changes.
|
||||
4. Both instances pull latest changes to keep replica in sync.
|
||||
5. On primary failure, proxy fails over to replica.
|
||||
|
||||
### Failover considerations
|
||||
|
||||
- Health checks and monitoring.
|
||||
- Continuous syncing to minimize data loss.
|
||||
- Plan failback (automatic or manual).
|
||||
|
||||
## Load balancer scenario
|
||||
|
||||
Run multiple active Grafana instances behind a load balancer. All instances sync from the same Git location.
|
||||
|
||||
### Use it for
|
||||
|
||||
- **High traffic**: Your deployment needs to handle significant user load.
|
||||
- **Load distribution**: You want to distribute user requests across instances.
|
||||
- **Maximum availability**: You need service continuity during maintenance or failures.
|
||||
- **Scalability**: You want to add instances as load increases.
|
||||
- **Performance**: Users need fast response times under heavy load.
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ GitHub Repository │
|
||||
│ Repository: your-org/grafana-manifests │
|
||||
│ Branch: main │
|
||||
│ │
|
||||
│ grafana-manifests/ │
|
||||
│ └── shared/ │
|
||||
│ ├── dashboard-metrics.json │
|
||||
│ ├── dashboard-alerts.json │
|
||||
│ └── dashboard-logs.json │
|
||||
└─────────────────────────────────────────────────────┘
|
||||
↕ ↕
|
||||
Git Sync (shared/) Git Sync (shared/)
|
||||
↕ ↕
|
||||
┌────────────────────┐ ┌────────────────────┐
|
||||
│ Grafana Instance 1│ │ Grafana Instance 2│
|
||||
│ (Active) │ │ (Active) │
|
||||
│ │ │ │
|
||||
│ Repository: │ │ Repository: │
|
||||
│ - path: shared/ │ │ - path: shared/ │
|
||||
└────────────────────┘ └────────────────────┘
|
||||
│ │
|
||||
└───────────┬───────────────────┘
|
||||
↓
|
||||
┌──────────────────────┐
|
||||
│ Load Balancer │
|
||||
│ (Round Robin) │
|
||||
└──────────────────────┘
|
||||
```
|
||||
|
||||
### Repository structure
|
||||
|
||||
**In Git:**
|
||||
|
||||
```
|
||||
your-org/grafana-manifests
|
||||
└── shared/
|
||||
├── dashboard-metrics.json
|
||||
├── dashboard-alerts.json
|
||||
└── dashboard-logs.json
|
||||
```
|
||||
|
||||
**In Grafana Dashboards view (all instances):**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
└── 📁 grafana-manifests/
|
||||
├── Metrics Dashboard
|
||||
├── Alerts Dashboard
|
||||
└── Logs Dashboard
|
||||
```
|
||||
|
||||
- All instances show identical folder structure.
|
||||
- All instances sync from the same `shared/` path.
|
||||
- Load balancer distributes requests across all active instances.
|
||||
- Any instance can serve read requests.
|
||||
- Any instance can accept dashboard modifications.
|
||||
- Changes propagate to all instances through Git.
|
||||
|
||||
### Configuration parameters
|
||||
|
||||
All instances use identical parameters:
|
||||
|
||||
**Instance 1:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `shared/`
|
||||
|
||||
**Instance 2:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `shared/`
|
||||
|
||||
### How it works
|
||||
|
||||
1. All instances stay synchronized through Git.
|
||||
2. Load balancer distributes incoming traffic across all active instances.
|
||||
3. Users can view dashboards from any instance.
|
||||
4. When a user modifies a dashboard on any instance, Git Sync commits the change.
|
||||
5. All other instances pull the updated dashboard during their next sync cycle, or instantly if webhooks are configured.
|
||||
6. If one instance fails, load balancer stops routing traffic to it and remaining instances continue serving.
|
||||
|
||||
### Important considerations
|
||||
|
||||
- **Eventually consistent**: Due to sync intervals, instances may briefly have different dashboard versions.
|
||||
- **Concurrent edits**: Multiple users editing the same dashboard on different instances can cause conflicts.
|
||||
- **Database sharing**: Instances should share the same backend database for user sessions, preferences, and annotations.
|
||||
- **Stateless design**: Design for stateless operation where possible to maximize load balancing effectiveness.
|
||||
@@ -1,93 +0,0 @@
|
||||
---
|
||||
title: Git Sync with regional replication
|
||||
menuTitle: Regional replication
|
||||
description: Synchronize multiple regional Grafana instances from a shared Git location
|
||||
weight: 30
|
||||
---
|
||||
|
||||
# Git Sync with regional replication
|
||||
|
||||
Deploy multiple Grafana instances across regions. Synchronize them with the same Git location to ensure consistent dashboards everywhere.
|
||||
|
||||
## Use it for
|
||||
|
||||
- **Geographic distribution**: You deploy Grafana close to users in different regions.
|
||||
- **Latency reduction**: Users need fast dashboard access from their location.
|
||||
- **Data sovereignty**: You keep dashboard data in specific regions.
|
||||
- **High availability**: You need dashboard availability across regions.
|
||||
- **Consistent experience**: All users see the same dashboards regardless of region.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ GitHub Repository │
|
||||
│ Repository: your-org/grafana-manifests │
|
||||
│ Branch: main │
|
||||
│ │
|
||||
│ grafana-manifests/ │
|
||||
│ └── shared/ │
|
||||
│ ├── dashboard-global.json │
|
||||
│ ├── dashboard-metrics.json │
|
||||
│ └── dashboard-logs.json │
|
||||
└─────────────────────────────────────────────────────┘
|
||||
↕ ↕
|
||||
Git Sync (shared/) Git Sync (shared/)
|
||||
↕ ↕
|
||||
┌────────────────────┐ ┌────────────────────┐
|
||||
│ US Region │ │ EU Region │
|
||||
│ Grafana │ │ Grafana │
|
||||
│ │ │ │
|
||||
│ Repository: │ │ Repository: │
|
||||
│ - path: shared/ │ │ - path: shared/ │
|
||||
└────────────────────┘ └────────────────────┘
|
||||
```
|
||||
|
||||
## Repository structure
|
||||
|
||||
**In Git:**
|
||||
|
||||
```
|
||||
your-org/grafana-manifests
|
||||
└── shared/
|
||||
├── dashboard-global.json
|
||||
├── dashboard-metrics.json
|
||||
└── dashboard-logs.json
|
||||
```
|
||||
|
||||
**In Grafana Dashboards view (all regions):**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
└── 📁 grafana-manifests/
|
||||
├── Global Dashboard
|
||||
├── Metrics Dashboard
|
||||
└── Logs Dashboard
|
||||
```
|
||||
|
||||
- All regional instances (US, EU, etc.) show identical folder structure
|
||||
- Same folder name "grafana-manifests" in every region
|
||||
- Same dashboards synced from the `shared/` path appear everywhere
|
||||
- Users in any region see the exact same dashboards with the same titles
|
||||
|
||||
## Configuration parameters
|
||||
|
||||
All regions:
|
||||
|
||||
- Repository: `your-org/grafana-manifests`
|
||||
- Branch: `main`
|
||||
- Path: `shared/`
|
||||
|
||||
## How it works
|
||||
|
||||
1. All regional instances pull dashboards from `shared/`.
|
||||
2. Any region’s change commits to Git.
|
||||
3. Other regions pull updates during the next sync (or via webhooks).
|
||||
4. Changes propagate across regions per sync interval.
|
||||
|
||||
## Considerations
|
||||
|
||||
- **Write conflicts**: If users in different regions modify the same dashboard simultaneously, Git uses last-write-wins.
|
||||
- **Primary region**: Consider designating one region as the primary location for making dashboard changes.
|
||||
- **Propagation time**: Changes propagate to all regions within the configured sync interval, or instantly if webhooks are configured.
|
||||
- **Network reliability**: Ensure all regions have reliable connectivity to the Git repository.
|
||||
@@ -1,169 +0,0 @@
|
||||
---
|
||||
title: Multiple team Git Sync
|
||||
menuTitle: Shared instance
|
||||
description: Use multiple Git repositories with one Grafana instance, one repository per team
|
||||
weight: 60
|
||||
---
|
||||
|
||||
# Git Sync in a Grafana instance shared by multiple teams
|
||||
|
||||
Use a single Grafana instance with multiple Repository resources, one per team. Each team manages its own dashboards while sharing Grafana.
|
||||
|
||||
## Use it for
|
||||
|
||||
- **Team autonomy**: Different teams manage their own dashboards independently.
|
||||
- **Organizational structure**: Dashboard organization aligns with team structure.
|
||||
- **Resource efficiency**: Multiple teams share Grafana infrastructure.
|
||||
- **Cost optimization**: You reduce infrastructure costs while maintaining team separation.
|
||||
- **Collaboration**: Teams can view each other’s dashboards while managing their own.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────┐ ┌─────────────────────────┐
|
||||
│ Platform Team Repo │ │ Data Team Repo │
|
||||
│ platform-dashboards │ │ data-dashboards │
|
||||
│ │ │ │
|
||||
│ platform-dashboards/ │ │ data-dashboards/ │
|
||||
│ └── grafana/ │ │ └── grafana/ │
|
||||
│ ├── k8s.json │ │ ├── pipeline.json │
|
||||
│ └── infra.json │ │ └── analytics.json │
|
||||
└─────────────────────────┘ └─────────────────────────┘
|
||||
↕ ↕
|
||||
Git Sync (grafana/) Git Sync (grafana/)
|
||||
↕ ↕
|
||||
┌──────────────────────────────────────┐
|
||||
│ Grafana Instance │
|
||||
│ │
|
||||
│ Repository 1: │
|
||||
│ - repo: platform-dashboards │
|
||||
│ → Creates "platform-dashboards" │
|
||||
│ │
|
||||
│ Repository 2: │
|
||||
│ - repo: data-dashboards │
|
||||
│ → Creates "data-dashboards" │
|
||||
└──────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Repository structure
|
||||
|
||||
**In Git (separate repositories):**
|
||||
|
||||
**Platform team repository:**
|
||||
|
||||
```
|
||||
your-org/platform-dashboards
|
||||
└── grafana/
|
||||
├── dashboard-k8s.json
|
||||
└── dashboard-infra.json
|
||||
```
|
||||
|
||||
**Data team repository:**
|
||||
|
||||
```
|
||||
your-org/data-dashboards
|
||||
└── grafana/
|
||||
├── dashboard-pipeline.json
|
||||
└── dashboard-analytics.json
|
||||
```
|
||||
|
||||
**In Grafana Dashboards view:**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
├── 📁 platform-dashboards/
|
||||
│ ├── Kubernetes Dashboard
|
||||
│ └── Infrastructure Dashboard
|
||||
└── 📁 data-dashboards/
|
||||
├── Pipeline Dashboard
|
||||
└── Analytics Dashboard
|
||||
```
|
||||
|
||||
- Two separate folders created (one per Repository resource).
|
||||
- Folder names derived from repository names.
|
||||
- Each team has complete control over their own repository.
|
||||
- Teams can independently manage permissions, branches, and workflows in their repos.
|
||||
- All teams can view each other's dashboards in Grafana but manage only their own.
|
||||
|
||||
## Configuration parameters
|
||||
|
||||
**Platform team repository:**
|
||||
|
||||
- **Repository**: `your-org/platform-dashboards`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
**Data team repository:**
|
||||
|
||||
- **Repository**: `your-org/data-dashboards`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
## How it works
|
||||
|
||||
1. Each team has their own Git repository for complete autonomy.
|
||||
2. Each repository resource in Grafana creates a separate folder.
|
||||
3. Platform team dashboards sync from `your-org/platform-dashboards` repository.
|
||||
4. Data team dashboards sync from `your-org/data-dashboards` repository.
|
||||
5. Teams can independently manage their repository settings, access controls, and workflows.
|
||||
6. All teams can view each other's dashboards in Grafana but edit only their own.
|
||||
|
||||
## Scale to more teams
|
||||
|
||||
Adding more teams is straightforward. For a third team, create a new repository and configure:
|
||||
|
||||
- **Repository**: `your-org/security-dashboards`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
This creates a new "security-dashboards" folder in the same Grafana instance.
|
||||
|
||||
## Alternative: Shared repository with different paths
|
||||
|
||||
For teams that prefer sharing a single repository, use different paths to separate team dashboards:
|
||||
|
||||
**In Git:**
|
||||
|
||||
```
|
||||
your-org/grafana-manifests
|
||||
├── team-platform/
|
||||
│ ├── dashboard-k8s.json
|
||||
│ └── dashboard-infra.json
|
||||
└── team-data/
|
||||
├── dashboard-pipeline.json
|
||||
└── dashboard-analytics.json
|
||||
```
|
||||
|
||||
**Configuration:**
|
||||
|
||||
**Platform team:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `team-platform/`
|
||||
|
||||
**Data team:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `team-data/`
|
||||
|
||||
This approach provides simpler repository management but less isolation between teams.
|
||||
|
||||
## Alternative: Different branches per team
|
||||
|
||||
For teams wanting their own branch in a shared repository:
|
||||
|
||||
**Platform team:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `team-platform`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
**Data team:**
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `team-data`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
This allows teams to use Git branch workflows for collaboration while sharing the same repository.
|
||||
@@ -1,86 +0,0 @@
|
||||
---
|
||||
title: Single instance Git Sync
|
||||
menuTitle: Single instance
|
||||
description: Synchronize a single Grafana instance with a Git repository
|
||||
weight: 10
|
||||
---
|
||||
|
||||
# Single instance Git Sync
|
||||
|
||||
Use a single Grafana instance synchronized with a Git repository. This is the foundation for Git Sync and helps you understand bidirectional synchronization.
|
||||
|
||||
## Use it for
|
||||
|
||||
- **Getting started**: You want to learn how Git Sync works before implementing complex scenarios.
|
||||
- **Personal projects**: Individual developers manage their own dashboards.
|
||||
- **Small teams**: You have a simple setup without multiple environments or complex workflows.
|
||||
- **Development environments**: You need quick prototyping and testing.
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ GitHub Repository │
|
||||
│ Repository: your-org/grafana-manifests │
|
||||
│ Branch: main │
|
||||
│ │
|
||||
│ grafana-manifests/ │
|
||||
│ └── grafana/ │
|
||||
│ ├── dashboard-1.json │
|
||||
│ ├── dashboard-2.json │
|
||||
│ └── dashboard-3.json │
|
||||
└─────────────────────────────────────────────────────┘
|
||||
↕
|
||||
Git Sync (bidirectional)
|
||||
↕
|
||||
┌─────────────────────────────┐
|
||||
│ Grafana Instance │
|
||||
│ │
|
||||
│ Repository Resource: │
|
||||
│ - url: grafana-manifests │
|
||||
│ - branch: main │
|
||||
│ - path: grafana/ │
|
||||
│ │
|
||||
│ Creates folder: │
|
||||
│ "grafana-manifests" │
|
||||
└─────────────────────────────┘
|
||||
```
|
||||
|
||||
## Repository structure
|
||||
|
||||
**In Git:**
|
||||
|
||||
```
|
||||
your-org/grafana-manifests
|
||||
└── grafana/
|
||||
├── dashboard-1.json
|
||||
├── dashboard-2.json
|
||||
└── dashboard-3.json
|
||||
```
|
||||
|
||||
**In Grafana Dashboards view:**
|
||||
|
||||
```
|
||||
Dashboards
|
||||
└── 📁 grafana-manifests/
|
||||
├── Dashboard 1
|
||||
├── Dashboard 2
|
||||
└── Dashboard 3
|
||||
```
|
||||
|
||||
- A folder named "grafana-manifests" (from repository name) contains all synced dashboards.
|
||||
- Each JSON file becomes a dashboard with its title displayed in the folder.
|
||||
- Users browse dashboards organized under this folder structure.
|
||||
|
||||
## Configuration parameters
|
||||
|
||||
Configure your Grafana instance to synchronize with:
|
||||
|
||||
- **Repository**: `your-org/grafana-manifests`
|
||||
- **Branch**: `main`
|
||||
- **Path**: `grafana/`
|
||||
|
||||
## How it works
|
||||
|
||||
1. **From Grafana to Git**: When users create or modify dashboards in Grafana, Git Sync commits changes to the `grafana/` directory on the `main` branch.
|
||||
2. **From Git to Grafana**: When dashboard JSON files are added or modified in the `grafana/` directory, Git Sync pulls these changes into Grafana.
|
||||
@@ -367,6 +367,5 @@ To learn more about using Git Sync:
|
||||
|
||||
- [Work with provisioned dashboards](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/provisioned-dashboards/)
|
||||
- [Manage provisioned repositories with Git Sync](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/use-git-sync/)
|
||||
- [Git Sync deployment scenarios](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios)
|
||||
- [Export resources](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/export-resources/)
|
||||
- [grafanactl documentation](https://grafana.github.io/grafanactl/)
|
||||
|
||||
@@ -127,13 +127,7 @@ An instance can be in one of the following Git Sync states:
|
||||
|
||||
## Common use cases
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
|
||||
Refer to [Git Sync deployment scenarios](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/as-code/observability-as-code/provision-resources/git-sync-deployment-scenarios) for sample scenarios, including architecture and configuration details.
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
You can use Git Sync for the following use cases:
|
||||
You can use Git Sync in the following scenarios.
|
||||
|
||||
### Version control and auditing
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ labels:
|
||||
- cloud
|
||||
title: Manage provisioned repositories with Git Sync
|
||||
menuTitle: Manage repositories with Git Sync
|
||||
weight: 400
|
||||
weight: 120
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/observability-as-code/provision-resources/use-git-sync/
|
||||
aliases:
|
||||
- ../../../observability-as-code/provision-resources/use-git-sync/ # /docs/grafana/next/observability-as-code/provision-resources/use-git-sync/
|
||||
|
||||
@@ -62,6 +62,5 @@ The table includes default and other fields:
|
||||
| targetBlank | bool. If true, the link will be opened in a new tab. Default is `false`. |
|
||||
| includeVars | bool. If true, includes current template variables values in the link as query params. Default is `false`. |
|
||||
| keepTime | bool. If true, includes current time range in the link as query params. Default is `false`. |
|
||||
| placement? | string. Use placement to display the link somewhere else on the dashboard other than above the visualizations. Use the `inControlsMenu` parameter to render the link in the dashboard controls dropdown menu. |
|
||||
|
||||
<!-- prettier-ignore-end -->
|
||||
|
||||
@@ -3,6 +3,7 @@ aliases:
|
||||
- ../data-sources/azure-monitor/
|
||||
- ../features/datasources/azuremonitor/
|
||||
- azuremonitor/
|
||||
- azuremonitor/deprecated-application-insights/
|
||||
description: Guide for using Azure Monitor in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
@@ -22,7 +23,6 @@ labels:
|
||||
menuTitle: Azure Monitor
|
||||
title: Azure Monitor data source
|
||||
weight: 300
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
configure-grafana-feature-toggles:
|
||||
- pattern: /docs/grafana/
|
||||
@@ -49,11 +49,6 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
transform-data:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
|
||||
configure-grafana-azure:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
|
||||
@@ -68,98 +63,295 @@ refs:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-access/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
query-editor-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
template-variables-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
alerting-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
troubleshooting-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
annotations-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-access/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
---
|
||||
|
||||
# Azure Monitor data source
|
||||
|
||||
The Azure Monitor data source plugin allows you to query and visualize data from Azure Monitor, the Azure service to maximize the availability and performance of applications and services in the Azure Cloud.
|
||||
Grafana ships with built-in support for Azure Monitor, the Azure service to maximize the availability and performance of applications and services in the Azure Cloud.
|
||||
This topic explains configuring and querying specific to the Azure Monitor data source.
|
||||
|
||||
## Supported Azure clouds
|
||||
For instructions on how to add a data source to Grafana, refer to the [administration documentation](ref:data-source-management).
|
||||
Only users with the organization administrator role can add data sources.
|
||||
|
||||
The Azure Monitor data source supports the following Azure cloud environments:
|
||||
Once you've added the Azure Monitor data source, you can [configure it](#configure-the-data-source) so that your Grafana instance's users can create queries in its [query editor](query-editor/) when they [build dashboards](ref:build-dashboards) and use [Explore](ref:explore).
|
||||
|
||||
- **Azure** - Azure public cloud (default)
|
||||
- **Azure US Government** - Azure Government cloud
|
||||
- **Azure China** - Azure China cloud operated by 21Vianet
|
||||
The Azure Monitor data source supports visualizing data from four Azure services:
|
||||
|
||||
## Supported Azure services
|
||||
- **Azure Monitor Metrics:** Collect numeric data from resources in your Azure account.
|
||||
- **Azure Monitor Logs:** Collect log and performance data from your Azure account, and query using the Kusto Query Language (KQL).
|
||||
- **Azure Resource Graph:** Query your Azure resources across subscriptions.
|
||||
- **Azure Monitor Application Insights:** Collect trace logging data and other application performance metrics.
|
||||
|
||||
The Azure Monitor data source supports the following Azure services:
|
||||
## Configure the data source
|
||||
|
||||
| Service | Description |
|
||||
| ------------------------------- | --------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Azure Monitor Metrics** | Collect numeric data from resources in your Azure account. Supports dimensions, aggregations, and time grain configuration. |
|
||||
| **Azure Monitor Logs** | Collect log and performance data from your Azure account using the Kusto Query Language (KQL). |
|
||||
| **Azure Resource Graph** | Query your Azure resources across subscriptions using KQL. Useful for inventory, compliance, and resource management. |
|
||||
| **Application Insights Traces** | Collect distributed trace data and correlate requests across your application components. |
|
||||
**To access the data source configuration page:**
|
||||
|
||||
## Get started
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Under Your connections, click **Data sources**.
|
||||
1. Enter `Azure Monitor` in the search bar.
|
||||
1. Click **Azure Monitor**.
|
||||
|
||||
The following documents will help you get started with the Azure Monitor data source:
|
||||
The **Settings** tab of the data source is displayed.
|
||||
|
||||
- [Configure the Azure Monitor data source](ref:configure-azure-monitor) - Set up authentication and connect to Azure
|
||||
- [Azure Monitor query editor](ref:query-editor-azure-monitor) - Create and edit queries for Metrics, Logs, Traces, and Resource Graph
|
||||
- [Template variables](ref:template-variables-azure-monitor) - Create dynamic dashboards with Azure Monitor variables
|
||||
- [Alerting](ref:alerting-azure-monitor) - Create alert rules using Azure Monitor data
|
||||
- [Troubleshooting](ref:troubleshooting-azure-monitor) - Solve common configuration and query errors
|
||||
### Configure Azure Active Directory (AD) authentication
|
||||
|
||||
## Additional features
|
||||
You must create an app registration and service principal in Azure AD to authenticate the data source.
|
||||
For configuration details, refer to the [Azure documentation for service principals](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in).
|
||||
|
||||
After you have configured the Azure Monitor data source, you can:
|
||||
The app registration you create must have the `Reader` role assigned on the subscription.
|
||||
For more information, refer to [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
|
||||
|
||||
- Add [Annotations](ref:annotations-azure-monitor) to overlay Azure log events on your graphs.
|
||||
- Configure and use [Template variables](ref:template-variables-azure-monitor) for dynamic dashboards.
|
||||
- Add [Transformations](ref:transform-data) to manipulate query results.
|
||||
- Set up [Alerting](ref:alerting-azure-monitor) and recording rules using Metrics, Logs, Traces, and Resource Graph queries.
|
||||
- Use [Explore](ref:explore) to investigate your Azure data without building a dashboard.
|
||||
If you host Grafana in Azure, such as in App Service or Azure Virtual Machines, you can configure the Azure Monitor data source to use Managed Identity for secure authentication without entering credentials into Grafana.
|
||||
For details, refer to [Configuring using Managed Identity](#configuring-using-managed-identity).
|
||||
|
||||
## Pre-built dashboards
|
||||
You can configure the Azure Monitor data source to use Workload Identity for secure authentication without entering credentials into Grafana if you host Grafana in a Kubernetes environment, such as AKS, and require access to Azure resources.
|
||||
For details, refer to [Configuring using Workload Identity](#configuring-using-workload-identity).
|
||||
|
||||
The Azure Monitor plugin includes the following pre-built dashboards:
|
||||
| Name | Description |
|
||||
| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Enables Managed Identity. Selecting Managed Identity hides many of the other fields. For details, see [Configuring using Managed Identity](#configuring-using-managed-identity). |
|
||||
| **Azure Cloud** | Sets the national cloud for your Azure account. For most users, this is the default "Azure". For details, see the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/develop/authentication-national-cloud). |
|
||||
| **Directory (tenant) ID** | Sets the directory/tenant ID for the Azure AD app registration to use for authentication. For details, see the [Azure tenant and app ID docs](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in). |
|
||||
| **Application (client) ID** | Sets the application/client ID for the Azure AD app registration to use for authentication. |
|
||||
| **Client secret** | Sets the application client secret for the Azure AD app registration to use for authentication. For details, see the [Azure application secret docs](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret). |
|
||||
| **Default subscription** | _(Optional)_ Sets a default subscription for template variables to use. |
|
||||
| **Enable Basic Logs** | Allows this data source to execute queries against [Basic Logs tables](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/basic-logs-query?tabs=portal-1) in supported Log Analytics Workspaces. These queries may incur additional costs. |
|
||||
|
||||
- **Azure Monitor Overview** - Displays key metrics across your Azure subscriptions and resources.
|
||||
- **Azure Storage Account** - Shows storage account metrics including availability, latency, and transactions.
|
||||
### Provision the data source
|
||||
|
||||
To import a pre-built dashboard:
|
||||
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
|
||||
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-data-sources).
|
||||
|
||||
1. Go to **Connections** > **Data sources**.
|
||||
1. Select your Azure Monitor data source.
|
||||
1. Click the **Dashboards** tab.
|
||||
1. Click **Import** next to the dashboard you want to use.
|
||||
#### Provisioning examples
|
||||
|
||||
## Related resources
|
||||
**Azure AD App Registration (client secret):**
|
||||
|
||||
- [Azure Monitor documentation](https://docs.microsoft.com/en-us/azure/azure-monitor/)
|
||||
- [Kusto Query Language (KQL) reference](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/)
|
||||
- [Grafana community forum](https://community.grafana.com/)
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: clientsecret
|
||||
cloudName: azuremonitor # See table below
|
||||
tenantId: <tenant-id>
|
||||
clientId: <client-id>
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
secureJsonData:
|
||||
clientSecret: <client-secret>
|
||||
version: 1
|
||||
```
|
||||
|
||||
**Managed Identity:**
|
||||
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: msi
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
**Workload Identity:**
|
||||
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: workloadidentity
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
**Current User:**
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The `oauthPassThru` property is required for current user authentication to function.
|
||||
Additionally, `disableGrafanaCache` is necessary to prevent the data source from returning cached responses for resources users don't have access to.
|
||||
{{< /admonition >}}
|
||||
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: currentuser
|
||||
oauthPassThru: true
|
||||
disableGrafanaCache: true
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
#### Supported cloud names
|
||||
|
||||
| Azure Cloud | `cloudName` Value |
|
||||
| ------------------------------------ | -------------------------- |
|
||||
| **Microsoft Azure public cloud** | `azuremonitor` (_Default_) |
|
||||
| **Microsoft Chinese national cloud** | `chinaazuremonitor` |
|
||||
| **US Government cloud** | `govazuremonitor` |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Cloud names for current user authentication differ from the `cloudName` values in the preceding table.
|
||||
The public cloud name is `AzureCloud`, the Chinese national cloud name is `AzureChinaCloud`, and the US Government cloud name is `AzureUSGovernment`.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Configure Managed Identity
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Managed Identity is available only in [Azure Managed Grafana](https://azure.microsoft.com/en-us/products/managed-grafana) or Grafana OSS/Enterprise when deployed in Azure. It is not available in Grafana Cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
You can use managed identity to configure Azure Monitor in Grafana if you host Grafana in Azure (such as an App Service or with Azure Virtual Machines) and have managed identity enabled on your VM.
|
||||
This lets you securely authenticate data sources without manually configuring credentials via Azure AD App Registrations.
|
||||
For details on Azure managed identities, refer to the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
|
||||
|
||||
**To enable managed identity for Grafana:**
|
||||
|
||||
1. Set the `managed_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
```
|
||||
|
||||
2. In the Azure Monitor data source configuration, set **Authentication** to **Managed Identity**.
|
||||
|
||||
This hides the directory ID, application ID, and client secret fields, and the data source uses managed identity to authenticate to Azure Monitor Metrics and Logs, and Azure Resource Graph.
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-managed-identity-2.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Managed Identity authentication" >}}
|
||||
|
||||
3. You can set the `managed_identity_client_id` field in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure) to allow a user-assigned managed identity to be used instead of the default system-assigned identity.
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
managed_identity_client_id = USER_ASSIGNED_IDENTITY_CLIENT_ID
|
||||
```
|
||||
|
||||
### Configure Workload Identity
|
||||
|
||||
You can use workload identity to configure Azure Monitor in Grafana if you host Grafana in a Kubernetes environment, such as AKS, in conjunction with managed identities.
|
||||
This lets you securely authenticate data sources without manually configuring credentials via Azure AD App Registrations.
|
||||
For details on workload identity, refer to the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/).
|
||||
|
||||
**To enable workload identity for Grafana:**
|
||||
|
||||
1. Set the `workload_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
```
|
||||
|
||||
2. In the Azure Monitor data source configuration, set **Authentication** to **Workload Identity**.
|
||||
|
||||
This hides the directory ID, application ID, and client secret fields, and the data source uses workload identity to authenticate to Azure Monitor Metrics and Logs, and Azure Resource Graph.
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-workload-identity.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Workload Identity authentication" >}}
|
||||
|
||||
3. There are additional configuration variables that can control the authentication method. `workload_identity_tenant_id` represents the Azure AD tenant that contains the managed identity, `workload_identity_client_id` represents the client ID of the managed identity if it differs from the default client ID, and `workload_identity_token_file` represents the path to the token file. Refer to the [documentation](https://azure.github.io/azure-workload-identity/docs/) for more information on what values these variables should use, if any.
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
workload_identity_tenant_id = IDENTITY_TENANT_ID
|
||||
workload_identity_client_id = IDENTITY_CLIENT_ID
|
||||
workload_identity_token_file = TOKEN_FILE_PATH
|
||||
```
|
||||
|
||||
### Configure Current User authentication
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Current user authentication is an [experimental feature](/docs/release-life-cycle). Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Contact Grafana Support to enable this feature in Grafana Cloud. Aspects of Grafana may not work as expected when using this authentication method.
|
||||
{{< /admonition >}}
|
||||
|
||||
If your Grafana instance is configured with Azure Entra (formerly Active Directory) authentication for login, this authentication method can be used to forward the currently logged-in user's credentials to the data source. The user's credentials will then be used when requesting data from the data source. For details on how to configure your Grafana instance using Azure Entra, refer to the [documentation](ref:configure-grafana-azure-auth).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Additional configuration is required to ensure that the App Registration used to login a user via Azure provides an access token with the permissions required by the data source.
|
||||
|
||||
The App Registration must be configured to issue both **Access Tokens** and **ID Tokens**.
|
||||
|
||||
1. In the Azure Portal, open the App Registration that requires configuration.
|
||||
2. Select **Authentication** in the side menu.
|
||||
3. Under **Implicit grant and hybrid flows** check both the **Access tokens** and **ID tokens** boxes.
|
||||
4. Save the changes to ensure the App Registration is updated.
|
||||
|
||||
The App Registration must also be configured with additional **API Permissions** to provide authenticated users with access to the APIs utilised by the data source.
|
||||
|
||||
1. In the Azure Portal, open the App Registration that requires configuration.
|
||||
1. Select **API Permissions** in the side menu.
|
||||
1. Ensure the `openid`, `profile`, `email`, and `offline_access` permissions are present under the **Microsoft Graph** section. If not, they must be added.
|
||||
1. Select **Add a permission** and choose the following permissions. They must be added individually. Refer to the [Azure documentation](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-configure-app-access-web-apis) for more information.
|
||||
- Select **Azure Service Management** > **Delegated permissions** > `user_impersonation` > **Add permissions**
|
||||
   - Select **APIs my organization uses** > Search for **Log Analytics API** and select it > **Delegated permissions** > `Data.Read` > **Add permissions**
|
||||
|
||||
Once all permissions have been added, the Azure authentication section in Grafana must be updated. The `scopes` section must be updated to include the `.default` scope to ensure that a token with access to all APIs declared on the App Registration is requested by Grafana. Once updated the scopes value should equal: `.default openid email profile`.
|
||||
{{< /admonition >}}
|
||||
|
||||
This method of authentication doesn't inherently support all backend functionality as a user's credentials won't be in scope.
|
||||
Affected functionality includes alerting, reporting, and recorded queries.
|
||||
In order to support backend queries when using a data source configured with current user authentication, you can configure service credentials.
|
||||
Also, note that query and resource caching is disabled by default for data sources using current user authentication.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
To configure fallback service credentials the [feature toggle](ref:configure-grafana-feature-toggles) `idForwarding` must be set to `true` and `user_identity_fallback_credentials_enabled` must be enabled in the [Azure configuration section](ref:configure-grafana-azure) (enabled by default when `user_identity_enabled` is set to `true`).
|
||||
{{< /admonition >}}
|
||||
|
||||
Permissions for fallback credentials may need to be broad to appropriately support backend functionality.
|
||||
For example, an alerting query created by a user is dependent on their permissions.
|
||||
If a user tries to create an alert for a resource that the fallback credentials can't access, the alert will fail.
|
||||
|
||||
**To enable current user authentication for Grafana:**
|
||||
|
||||
1. Set the `user_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
|
||||
By default this will also enable fallback service credentials.
|
||||
If you want to disable service credentials at the instance level set `user_identity_fallback_credentials_enabled` to false.
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
user_identity_enabled = true
|
||||
```
|
||||
|
||||
1. In the Azure Monitor data source configuration, set **Authentication** to **Current User**.
|
||||
If fallback service credentials are enabled at the instance level, an additional configuration section is visible that you can use to enable or disable using service credentials for this data source.
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-current-user.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Current User authentication" >}}
|
||||
|
||||
1. If you want backend functionality to work with this data source, enable service credentials and configure the data source using the most applicable credentials for your circumstances.
|
||||
|
||||
## Query the data source
|
||||
|
||||
The Azure Monitor data source can query data from Azure Monitor Metrics and Logs, the Azure Resource Graph, and Application Insights Traces. Each source has its own specialized query editor.
|
||||
|
||||
For details, see the [query editor documentation](query-editor/).
|
||||
|
||||
## Use template variables
|
||||
|
||||
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
|
||||
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
|
||||
Grafana refers to such variables as template variables.
|
||||
|
||||
For details, see the [template variables documentation](template-variables/).
|
||||
|
||||
## Application Insights and Insights Analytics (removed)
|
||||
|
||||
Until Grafana v8.0, you could query the same Azure Application Insights data using Application Insights and Insights Analytics.
|
||||
|
||||
These queries were deprecated in Grafana v7.5. In Grafana v8.0, Application Insights and Insights Analytics were made read-only in favor of querying this data through Metrics and Logs. These query methods were completely removed in Grafana v9.0.
|
||||
|
||||
If you're upgrading from a Grafana version prior to v9.0 and relied on Application Insights and Analytics queries, refer to the [Grafana v9.0 documentation](/docs/grafana/v9.0/datasources/azuremonitor/deprecated-application-insights/) for help migrating these queries to Metrics and Logs queries.
|
||||
|
||||
@@ -1,262 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/alerting/
|
||||
description: Set up alerts using Azure Monitor data in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- azure
|
||||
- monitor
|
||||
- alerting
|
||||
- alerts
|
||||
- metrics
|
||||
- logs
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Alerting
|
||||
title: Azure Monitor alerting
|
||||
weight: 500
|
||||
refs:
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
alerting-fundamentals:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/
|
||||
create-alert-rule:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
grafana-managed-recording-rules:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
troubleshoot:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
---
|
||||
|
||||
# Azure Monitor alerting
|
||||
|
||||
The Azure Monitor data source supports [Grafana Alerting](ref:alerting) and [Grafana-managed recording rules](ref:grafana-managed-recording-rules), allowing you to create alert rules based on Azure metrics, logs, traces, and resource data. You can monitor your Azure environment and receive notifications when specific conditions are met.
|
||||
|
||||
## Before you begin
|
||||
|
||||
- Ensure you have the appropriate permissions to create alert rules in Grafana.
|
||||
- Verify your Azure Monitor data source is configured and working correctly.
|
||||
- Familiarize yourself with [Grafana Alerting concepts](ref:alerting-fundamentals).
|
||||
- **Important**: Verify your data source uses a supported authentication method. Refer to [Authentication requirements](#authentication-requirements).
|
||||
|
||||
## Supported query types for alerting
|
||||
|
||||
All Azure Monitor query types support alerting and recording rules:
|
||||
|
||||
| Query type | Use case | Notes |
|
||||
| -------------------- | -------------------------------------------------- | -------------------------------------------------------- |
|
||||
| Metrics | Threshold-based alerts on Azure resource metrics | Best suited for alerting; returns time-series data |
|
||||
| Logs | Alert on log patterns, error counts, or thresholds | Use KQL to aggregate data into numeric values |
|
||||
| Azure Resource Graph | Alert on resource state or configuration changes | Use count aggregations to return numeric data |
|
||||
| Traces | Alert on trace data and application performance | Use aggregations to return numeric values for evaluation |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Alert queries must return numeric data that Grafana can evaluate against a threshold. Queries that return only text or non-numeric data cannot be used directly for alerting.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Authentication requirements
|
||||
|
||||
Alerting and recording rules run as background processes without a user context. This means they require service-level authentication and don't work with all authentication methods.
|
||||
|
||||
| Authentication method | Supported |
|
||||
| -------------------------------- | ------------------------------------- |
|
||||
| App Registration (client secret) | ✓ |
|
||||
| Managed Identity | ✓ |
|
||||
| Workload Identity | ✓ |
|
||||
| Current User | ✓ (with fallback service credentials) |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
If you use **Current User** authentication, you must configure **fallback service credentials** for alerting and recording rules to function. User credentials aren't available for background operations, so Grafana uses the fallback credentials instead. Refer to [configure the data source](ref:configure-azure-monitor) for details on setting up fallback credentials.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Create an alert rule
|
||||
|
||||
To create an alert rule using Azure Monitor data:
|
||||
|
||||
1. Go to **Alerting** > **Alert rules**.
|
||||
1. Click **New alert rule**.
|
||||
1. Enter a name for your alert rule.
|
||||
1. In the **Define query and alert condition** section:
|
||||
- Select your Azure Monitor data source.
|
||||
- Configure your query (for example, a Metrics query for CPU usage or a Logs query using KQL).
|
||||
- Add a **Reduce** expression if your query returns multiple series.
|
||||
- Add a **Threshold** expression to define the alert condition.
|
||||
1. In the **Set evaluation behavior** section:
|
||||
- Select or create a folder and evaluation group.
|
||||
- Set the evaluation interval (how often the alert is checked).
|
||||
- Set the pending period (how long the condition must be true before firing).
|
||||
1. Add labels and annotations to provide context for notifications.
|
||||
1. Click **Save rule**.
|
||||
|
||||
For detailed instructions, refer to [Create a Grafana-managed alert rule](ref:create-alert-rule).
|
||||
|
||||
## Example: VM CPU usage alert
|
||||
|
||||
This example creates an alert that fires when virtual machine CPU usage exceeds 80%:
|
||||
|
||||
1. Create a new alert rule.
|
||||
1. Configure the query:
|
||||
- **Service**: Metrics
|
||||
- **Resource**: Select your virtual machine
|
||||
- **Metric namespace**: `Microsoft.Compute/virtualMachines`
|
||||
- **Metric**: `Percentage CPU`
|
||||
- **Aggregation**: `Average`
|
||||
1. Add expressions:
|
||||
- **Reduce**: Last (to get the most recent data point)
|
||||
- **Threshold**: Is above 80
|
||||
1. Set evaluation to run every 1 minute with a 5-minute pending period.
|
||||
1. Save the rule.
|
||||
|
||||
## Example: Error log count alert
|
||||
|
||||
This example alerts when error logs exceed a threshold using a KQL query:
|
||||
|
||||
1. Create a new alert rule.
|
||||
1. Configure the query:
|
||||
- **Service**: Logs
|
||||
- **Resource**: Select your Log Analytics workspace
|
||||
- **Query**:
|
||||
```kusto
|
||||
AppExceptions
|
||||
| where TimeGenerated > ago(5m)
|
||||
| summarize ErrorCount = count() by bin(TimeGenerated, 1m)
|
||||
```
|
||||
1. Add expressions:
|
||||
- **Reduce**: Max (to get the highest count in the period)
|
||||
- **Threshold**: Is above 10
|
||||
1. Set evaluation to run every 5 minutes.
|
||||
1. Save the rule.
|
||||
|
||||
## Example: Resource count alert
|
||||
|
||||
This example alerts when the number of running virtual machines drops below a threshold using Azure Resource Graph:
|
||||
|
||||
1. Create a new alert rule.
|
||||
1. Configure the query:
|
||||
- **Service**: Azure Resource Graph
|
||||
- **Subscriptions**: Select your subscriptions
|
||||
- **Query**:
|
||||
|
||||
```kusto
|
||||
resources
|
||||
| where type == "microsoft.compute/virtualmachines"
|
||||
| where properties.extended.instanceView.powerState.displayStatus == "VM running"
|
||||
| summarize RunningVMs = count()
|
||||
```
|
||||
|
||||
1. Add expressions:
|
||||
- **Reduce**: Last
|
||||
- **Threshold**: Is below 3
|
||||
1. Set evaluation to run every 5 minutes.
|
||||
1. Save the rule.
|
||||
|
||||
## Best practices
|
||||
|
||||
Follow these recommendations to create reliable and efficient alerts with Azure Monitor data.
|
||||
|
||||
### Use appropriate query intervals
|
||||
|
||||
- Set the alert evaluation interval to be greater than or equal to the minimum data resolution from Azure Monitor.
|
||||
- Azure Monitor Metrics typically have 1-minute granularity at minimum.
|
||||
- Avoid very short intervals (less than 1 minute) as they may cause evaluation timeouts or miss data points.
|
||||
|
||||
### Reduce multiple series
|
||||
|
||||
When your Azure Monitor query returns multiple time series (for example, CPU usage across multiple VMs), use the **Reduce** expression to aggregate them:
|
||||
|
||||
- **Last**: Use the most recent value
|
||||
- **Mean**: Average across all series
|
||||
- **Max/Min**: Use the highest or lowest value
|
||||
- **Sum**: Total across all series
|
||||
|
||||
### Optimize Log Analytics queries
|
||||
|
||||
For Logs queries used in alerting:
|
||||
|
||||
- Use `summarize` to aggregate data into numeric values.
|
||||
- Include appropriate time filters using `ago()` or `TimeGenerated`.
|
||||
- Avoid returning large result sets; aggregate data in the query.
|
||||
- Test queries in Explore before using them in alert rules.
|
||||
|
||||
### Handle no data conditions
|
||||
|
||||
Configure what happens when no data is returned:
|
||||
|
||||
1. In the alert rule, find **Configure no data and error handling**.
|
||||
1. Choose an appropriate action:
|
||||
- **No Data**: Keep the alert in its current state
|
||||
- **Alerting**: Treat no data as an alert condition
|
||||
- **OK**: Treat no data as a healthy state
|
||||
|
||||
### Test queries before alerting
|
||||
|
||||
Always verify your query returns expected data before creating an alert:
|
||||
|
||||
1. Go to **Explore**.
|
||||
1. Select your Azure Monitor data source.
|
||||
1. Run the query you plan to use for alerting.
|
||||
1. Confirm the data format and values are correct.
|
||||
1. Verify the query returns numeric data suitable for threshold evaluation.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If your Azure Monitor alerts aren't working as expected, use the following sections to diagnose and resolve common issues.
|
||||
|
||||
### Alerts not firing
|
||||
|
||||
- Verify the data source uses a supported authentication method. If using Current User authentication, ensure fallback service credentials are configured.
|
||||
- Check that the query returns numeric data in Explore.
|
||||
- Ensure the evaluation interval allows enough time for data to be available.
|
||||
- Review the alert rule's health and any error messages in the Alerting UI.
|
||||
|
||||
### Authentication errors in alert evaluation
|
||||
|
||||
If you see authentication errors when alerts evaluate:
|
||||
|
||||
- Confirm the data source is configured with App Registration, Managed Identity, Workload Identity, or Current User with fallback service credentials.
|
||||
- If using App Registration, verify the client secret hasn't expired.
|
||||
- If using Current User, verify that fallback service credentials are configured and valid.
|
||||
- Check that the service principal has appropriate permissions on Azure resources.
|
||||
|
||||
### Query timeout errors
|
||||
|
||||
- Simplify complex KQL queries.
|
||||
- Reduce the time range in Log Analytics queries.
|
||||
- Add more specific filters to narrow result sets.
|
||||
|
||||
For additional troubleshooting help, refer to [Troubleshoot Azure Monitor](ref:troubleshoot).
|
||||
|
||||
## Additional resources
|
||||
|
||||
- [Grafana Alerting documentation](ref:alerting)
|
||||
- [Create alert rules](ref:create-alert-rule)
|
||||
- [Azure Monitor query editor](ref:query-editor)
|
||||
- [Grafana-managed recording rules](ref:grafana-managed-recording-rules)
|
||||
@@ -1,218 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/annotations/
|
||||
description: Use annotations with the Azure Monitor data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- azure
|
||||
- monitor
|
||||
- annotations
|
||||
- events
|
||||
- logs
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Annotations
|
||||
title: Azure Monitor annotations
|
||||
weight: 450
|
||||
refs:
|
||||
annotate-visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
---
|
||||
|
||||
# Azure Monitor annotations
|
||||
|
||||
[Annotations](ref:annotate-visualizations) overlay rich event information on top of graphs. You can use Azure Monitor Log Analytics queries to create annotations that mark important events, deployments, alerts, or other significant occurrences on your dashboards.
|
||||
|
||||
## Before you begin
|
||||
|
||||
- Ensure you have configured the Azure Monitor data source.
|
||||
- You need access to a Log Analytics workspace containing the data you want to use for annotations.
|
||||
- Annotations use Log Analytics (KQL) queries only. Metrics, Traces, and Azure Resource Graph queries are not supported for annotations.
|
||||
|
||||
## Create an annotation query
|
||||
|
||||
To add an Azure Monitor annotation to a dashboard:
|
||||
|
||||
1. Open the dashboard where you want to add annotations.
|
||||
1. Click **Dashboard settings** (gear icon) in the top navigation.
|
||||
1. Select **Annotations** in the left menu.
|
||||
1. Click **Add annotation query**.
|
||||
1. Enter a **Name** for the annotation (for example, "Azure Activity" or "Deployments").
|
||||
1. Select your **Azure Monitor** data source.
|
||||
1. Choose the **Logs** service.
|
||||
1. Select a **Resource** (Log Analytics workspace or Application Insights resource).
|
||||
1. Write a KQL query that returns the annotation data.
|
||||
1. Click **Apply** to save.
|
||||
|
||||
## Query requirements
|
||||
|
||||
Your KQL query should return columns that Grafana can use to create annotations:
|
||||
|
||||
| Column | Required | Description |
|
||||
| ------------------ | ----------- | ------------------------------------------------------------------------------------------------ |
|
||||
| `TimeGenerated` | Yes | The timestamp for the annotation. Grafana uses this to position the annotation on the time axis. |
|
||||
| `Text` | Recommended | The annotation text displayed when you hover over or click the annotation. |
|
||||
| Additional columns | Optional | Any other columns returned become annotation tags. |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Always include a time filter in your query to limit results to the dashboard's time range. Use the `$__timeFilter()` macro.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Annotation query examples
|
||||
|
||||
The following examples demonstrate common annotation use cases.
|
||||
|
||||
### Azure Activity Log events
|
||||
|
||||
Display Azure Activity Log events such as resource modifications, deployments, and administrative actions:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where Level == "Error" or Level == "Warning" or CategoryValue == "Administrative"
|
||||
| project TimeGenerated, Text=OperationNameValue, Level, ResourceGroup, Caller
|
||||
| order by TimeGenerated desc
|
||||
| take 100
|
||||
```
|
||||
|
||||
### Deployment events
|
||||
|
||||
Show deployment-related activity:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where OperationNameValue contains "deployments"
|
||||
| project TimeGenerated, Text=strcat("Deployment: ", OperationNameValue), Status=ActivityStatusValue, ResourceGroup
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### Application Insights exceptions
|
||||
|
||||
Mark application exceptions as annotations:
|
||||
|
||||
```kusto
|
||||
AppExceptions
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| project TimeGenerated, Text=strcat(ProblemId, ": ", OuterMessage), SeverityLevel, AppRoleName
|
||||
| order by TimeGenerated desc
|
||||
| take 50
|
||||
```
|
||||
|
||||
### Custom events from Application Insights
|
||||
|
||||
Display custom events logged by your application:
|
||||
|
||||
```kusto
|
||||
AppEvents
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where Name == "DeploymentStarted" or Name == "DeploymentCompleted"
|
||||
| project TimeGenerated, Text=Name, AppRoleName
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### Security alerts
|
||||
|
||||
Show security-related alerts:
|
||||
|
||||
```kusto
|
||||
SecurityAlert
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| project TimeGenerated, Text=AlertName, Severity=AlertSeverity, Description
|
||||
| order by TimeGenerated desc
|
||||
| take 50
|
||||
```
|
||||
|
||||
### Resource health events
|
||||
|
||||
Display resource health status changes:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where CategoryValue == "ResourceHealth"
|
||||
| project TimeGenerated, Text=OperationNameValue, Status=ActivityStatusValue, ResourceId
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### VM start and stop events
|
||||
|
||||
Mark virtual machine state changes:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where OperationNameValue has_any ("start", "deallocate", "restart")
|
||||
| where ResourceProviderValue == "MICROSOFT.COMPUTE"
|
||||
| project TimeGenerated, Text=OperationNameValue, VM=Resource, Status=ActivityStatusValue
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### Autoscale events
|
||||
|
||||
Show autoscale operations:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where OperationNameValue contains "autoscale"
|
||||
| project TimeGenerated, Text=strcat("Autoscale: ", OperationNameValue), Status=ActivityStatusValue, ResourceGroup
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
## Customize annotation appearance
|
||||
|
||||
After creating an annotation query, you can customize its appearance:
|
||||
|
||||
| Setting | Description |
|
||||
| ------------- | -------------------------------------------------------------------------------------------------------- |
|
||||
| **Color** | Choose a color for the annotation markers. Use different colors to distinguish between annotation types. |
|
||||
| **Show in** | Select which panels display the annotations. |
|
||||
| **Filter by** | Add filters to limit when annotations appear. |
|
||||
|
||||
## Best practices
|
||||
|
||||
Follow these recommendations when creating annotations:
|
||||
|
||||
1. **Limit results**: Always use `take` or `limit` to restrict the number of annotations. Too many annotations can clutter your dashboard and impact performance.
|
||||
|
||||
2. **Use time filters**: Include `$__timeFilter()` to ensure queries only return data within the dashboard's time range.
|
||||
|
||||
3. **Create meaningful text**: Use `strcat()` or `project` to create descriptive annotation text that provides context at a glance.
|
||||
|
||||
4. **Add relevant tags**: Include columns like `ResourceGroup`, `Severity`, or `Status` that become clickable tags for filtering.
|
||||
|
||||
5. **Use descriptive names**: Name your annotations clearly (for example, "Production Deployments" or "Critical Alerts") so dashboard users understand what they represent.
|
||||
|
||||
## Troubleshoot annotations
|
||||
|
||||
If annotations aren't appearing as expected, try the following solutions.
|
||||
|
||||
### Annotations don't appear
|
||||
|
||||
- Verify the query returns data in the selected time range.
|
||||
- Check that the query includes a `TimeGenerated` column.
|
||||
- Test the query in the Log Analytics query editor in the Azure portal.
|
||||
- Ensure the annotation is enabled (toggle is on).
|
||||
|
||||
### Too many annotations
|
||||
|
||||
- Add more specific filters to your query.
|
||||
- Use `take` to limit results.
|
||||
- Narrow the time range.
|
||||
|
||||
### Annotations appear at wrong times
|
||||
|
||||
- Verify the `TimeGenerated` column contains the correct timestamp.
|
||||
- Check your dashboard's timezone settings.
|
||||
@@ -1,605 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/configure/
|
||||
description: Guide for configuring the Azure Monitor data source in Grafana.
|
||||
keywords:
|
||||
- grafana
|
||||
- microsoft
|
||||
- azure
|
||||
- monitor
|
||||
- application
|
||||
- insights
|
||||
- log
|
||||
- analytics
|
||||
- guide
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Configure
|
||||
title: Configure the Azure Monitor data source
|
||||
weight: 200
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
configure-grafana-feature-toggles:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#feature_toggles
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#feature_toggles
|
||||
provisioning-data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
configure-grafana-azure-auth:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/
|
||||
build-dashboards:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
configure-grafana-azure:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
|
||||
data-source-management:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
configure-grafana-azure-auth-scopes:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
configure-pdc:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
---
|
||||
|
||||
# Configure the Azure Monitor data source
|
||||
|
||||
This document explains how to configure the Azure Monitor data source and the available configuration options.
|
||||
For general information about data sources, refer to [Grafana data sources](ref:data-sources) and [Data source management](ref:data-source-management).
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before configuring the Azure Monitor data source, ensure you have the following:
|
||||
|
||||
- **Grafana permissions:** You must have the `Organization administrator` role to configure data sources.
|
||||
Organization administrators can also [configure the data source via YAML](#provision-the-data-source) with the Grafana provisioning system or [using Terraform](#configure-with-terraform).
|
||||
|
||||
- **Azure prerequisites:** Depending on your chosen authentication method, you may need:
|
||||
- A Microsoft Entra ID (formerly Azure AD) app registration with a service principal (for App Registration authentication)
|
||||
- A Managed Identity enabled on your Azure VM or App Service (for Managed Identity authentication)
|
||||
- Workload identity configured in your Kubernetes cluster (for Workload Identity authentication)
|
||||
- Microsoft Entra ID authentication configured for Grafana login (for Current User authentication)
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
**Grafana Cloud users:** Managed Identity and Workload Identity authentication methods are not available in Grafana Cloud because they require Grafana to run on your Azure infrastructure. Use **App Registration** authentication instead.
|
||||
{{< /admonition >}}
|
||||
|
||||
- **Azure RBAC permissions:** The identity used to authenticate must have the `Reader` role on the Azure subscription containing the resources you want to monitor.
|
||||
For Log Analytics queries, the identity also needs appropriate permissions on the Log Analytics workspaces to be queried.
|
||||
Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The Azure Monitor data source plugin is built into Grafana. No additional installation is required.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Add the data source
|
||||
|
||||
To add the Azure Monitor data source:
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Click **Add new connection**.
|
||||
1. Type `Azure Monitor` in the search bar.
|
||||
1. Select **Azure Monitor**.
|
||||
1. Click **Add new data source** in the upper right.
|
||||
|
||||
You're taken to the **Settings** tab where you can configure the data source.
|
||||
|
||||
## Choose an authentication method
|
||||
|
||||
The Azure Monitor data source supports four authentication methods. Choose based on where Grafana is hosted and your security requirements:
|
||||
|
||||
| Authentication method | Best for | Requirements |
|
||||
| --------------------- | ------------------------------------------ | -------------------------------------------------------------- |
|
||||
| **App Registration** | Any Grafana deployment | Microsoft Entra ID app registration with client secret |
|
||||
| **Managed Identity** | Grafana hosted in Azure (VMs, App Service) | Managed identity enabled on the Azure resource |
|
||||
| **Workload Identity** | Grafana in Kubernetes (AKS) | Workload identity federation configured |
|
||||
| **Current User** | User-level access control | Microsoft Entra ID authentication configured for Grafana login |
|
||||
|
||||
## Configure authentication
|
||||
|
||||
Select one of the following authentication methods and complete the configuration.
|
||||
|
||||
### App Registration
|
||||
|
||||
Use a Microsoft Entra ID app registration (service principal) to authenticate. This method works with any Grafana deployment.
|
||||
|
||||
#### App Registration prerequisites
|
||||
|
||||
1. Create an app registration in Microsoft Entra ID.
|
||||
Refer to the [Azure documentation for creating a service principal](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in).
|
||||
|
||||
1. Create a client secret for the app registration.
|
||||
Refer to the [Azure documentation for creating a client secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).
|
||||
|
||||
1. Assign the `Reader` role to the app registration on the subscription or resources you want to monitor.
|
||||
Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
|
||||
|
||||
#### App Registration UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| **Authentication** | Select **App Registration**. |
|
||||
| **Azure Cloud** | The Azure environment to connect to. Select **Azure** for the public cloud, or choose Azure Government or Azure China for national clouds. |
|
||||
| **Directory (tenant) ID** | The GUID that identifies your Microsoft Entra ID tenant. |
|
||||
| **Application (client) ID** | The GUID for the app registration you created. |
|
||||
| **Client secret** | The secret key for the app registration. Keep this secure and rotate periodically. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
|
||||
#### Provision App Registration with YAML
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: clientsecret
|
||||
cloudName: azuremonitor # See supported cloud names below
|
||||
tenantId: <tenant-id>
|
||||
clientId: <client-id>
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
secureJsonData:
|
||||
clientSecret: <client-secret>
|
||||
version: 1
|
||||
```
|
||||
|
||||
### Managed Identity
|
||||
|
||||
Use Azure Managed Identity for secure, credential-free authentication when Grafana is hosted in Azure.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Managed Identity is available in [Azure Managed Grafana](https://azure.microsoft.com/en-us/products/managed-grafana) or self-hosted Grafana deployed in Azure. It is not available in Grafana Cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Managed Identity prerequisites
|
||||
|
||||
- Grafana must be hosted in Azure (App Service, Azure VMs, or Azure Managed Grafana).
|
||||
- Managed identity must be enabled on the Azure resource hosting Grafana.
|
||||
- The managed identity must have the `Reader` role on the subscription or resources you want to monitor.
|
||||
|
||||
For details on Azure managed identities, refer to the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
|
||||
|
||||
#### Managed Identity Grafana server configuration
|
||||
|
||||
Enable managed identity in the Grafana server configuration:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
```
|
||||
|
||||
To use a user-assigned managed identity instead of the system-assigned identity, also set:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
managed_identity_client_id = <USER_ASSIGNED_IDENTITY_CLIENT_ID>
|
||||
```
|
||||
|
||||
Refer to [Grafana Azure configuration](ref:configure-grafana-azure) for more details.
|
||||
|
||||
#### Managed Identity UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------------ | --------------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Select **Managed Identity**. The directory ID, application ID, and client secret fields are hidden. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-managed-identity-2.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Managed Identity" >}}
|
||||
|
||||
#### Provision Managed Identity with YAML
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: msi
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
### Workload Identity
|
||||
|
||||
Use Azure Workload Identity for secure authentication in Kubernetes environments like AKS.
|
||||
|
||||
#### Workload Identity prerequisites
|
||||
|
||||
- Grafana must be running in a Kubernetes environment with workload identity federation configured.
|
||||
- The workload identity must have the `Reader` role on the subscription or resources you want to monitor.
|
||||
|
||||
For details, refer to the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/).
|
||||
|
||||
#### Workload Identity Grafana server configuration
|
||||
|
||||
Enable workload identity in the Grafana server configuration:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
```
|
||||
|
||||
Optional configuration variables:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
workload_identity_tenant_id = <IDENTITY_TENANT_ID> # Microsoft Entra ID tenant containing the managed identity
|
||||
workload_identity_client_id = <IDENTITY_CLIENT_ID> # Client ID if different from default
|
||||
workload_identity_token_file = <TOKEN_FILE_PATH> # Path to the token file
|
||||
```
|
||||
|
||||
Refer to [Grafana Azure configuration](ref:configure-grafana-azure) and the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/) for more details.
|
||||
|
||||
#### Workload Identity UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------------ | ---------------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Select **Workload Identity**. The directory ID, application ID, and client secret fields are hidden. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-workload-identity.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Workload Identity" >}}
|
||||
|
||||
#### Provision Workload Identity with YAML
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: workloadidentity
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
### Current User
|
||||
|
||||
Forward the logged-in Grafana user's Azure credentials to the data source for user-level access control.
|
||||
|
||||
{{< admonition type="warning" >}}
|
||||
Current User authentication is an [experimental feature](/docs/release-life-cycle/). Engineering and on-call support is not available. Documentation is limited. No SLA is provided. Contact Grafana Support to enable this feature in Grafana Cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Current User prerequisites
|
||||
|
||||
Your Grafana instance must be configured with Microsoft Entra ID authentication. Refer to the [Microsoft Entra ID authentication documentation](ref:configure-grafana-azure-auth).
|
||||
|
||||
#### Configure your Azure App Registration
|
||||
|
||||
The App Registration used for Grafana login requires additional configuration:
|
||||
|
||||
**Enable token issuance:**
|
||||
|
||||
1. In the Azure Portal, open your App Registration.
|
||||
1. Select **Authentication** in the side menu.
|
||||
1. Under **Implicit grant and hybrid flows**, check both **Access tokens** and **ID tokens**.
|
||||
1. Save your changes.
|
||||
|
||||
**Add API permissions:**
|
||||
|
||||
1. In the Azure Portal, open your App Registration.
|
||||
1. Select **API Permissions** in the side menu.
|
||||
1. Ensure these permissions are present under **Microsoft Graph**: `openid`, `profile`, `email`, and `offline_access`.
|
||||
1. Add the following permissions:
|
||||
- **Azure Service Management** > **Delegated permissions** > `user_impersonation`
|
||||
- **APIs my organization uses** > Search for **Log Analytics API** > **Delegated permissions** > `Data.Read`
|
||||
|
||||
Refer to the [Azure documentation](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-configure-app-access-web-apis) for more information.
|
||||
|
||||
**Update Grafana scopes:**
|
||||
|
||||
Update the `scopes` section in your Grafana Azure authentication configuration to include the `.default` scope:
|
||||
|
||||
```
|
||||
.default openid email profile
|
||||
```
|
||||
|
||||
#### Current User Grafana server configuration
|
||||
|
||||
Enable current user authentication in the Grafana server configuration:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
user_identity_enabled = true
|
||||
```
|
||||
|
||||
By default, this also enables fallback service credentials. To disable fallback credentials at the instance level:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
user_identity_enabled = true
|
||||
user_identity_fallback_credentials_enabled = false
|
||||
```
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
To use fallback service credentials, the [feature toggle](ref:configure-grafana-feature-toggles) `idForwarding` must be set to `true`.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Limitations and fallback credentials
|
||||
|
||||
Current User authentication doesn't support backend functionality like alerting, reporting, and recorded queries because user credentials aren't available for background operations.
|
||||
|
||||
To support these features, configure **fallback service credentials**. When enabled, Grafana uses the fallback credentials for backend operations. Note that operations using fallback credentials are limited to the permissions of those credentials, not the user's permissions.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Query and resource caching is disabled by default for data sources using Current User authentication.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Current User UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| -------------------------------- | ------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Select **Current User**. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
| **Fallback Service Credentials** | Enable and configure credentials for backend features like alerting. |
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-current-user.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Current User authentication" >}}
|
||||
|
||||
#### Provision Current User with YAML
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The `oauthPassThru` property is required for Current User authentication. The `disableGrafanaCache` property prevents returning cached responses for resources users don't have access to.
|
||||
{{< /admonition >}}
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: currentuser
|
||||
oauthPassThru: true
|
||||
disableGrafanaCache: true
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
## Additional configuration options
|
||||
|
||||
These settings apply to all authentication methods.
|
||||
|
||||
### General settings
|
||||
|
||||
| Setting | Description |
|
||||
| ----------- | ------------------------------------------------------------------------------- |
|
||||
| **Name** | The data source name used in panels and queries. Example: `azure-monitor-prod`. |
|
||||
| **Default** | Toggle to make this the default data source for new panels. |
|
||||
|
||||
### Enable Basic Logs
|
||||
|
||||
Toggle **Enable Basic Logs** to allow queries against [Basic Logs tables](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/basic-logs-query?tabs=portal-1) in supported Log Analytics Workspaces.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Querying Basic Logs tables incurs additional costs on a per-query basis.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Private data source connect (Grafana Cloud only)
|
||||
|
||||
If you're using Grafana Cloud and need to connect to Azure resources in a private network, use Private Data Source Connect (PDC).
|
||||
|
||||
1. Click the **Private data source connect** dropdown to select your PDC configuration.
|
||||
1. Click **Manage private data source connect** to view your PDC connection details.
|
||||
|
||||
For more information, refer to [Private data source connect](ref:private-data-source-connect) and [Configure PDC](ref:configure-pdc).
|
||||
|
||||
## Supported cloud names
|
||||
|
||||
When provisioning the data source, use the following `cloudName` values:
|
||||
|
||||
| Azure Cloud | `cloudName` value |
|
||||
| -------------------------------- | ------------------------ |
|
||||
| Microsoft Azure public cloud | `azuremonitor` (default) |
|
||||
| Microsoft Chinese national cloud | `chinaazuremonitor` |
|
||||
| US Government cloud | `govazuremonitor` |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
For Current User authentication, the cloud names differ: use `AzureCloud` for public cloud, `AzureChinaCloud` for the Chinese national cloud, and `AzureUSGovernment` for the US Government cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Verify the connection
|
||||
|
||||
After configuring the data source, click **Save & test**. A successful connection displays a message confirming that the credentials are valid and have access to the configured default subscription.
|
||||
|
||||
If the test fails, verify:
|
||||
|
||||
- Your credentials are correct (tenant ID, client ID, client secret)
|
||||
- The identity has the required Azure RBAC permissions
|
||||
- For Managed Identity or Workload Identity, that the Grafana server configuration is correct
|
||||
- Network connectivity to Azure endpoints
|
||||
|
||||
## Provision the data source
|
||||
|
||||
You can define and configure the Azure Monitor data source in YAML files as part of the Grafana provisioning system.
|
||||
For more information about provisioning, refer to [Provisioning Grafana](ref:provisioning-data-sources).
|
||||
|
||||
### Provision quick reference
|
||||
|
||||
| Authentication method | `azureAuthType` value | Required fields |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- |
|
||||
| App Registration | `clientsecret` | `tenantId`, `clientId`, `clientSecret` |
|
||||
| Managed Identity | `msi` | None (uses VM identity) |
|
||||
| Workload Identity | `workloadidentity` | None (uses pod identity) |
|
||||
| Current User | `currentuser` | `oauthPassThru: true`, `disableGrafanaCache: true` |
|
||||
|
||||
All methods support the optional `subscriptionId` field to set a default subscription.
|
||||
|
||||
For complete YAML examples, see the [authentication method sections](#configure-authentication) above.
|
||||
|
||||
## Configure with Terraform
|
||||
|
||||
You can configure the Azure Monitor data source using the [Grafana Terraform provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs). This approach enables infrastructure-as-code workflows and version control for your Grafana configuration.
|
||||
|
||||
### Terraform prerequisites
|
||||
|
||||
- [Terraform](https://www.terraform.io/downloads) installed
|
||||
- Grafana Terraform provider configured with appropriate credentials
|
||||
- For Grafana Cloud: A [Cloud Access Policy token](https://grafana.com/docs/grafana-cloud/account-management/authentication-and-permissions/access-policies/) with data source permissions
|
||||
|
||||
### Provider configuration
|
||||
|
||||
Configure the Grafana provider to connect to your Grafana instance:
|
||||
|
||||
```hcl
|
||||
terraform {
|
||||
required_providers {
|
||||
grafana = {
|
||||
source = "grafana/grafana"
|
||||
version = ">= 2.0.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# For Grafana Cloud
|
||||
provider "grafana" {
|
||||
url = "<YOUR_GRAFANA_CLOUD_STACK_URL>"
|
||||
auth = "<YOUR_SERVICE_ACCOUNT_TOKEN>"
|
||||
}
|
||||
|
||||
# For self-hosted Grafana
|
||||
# provider "grafana" {
|
||||
# url = "http://localhost:3000"
|
||||
# auth = "<API_KEY_OR_SERVICE_ACCOUNT_TOKEN>"
|
||||
# }
|
||||
```
|
||||
|
||||
### Terraform examples
|
||||
|
||||
The following examples show how to configure the Azure Monitor data source for each authentication method.
|
||||
|
||||
**App Registration (client secret):**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "clientsecret"
|
||||
cloudName = "azuremonitor"
|
||||
tenantId = "<TENANT_ID>"
|
||||
clientId = "<CLIENT_ID>"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
|
||||
secure_json_data_encoded = jsonencode({
|
||||
clientSecret = "<CLIENT_SECRET>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**Managed Identity:**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "msi"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**Workload Identity:**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "workloadidentity"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**Current User:**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "currentuser"
|
||||
oauthPassThru = true
|
||||
disableGrafanaCache = true
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**With Basic Logs enabled:**
|
||||
|
||||
Add `enableBasicLogs = true` to any of the above configurations:
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "clientsecret"
|
||||
cloudName = "azuremonitor"
|
||||
tenantId = "<TENANT_ID>"
|
||||
clientId = "<CLIENT_ID>"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
enableBasicLogs = true
|
||||
})
|
||||
|
||||
secure_json_data_encoded = jsonencode({
|
||||
clientSecret = "<CLIENT_SECRET>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
For more information about the Grafana Terraform provider, refer to the [provider documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs) and the [grafana_data_source resource](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/data_source).
|
||||
@@ -21,7 +21,6 @@ labels:
|
||||
menuTitle: Query editor
|
||||
title: Azure Monitor query editor
|
||||
weight: 300
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
query-transform-data-query-options:
|
||||
- pattern: /docs/grafana/
|
||||
@@ -33,85 +32,30 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
troubleshoot-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
configure-grafana-feature-toggles:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/
|
||||
template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
alerting-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
annotations-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
---
|
||||
|
||||
# Azure Monitor query editor
|
||||
|
||||
Grafana provides a query editor for the Azure Monitor data source, which is located on the [Explore page](ref:explore). You can also access the Azure Monitor query editor from a dashboard panel. Click the menu in the upper right of the panel and select **Edit**.
|
||||
This topic explains querying specific to the Azure Monitor data source.
|
||||
For general documentation on querying data sources in Grafana, see [Query and transform data](ref:query-transform-data).
|
||||
|
||||
This document explains querying specific to the Azure Monitor data source.
|
||||
For general documentation on querying data sources in Grafana, refer to [Query and transform data](ref:query-transform-data).
|
||||
## Choose a query editing mode
|
||||
|
||||
The Azure Monitor data source can query data from Azure Monitor Metrics and Logs, the Azure Resource Graph, and Application Insights Traces. Each source has its own specialized query editor.
|
||||
|
||||
## Before you begin
|
||||
|
||||
- Ensure you have [configured the Azure Monitor data source](ref:configure-azure-monitor).
|
||||
- Verify your credentials have appropriate permissions for the resources you want to query.
|
||||
|
||||
## Key concepts
|
||||
|
||||
If you're new to Azure Monitor, here are some key terms used throughout this documentation:
|
||||
|
||||
| Term | Description |
|
||||
| ------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **KQL (Kusto Query Language)** | The query language used for Azure Monitor Logs and Azure Resource Graph. KQL uses a pipe-based syntax similar to Unix commands and is optimized for read-only data exploration. If you know SQL, the [SQL to Kusto cheat sheet](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/sqlcheatsheet) can help you get started. |
|
||||
| **Log Analytics workspace** | An Azure resource that collects and stores log data from your Azure resources, applications, and services. You query this data using KQL. |
|
||||
| **Application Insights** | Azure's application performance monitoring (APM) service. It collects telemetry data like requests, exceptions, and traces from your applications. |
|
||||
| **Metrics vs. Logs** | **Metrics** are lightweight numeric values collected at regular intervals (e.g., CPU percentage). **Logs** are detailed records of events with varying schemas (e.g., request logs, error messages). Metrics use a visual query builder; Logs require KQL. |
|
||||
|
||||
## Choose a query editor mode
|
||||
|
||||
The Azure Monitor data source's query editor has four modes depending on which Azure service you want to query:
|
||||
The Azure Monitor data source's query editor has four modes depending on which Azure service you want to query:
|
||||
|
||||
- **Metrics** for [Azure Monitor Metrics](#query-azure-monitor-metrics)
|
||||
- **Logs** for [Azure Monitor Logs](#query-azure-monitor-logs)
|
||||
- [**Azure Resource Graph**](#query-azure-resource-graph)
|
||||
- **Traces** for [Application Insights Traces](#query-application-insights-traces)
|
||||
- **Azure Resource Graph** for [Azure Resource Graph](#query-azure-resource-graph)
|
||||
|
||||
## Query Azure Monitor Metrics
|
||||
|
||||
Azure Monitor Metrics collects numeric data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and you can query them to investigate your resources' health and usage and maximize availability and performance.
|
||||
Azure Monitor Metrics collects numeric data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and you can query them to investigate your resources' health and usage and maximize availability and performance.
|
||||
|
||||
Monitor Metrics use a lightweight format that stores only numeric data in a specific structure and supports near real-time scenarios, making it useful for fast detection of issues.
|
||||
In contrast, Azure Monitor Logs can store a variety of data types, each with their own structure.
|
||||
|
||||
{{< figure src="/static/img/docs/azure-monitor/query-editor-metrics.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor Metrics sample query visualizing CPU percentage over time" >}}
|
||||
{{< figure src="/static/img/docs/azure-monitor/query-editor-metrics.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor Metrics sample query visualizing CPU percentage over time" >}}
|
||||
|
||||
### Create a Metrics query
|
||||
|
||||
@@ -141,7 +85,7 @@ Optionally, you can apply further aggregations or filter by dimensions.
|
||||
|
||||
The available options change depending on what is relevant to the selected metric.
|
||||
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
You can also augment queries by using [template variables](../template-variables/).
|
||||
|
||||
### Format legend aliases
|
||||
|
||||
@@ -165,7 +109,7 @@ For example:
|
||||
| `{{ dimensionname }}` | _(Legacy for backward compatibility)_ Replaced with the name of the first dimension. |
|
||||
| `{{ dimensionvalue }}` | _(Legacy for backward compatibility)_ Replaced with the value of the first dimension. |
|
||||
|
||||
### Filter with dimensions
|
||||
### Filter using dimensions
|
||||
|
||||
Some metrics also have dimensions, which associate additional metadata.
|
||||
Dimensions are represented as key-value pairs assigned to each value of a metric.
|
||||
@@ -177,7 +121,7 @@ For more information on multi-dimensional metrics, refer to the [Azure Monitor d
|
||||
|
||||
## Query Azure Monitor Logs
|
||||
|
||||
Azure Monitor Logs collects and organizes log and performance data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and makes many sources of data available to query together with the [Kusto Query Language (KQL)](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/).
|
||||
Azure Monitor Logs collects and organizes log and performance data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and makes many sources of data available to query together with the [Kusto Query Language (KQL)](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/).
|
||||
|
||||
While Azure Monitor Metrics stores only simplified numerical data, Logs can store different data types, each with their own structure.
|
||||
You can also perform complex analysis of Logs data by using KQL.
|
||||
@@ -186,32 +130,6 @@ The Azure Monitor data source also supports querying of [Basic Logs](https://lea
|
||||
|
||||
{{< figure src="/static/img/docs/azure-monitor/query-editor-logs.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor Logs sample query comparing successful requests to failed requests" >}}
|
||||
|
||||
### Logs query builder (public preview)
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The Logs query builder is a [public preview feature](/docs/release-life-cycle/). It may not be enabled in all Grafana environments.
|
||||
{{< /admonition >}}
|
||||
|
||||
The Logs query builder provides a visual interface for building Azure Monitor Logs queries without writing KQL. This is helpful if you're new to KQL or want to quickly build simple queries.
|
||||
|
||||
**To enable the Logs query builder:**
|
||||
|
||||
1. Enable the `azureMonitorLogsBuilderEditor` [feature toggle](ref:configure-grafana-feature-toggles) in your Grafana configuration.
|
||||
1. Restart Grafana for the change to take effect.
|
||||
|
||||
**To switch between Builder and Code modes:**
|
||||
|
||||
When the feature is enabled, a **Builder / Code** toggle appears in the Logs query editor:
|
||||
|
||||
- **Builder**: Use the visual interface to select tables, columns, filters, and aggregations. The builder generates the KQL query for you.
|
||||
- **Code**: Write KQL queries directly. Use this mode for complex queries that require full KQL capabilities.
|
||||
|
||||
New queries default to Builder mode. Existing queries that were created with raw KQL remain in Code mode.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
You can switch from Builder to Code mode at any time to view or edit the generated KQL. However, switching from Code to Builder mode may not preserve complex queries that can't be represented in the builder interface.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Create a Logs query
|
||||
|
||||
**To create a Logs query:**
|
||||
@@ -222,13 +140,13 @@ You can switch from Builder to Code mode at any time to view or edit the generat
|
||||
|
||||
Alternatively, you can dynamically query all resources under a single resource group or subscription.
|
||||
{{< admonition type="note" >}}
|
||||
If a time span is specified in the query, the overlap between the query time span and the dashboard time range will be used. See the [API documentation for
|
||||
If a timespan is specified in the query, the overlap of the timespan between the query and the dashboard will be used as the query timespan. See the [API documentation for
|
||||
details.](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters)
|
||||
{{< /admonition >}}
|
||||
|
||||
1. Enter your KQL query.
|
||||
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
You can also augment queries by using [template variables](../template-variables/).
|
||||
|
||||
**To create a Basic Logs query:**
|
||||
|
||||
@@ -243,7 +161,7 @@ You can also augment queries by using [template variables](ref:template-variable
|
||||
{{< /admonition >}}
|
||||
1. Enter your KQL query.
|
||||
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
You can also augment queries by using [template variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/).
|
||||
|
||||
### Logs query examples
|
||||
|
||||
@@ -256,28 +174,24 @@ The Azure documentation includes resources to help you learn KQL:
|
||||
- [Tutorial: Use Kusto queries in Azure Monitor](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/tutorial?pivots=azuremonitor)
|
||||
- [SQL to Kusto cheat sheet](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/sqlcheatsheet)
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
**Time-range:** The time-range used for the query can be modified via the time-range switch:
|
||||
> **Time-range:** The time-range that will be used for the query can be modified via the time-range switch. Selecting `Query` will only make use of time-ranges specified within the query.
|
||||
> Specifying `Dashboard` will only make use of the Grafana time-range.
|
||||
> If there are no time-ranges specified within the query, the default Log Analytics time-range will apply.
|
||||
> For more details on this change, refer to the [Azure Monitor Logs API documentation](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters).
|
||||
> If the `Intersection` option was previously chosen it will be migrated by default to `Dashboard`.
|
||||
|
||||
- Selecting **Query** uses only time-ranges specified within the query.
|
||||
- Selecting **Dashboard** uses only the Grafana dashboard time-range.
|
||||
- If no time-range is specified in the query, the default Log Analytics time-range applies.
|
||||
|
||||
For more details, refer to the [Azure Monitor Logs API documentation](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters). If you previously used the `Intersection` option, it has been migrated to `Dashboard`.
|
||||
{{< /admonition >}}
|
||||
|
||||
This example query returns a virtual machine's CPU performance, averaged over 5-minute time grains:
|
||||
This example query returns a virtual machine's CPU performance, averaged over 5-minute time grains:
|
||||
|
||||
```kusto
|
||||
Perf
|
||||
// $__timeFilter is a special Grafana macro that filters the results to the time span of the dashboard
|
||||
// $__timeFilter is a special Grafana macro that filters the results to the time span of the dashboard
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where CounterName == "% Processor Time"
|
||||
| summarize avg(CounterValue) by bin(TimeGenerated, 5m), Computer
|
||||
| order by TimeGenerated asc
|
||||
```
|
||||
|
||||
Use time series queries for values that change over time, usually for graph visualizations such as the Time series panel.
|
||||
Use time series queries for values that change over time, usually for graph visualizations such as the Time series panel.
|
||||
Each query should return at least a datetime column and numeric value column.
|
||||
The result must also be sorted in ascending order by the datetime column.
|
||||
|
||||
@@ -443,33 +357,21 @@ Application Insights stores trace data in an underlying Log Analytics workspace
|
||||
This query type only supports Application Insights resources.
|
||||
{{< /admonition >}}
|
||||
|
||||
1. (Optional) Specify an **Operation ID** value to filter traces.
|
||||
1. (Optional) Specify **event types** to filter by.
|
||||
1. (Optional) Specify **event properties** to filter by.
|
||||
1. (Optional) Change the **Result format** to switch between tabular format and trace format.
|
||||
Running a query of this kind will return all trace data within the timespan specified by the panel/dashboard.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Selecting the trace format filters events to only the `trace` type. Use this format with the Trace visualization.
|
||||
{{< /admonition >}}
|
||||
Optionally, you can apply further filtering or select a specific Operation ID to query. The result format can also be switched between a tabular format or the trace format which will return the data in a format that can be used with the Trace visualization.
|
||||
|
||||
Running a query returns all trace data within the time span specified by the panel or dashboard time range.
|
||||
{{< admonition type="note" >}}
|
||||
Selecting the trace format will filter events with the `trace` type.
|
||||
{{< /admonition >}}
|
||||
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
1. Specify an Operation ID value.
|
||||
1. Specify event types to filter by.
|
||||
1. Specify event properties to filter by.
|
||||
|
||||
## Use queries for alerting and recording rules
|
||||
You can also augment queries by using [template variables](../template-variables/).
|
||||
|
||||
All Azure Monitor query types (Metrics, Logs, Azure Resource Graph, and Traces) can be used with Grafana Alerting and recording rules.
|
||||
|
||||
For detailed information about creating alert rules, supported query types, authentication requirements, and examples, refer to [Azure Monitor alerting](ref:alerting-azure-monitor).
|
||||
|
||||
## Work with large Azure resource datasets
|
||||
## Working with large Azure resource data sets
|
||||
|
||||
If a request exceeds the [maximum allowed value of records](https://docs.microsoft.com/en-us/azure/governance/resource-graph/concepts/work-with-data#paging-results), the result is paginated and only the first page of results is returned.
|
||||
You can use filters to reduce the number of records returned under that value.
|
||||
|
||||
## Next steps
|
||||
|
||||
- [Use template variables](../template-variables/) to create dynamic, reusable dashboards
|
||||
- [Add annotations](ref:annotations-azure-monitor) to overlay events on your graphs
|
||||
- [Set up alerting](ref:alerting-azure-monitor) to create alert rules based on Azure Monitor data
|
||||
- [Troubleshoot](ref:troubleshoot-azure-monitor) common query and configuration issues
|
||||
|
||||
@@ -23,7 +23,6 @@ labels:
|
||||
menuTitle: Template variables
|
||||
title: Azure Monitor template variables
|
||||
weight: 400
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
variables:
|
||||
- pattern: /docs/grafana/
|
||||
@@ -35,11 +34,6 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
---
|
||||
|
||||
# Azure Monitor template variables
|
||||
@@ -48,173 +42,58 @@ Instead of hard-coding details such as resource group or resource name values in
|
||||
This helps you create more interactive, dynamic, and reusable dashboards.
|
||||
Grafana refers to such variables as template variables.
|
||||
|
||||
For an introduction to templating and template variables, refer to the [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables) documentation.
|
||||
For an introduction to templating and template variables, refer to the [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables) documentation.
|
||||
|
||||
## Before you begin
|
||||
## Use query variables
|
||||
|
||||
- Ensure you have [configured the Azure Monitor data source](ref:configure-azure-monitor).
|
||||
- If you want template variables to auto-populate subscriptions, set a **Default Subscription** in the data source configuration.
|
||||
You can specify these Azure Monitor data source queries in the Variable edit view's **Query Type** field.
|
||||
|
||||
## Create a template variable
|
||||
|
||||
To create a template variable for Azure Monitor:
|
||||
|
||||
1. Open the dashboard where you want to add the variable.
|
||||
1. Click **Dashboard settings** (gear icon) in the top navigation.
|
||||
1. Select **Variables** in the left menu.
|
||||
1. Click **Add variable**.
|
||||
1. Enter a **Name** for your variable (e.g., `subscription`, `resourceGroup`, `resource`).
|
||||
1. In the **Type** dropdown, select **Query**.
|
||||
1. In the **Data source** dropdown, select your Azure Monitor data source.
|
||||
1. In the **Query Type** dropdown, select the appropriate query type (see [Available query types](#available-query-types)).
|
||||
1. Configure any additional fields required by the selected query type.
|
||||
1. Click **Run query** to preview the variable values.
|
||||
1. Configure display options such as **Multi-value** or **Include All option** as needed.
|
||||
1. Click **Apply** to save the variable.
|
||||
|
||||
## Available query types
|
||||
|
||||
The Azure Monitor data source provides the following query types for template variables:
|
||||
|
||||
| Query type | Description |
|
||||
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Subscriptions** | Returns a list of Azure subscriptions accessible to the configured credentials. |
|
||||
| **Resource Groups** | Returns resource groups for a specified subscription. Supports multi-value selection. |
|
||||
| **Namespaces** | Returns metric namespaces for the specified subscription. If a resource group is specified, returns only namespaces within that group. |
|
||||
| **Regions** | Returns Azure regions available for the specified subscription. |
|
||||
| **Resource Names** | Returns resource names for a specified subscription, resource group, and namespace. Supports multi-value selection. |
|
||||
| **Metric Names** | Returns available metric names for a specified resource. |
|
||||
| **Workspaces** | Returns Log Analytics workspaces for the specified subscription. |
|
||||
| **Logs** | Executes a KQL query and returns the results as variable values. See [Create a Logs variable](#create-a-logs-variable). |
|
||||
| **Custom Namespaces** | Returns custom metric namespaces for a specified resource. |
|
||||
| **Custom Metric Names** | Returns custom metric names for a specified resource. |
|
||||
| Name | Description |
|
||||
| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Subscriptions** | Returns subscriptions. |
|
||||
| **Resource Groups** | Returns resource groups for a specified subscription. Supports multi-value. |
|
||||
| **Namespaces** | Returns metric namespaces for the specified subscription. If a resource group is provided, only the namespaces within that group are returned. |
|
||||
| **Regions**             | Returns regions for the specified subscription.                                                                                                |
|
||||
| **Resource Names** | Returns a list of resource names for a specified subscription, resource group and namespace. Supports multi-value. |
|
||||
| **Metric Names** | Returns a list of metric names for a resource. |
|
||||
| **Workspaces** | Returns a list of workspaces for the specified subscription. |
|
||||
| **Logs** | Use a KQL query to return values. |
|
||||
| **Custom Namespaces** | Returns metric namespaces for the specified resource. |
|
||||
| **Custom Metric Names** | Returns a list of custom metric names for the specified resource. |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Custom metrics cannot be emitted against a subscription or resource group. Select specific resources when retrieving custom metric namespaces or custom metric names.
|
||||
Custom metrics cannot be emitted against a subscription or resource group. Select resources only when you need to retrieve custom metric namespaces or custom metric names associated with a specific resource.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Create cascading variables
|
||||
You can use any Log Analytics Kusto Query Language (KQL) query that returns a single list of values in the `Query` field.
|
||||
For example:
|
||||
|
||||
Cascading variables (also called dependent or chained variables) allow you to create dropdown menus that filter based on previous selections. This is useful for drilling down from subscription to resource group to specific resource.
|
||||
| Query | List of values returned |
|
||||
| ----------------------------------------------------------------------------------------- | --------------------------------------- |
|
||||
| `workspace("myWorkspace").Heartbeat \| distinct Computer` | Virtual machines |
|
||||
| `workspace("$workspace").Heartbeat \| distinct Computer` | Virtual machines with template variable |
|
||||
| `workspace("$workspace").Perf \| distinct ObjectName` | Objects from the Perf table |
|
||||
| `workspace("$workspace").Perf \| where ObjectName == "$object"` `\| distinct CounterName` | Metric names from the Perf table |
|
||||
|
||||
### Example: Subscription → Resource Group → Resource Name
|
||||
### Query variable example
|
||||
|
||||
**Step 1: Create a Subscription variable**
|
||||
|
||||
1. Create a variable named `subscription`.
|
||||
1. Set **Query Type** to **Subscriptions**.
|
||||
|
||||
**Step 2: Create a Resource Group variable**
|
||||
|
||||
1. Create a variable named `resourceGroup`.
|
||||
1. Set **Query Type** to **Resource Groups**.
|
||||
1. In the **Subscription** field, select `$subscription`.
|
||||
|
||||
**Step 3: Create a Resource Name variable**
|
||||
|
||||
1. Create a variable named `resource`.
|
||||
1. Set **Query Type** to **Resource Names**.
|
||||
1. In the **Subscription** field, select `$subscription`.
|
||||
1. In the **Resource Group** field, select `$resourceGroup`.
|
||||
1. Select the appropriate **Namespace** for your resources (e.g., `Microsoft.Compute/virtualMachines`).
|
||||
|
||||
Now when you change the subscription, the resource group dropdown updates automatically, and when you change the resource group, the resource name dropdown updates.
|
||||
|
||||
## Create a Logs variable
|
||||
|
||||
The **Logs** query type lets you use a KQL query to populate variable values. The query must return a single column of values.
|
||||
|
||||
**To create a Logs variable:**
|
||||
|
||||
1. Create a new variable with **Query Type** set to **Logs**.
|
||||
1. Select a **Resource** (Log Analytics workspace or Application Insights resource).
|
||||
1. Enter a KQL query that returns a single column.
|
||||
|
||||
### Logs variable query examples
|
||||
|
||||
| Query | Returns |
|
||||
| ----------------------------------------- | ------------------------------------- |
|
||||
| `Heartbeat \| distinct Computer` | List of virtual machine names |
|
||||
| `Perf \| distinct ObjectName` | List of performance object names |
|
||||
| `AzureActivity \| distinct ResourceGroup` | List of resource groups with activity |
|
||||
| `AppRequests \| distinct Name` | List of application request names |
|
||||
|
||||
You can reference other variables in your Logs query:
|
||||
|
||||
```kusto
|
||||
workspace("$workspace").Heartbeat | distinct Computer
|
||||
```
|
||||
|
||||
```kusto
|
||||
workspace("$workspace").Perf
|
||||
| where ObjectName == "$object"
|
||||
| distinct CounterName
|
||||
```
|
||||
|
||||
## Variable refresh options
|
||||
|
||||
Control when your variables refresh by setting the **Refresh** option:
|
||||
|
||||
| Option | Behavior |
|
||||
| ------------------------ | ----------------------------------------------------------------------------------------- |
|
||||
| **On dashboard load** | Variables refresh each time the dashboard loads. Best for data that changes infrequently. |
|
||||
| **On time range change** | Variables refresh when the dashboard time range changes. Use for time-sensitive queries. |
|
||||
|
||||
For dashboards with many variables or complex queries, use **On dashboard load** to improve performance.
|
||||
|
||||
## Use variables in queries
|
||||
|
||||
After you create template variables, you can use them in your Azure Monitor queries by referencing them with the `$` prefix.
|
||||
|
||||
### Metrics query example
|
||||
|
||||
In a Metrics query, select your variables in the resource picker fields:
|
||||
|
||||
- **Subscription**: `$subscription`
|
||||
- **Resource Group**: `$resourceGroup`
|
||||
- **Resource Name**: `$resource`
|
||||
|
||||
### Logs query example
|
||||
|
||||
Reference variables directly in your KQL queries:
|
||||
This time series query uses query variables:
|
||||
|
||||
```kusto
|
||||
Perf
|
||||
| where ObjectName == "$object" and CounterName == "$metric"
|
||||
| where TimeGenerated >= $__timeFrom() and TimeGenerated <= $__timeTo()
|
||||
| where $__contains(Computer, $computer)
|
||||
| where $__contains(Computer, $computer)
|
||||
| summarize avg(CounterValue) by bin(TimeGenerated, $__interval), Computer
|
||||
| order by TimeGenerated asc
|
||||
```
|
||||
|
||||
## Multi-value variables
|
||||
### Multi-value variables
|
||||
|
||||
You can enable **Multi-value** selection for **Resource Groups** and **Resource Names** variables. When using multi-value variables in a Metrics query, all selected resources must:
|
||||
It is possible to select multiple values for **Resource Groups** and **Resource Names** and use a single metrics query pointing to those values as long as they:
|
||||
|
||||
- Belong to the same subscription
|
||||
- Be in the same Azure region
|
||||
- Be of the same resource type (namespace)
|
||||
- Belong to the same subscription.
|
||||
- Are in the same region.
|
||||
- Are of the same type (namespace).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
When a multi-value variable is used as a parameter in another variable query (for example, to retrieve metric names), only the first selected value is used. Ensure the first resource group and resource name combination is valid.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Troubleshoot template variables
|
||||
|
||||
If you encounter issues with template variables, try the following solutions.
|
||||
|
||||
### Variable returns no values
|
||||
|
||||
- Verify the Azure Monitor data source is configured correctly and can connect to Azure.
|
||||
- Check that the credentials have appropriate permissions to list the requested resources.
|
||||
- For cascading variables, ensure parent variables have valid selections.
|
||||
|
||||
### Variable values are outdated
|
||||
|
||||
- Check the **Refresh** setting and adjust if needed.
|
||||
- Click the refresh icon next to the variable dropdown to manually refresh.
|
||||
|
||||
### Multi-value selection not working in queries
|
||||
|
||||
- Ensure the resources meet the requirements (same subscription, region, and type).
|
||||
- For Logs queries, use the `$__contains()` macro to handle multi-value variables properly.
|
||||
Also, note that if a template variable pointing to multiple resource groups or names is used in another template variable as a parameter (e.g. to retrieve metric names), only the first value will be used. This means that the combination of the first resource group and name selected should be valid.
|
||||
|
||||
@@ -1,320 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/troubleshooting/
|
||||
description: Troubleshooting guide for the Azure Monitor data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- azure
|
||||
- monitor
|
||||
- troubleshooting
|
||||
- errors
|
||||
- authentication
|
||||
- query
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Troubleshoot
|
||||
title: Troubleshoot Azure Monitor data source issues
|
||||
weight: 500
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
---
|
||||
|
||||
# Troubleshoot Azure Monitor data source issues
|
||||
|
||||
This document provides solutions to common issues you may encounter when configuring or using the Azure Monitor data source.
|
||||
|
||||
## Configuration and authentication errors
|
||||
|
||||
These errors typically occur when setting up the data source or when authentication credentials are invalid.
|
||||
|
||||
### "Authorization failed" or "Access denied"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Save & test fails with "Authorization failed"
|
||||
- Queries return "Access denied" errors
|
||||
- Subscriptions don't load when clicking **Load Subscriptions**
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| -------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| App registration doesn't have required permissions | Assign the `Reader` role to the app registration on the subscription or resource group you want to monitor. Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current). |
|
||||
| Incorrect tenant ID, client ID, or client secret | Verify the credentials in the Azure Portal under **App registrations** > your app > **Overview** (for IDs) and **Certificates & secrets** (for secret). |
|
||||
| Client secret has expired | Create a new client secret in Azure and update the data source configuration. |
|
||||
| Managed Identity not enabled on the Azure resource | For VMs, enable managed identity in the Azure Portal under **Identity**. For App Service, enable it under **Identity** in the app settings. |
|
||||
| Managed Identity not assigned the Reader role | Assign the `Reader` role to the managed identity on the target subscription or resources. |
|
||||
|
||||
### "Invalid client secret" or "Client secret not found"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Authentication fails immediately after configuration
|
||||
- Error message references invalid credentials
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Ensure you copied the client secret **value**, not the secret ID. In Azure Portal under **Certificates & secrets**, the secret value is only shown once when created. The secret ID is a different identifier and won't work for authentication.
|
||||
2. Verify the client secret was copied correctly (no extra spaces or truncation).
|
||||
3. Check if the secret has expired in Azure Portal under **App registrations** > your app > **Certificates & secrets**.
|
||||
4. Create a new secret and update the data source configuration.
|
||||
|
||||
### "Tenant not found" or "Invalid tenant ID"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Data source test fails with tenant-related errors
|
||||
- Unable to authenticate
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the Directory (tenant) ID in Azure Portal under **Microsoft Entra ID** > **Overview**.
|
||||
2. Ensure you're using the correct Azure cloud setting (Azure, Azure Government, or Azure China).
|
||||
3. Check that the tenant ID is a valid GUID format.
|
||||
|
||||
### Managed Identity not working
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Managed Identity option is available but authentication fails
|
||||
- Error: "Managed identity authentication is not available"
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify `managed_identity_enabled = true` is set in the Grafana server configuration under `[azure]`.
|
||||
2. Confirm the Azure resource hosting Grafana has managed identity enabled.
|
||||
3. For user-assigned managed identity, ensure `managed_identity_client_id` is set correctly.
|
||||
4. Verify the managed identity has the `Reader` role on the target resources.
|
||||
5. Restart Grafana after changing server configuration.
|
||||
|
||||
### Workload Identity not working
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Workload Identity authentication fails in Kubernetes/AKS environment
|
||||
- Token file errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify `workload_identity_enabled = true` is set in the Grafana server configuration.
|
||||
2. Check that the service account is correctly annotated for workload identity.
|
||||
3. Verify the federated credential is configured in Azure.
|
||||
4. Ensure the token path is accessible to the Grafana pod.
|
||||
5. Check the workload identity webhook is running in the cluster.
|
||||
|
||||
## Query errors
|
||||
|
||||
These errors occur when executing queries against Azure Monitor services.
|
||||
|
||||
### "No data" or empty results
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query executes without error but returns no data
|
||||
- Charts show "No data" message
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Time range doesn't contain data | Expand the dashboard time range or verify data exists in Azure Portal. |
|
||||
| Wrong resource selected | Verify you've selected the correct subscription, resource group, and resource. |
|
||||
| Metric not available for resource | Not all metrics are available for all resources. Check available metrics in Azure Portal under the resource's **Metrics** blade. |
|
||||
| Metric has no values | Some metrics only populate under certain conditions (e.g., error counts when errors occur). |
|
||||
| Permissions issue | Verify the identity has read access to the specific resource. |
|
||||
|
||||
### "Bad request" or "Invalid query"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query fails with 400 error
|
||||
- Error message indicates query syntax issues
|
||||
|
||||
**Solutions for Logs queries:**
|
||||
|
||||
1. Validate your KQL syntax in the Azure Portal Log Analytics query editor.
|
||||
2. Check for typos in table names or column names.
|
||||
3. Ensure referenced tables exist in the selected workspace.
|
||||
4. Verify the time range is valid (not in the future, not too far in the past for data retention).
|
||||
|
||||
**Solutions for Metrics queries:**
|
||||
|
||||
1. Verify the metric name is valid for the selected resource type.
|
||||
2. Check that dimension filters use valid dimension names and values.
|
||||
3. Ensure the aggregation type is supported for the selected metric.
|
||||
|
||||
### "Resource not found"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query fails with 404 error
|
||||
- Resource picker shows resources that can't be queried
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the resource still exists in Azure (it may have been deleted or moved).
|
||||
2. Check that the subscription is correct.
|
||||
3. Refresh the resource picker by re-selecting the subscription.
|
||||
4. Verify the identity has access to the resource's resource group.
|
||||
|
||||
### Logs query timeout
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query runs for a long time then fails
|
||||
- Error mentions timeout or query limits
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Narrow the time range to reduce data volume.
|
||||
2. Add filters to reduce the result set.
|
||||
3. Use `summarize` to aggregate data instead of returning raw rows.
|
||||
4. Consider using Basic Logs for large datasets (if enabled).
|
||||
5. Break complex queries into smaller parts.
|
||||
|
||||
### "Metrics not available" for a resource
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Resource appears in picker but no metrics are listed
|
||||
- Metric dropdown is empty
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the resource type supports Azure Monitor metrics.
|
||||
2. Check if the resource is in a region that supports metrics.
|
||||
3. Some resources require diagnostic settings to emit metrics—configure these in Azure Portal.
|
||||
4. Try selecting a different namespace for the resource.
|
||||
|
||||
## Azure Resource Graph errors
|
||||
|
||||
These errors are specific to Azure Resource Graph (ARG) queries.
|
||||
|
||||
### "Query execution failed"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- ARG query fails with execution errors
|
||||
- Results don't match expected resources
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Validate query syntax in Azure Portal Resource Graph Explorer.
|
||||
2. Check that you have access to the subscriptions being queried.
|
||||
3. Verify table names are correct (e.g., `Resources`, `ResourceContainers`).
|
||||
4. Some ARG features require specific permissions; check the [ARG documentation](https://docs.microsoft.com/en-us/azure/governance/resource-graph/).
|
||||
|
||||
### Query returns incomplete results
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Not all expected resources appear in results
|
||||
- Results seem truncated
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. ARG queries are paginated. The data source handles pagination automatically, but very large result sets may be limited.
|
||||
2. Add filters to reduce result set size.
|
||||
3. Verify you have access to all subscriptions containing the resources.
|
||||
|
||||
## Application Insights Traces errors
|
||||
|
||||
These errors are specific to the Traces query type.
|
||||
|
||||
### "No traces found"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Trace query returns empty results
|
||||
- Operation ID search finds nothing
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the Application Insights resource is collecting trace data.
|
||||
2. Check that the time range includes when the traces were generated.
|
||||
3. Ensure the Operation ID is correct (copy directly from another trace or log).
|
||||
4. Verify the identity has access to the Application Insights resource.
|
||||
|
||||
## Template variable errors
|
||||
|
||||
For detailed troubleshooting of template variables, refer to the [template variables troubleshooting section](ref:template-variables).
|
||||
|
||||
### Variables return no values
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the data source connection is working (test it in the data source settings).
|
||||
2. Check that parent variables (for cascading variables) have valid selections.
|
||||
3. Verify the identity has permissions to list the requested resources.
|
||||
4. For Logs variables, ensure the KQL query returns a single column.
|
||||
|
||||
### Variables are slow to load
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Set variable refresh to **On dashboard load** instead of **On time range change**.
|
||||
2. Reduce the scope of variable queries (e.g., filter by resource group instead of entire subscription).
|
||||
3. For Logs variables, optimize the KQL query to return results faster.
|
||||
|
||||
## Connection and network errors
|
||||
|
||||
These errors indicate problems with network connectivity between Grafana and Azure services.
|
||||
|
||||
### "Connection refused" or timeout errors
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Data source test fails with network errors
|
||||
- Queries timeout without returning results
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify network connectivity from Grafana to Azure endpoints.
|
||||
2. Check firewall rules allow outbound HTTPS (port 443) to Azure services.
|
||||
3. For private networks, ensure Private Link or VPN is configured correctly.
|
||||
4. For Grafana Cloud, configure [Private Data Source Connect](ref:configure-azure-monitor) if accessing private resources.
|
||||
|
||||
### SSL/TLS certificate errors
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Certificate validation failures
|
||||
- SSL handshake errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Ensure the system time is correct (certificate validation fails with incorrect time).
|
||||
2. Verify corporate proxy isn't intercepting HTTPS traffic.
|
||||
3. Check that required CA certificates are installed on the Grafana server.
|
||||
|
||||
## Get additional help
|
||||
|
||||
If you've tried the solutions above and still encounter issues:
|
||||
|
||||
1. Check the [Grafana community forums](https://community.grafana.com/) for similar issues.
|
||||
1. Review the [Azure Monitor data source GitHub issues](https://github.com/grafana/grafana/issues) for known bugs.
|
||||
1. Enable debug logging in Grafana to capture detailed error information.
|
||||
1. Contact Grafana Support if you're an Enterprise, Cloud Pro or Cloud Contracted user.
|
||||
1. When reporting issues, include:
|
||||
- Grafana version
|
||||
- Error messages (redact sensitive information)
|
||||
- Steps to reproduce
|
||||
- Relevant configuration (redact credentials)
|
||||
@@ -17,6 +17,16 @@ menuTitle: Elasticsearch
|
||||
title: Elasticsearch data source
|
||||
weight: 325
|
||||
refs:
|
||||
configuration:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#sigv4_auth_enabled
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#sigv4_auth_enabled
|
||||
provisioning-grafana:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
@@ -34,36 +44,12 @@ refs:
|
||||
Elasticsearch is a search and analytics engine used for a variety of use cases.
|
||||
You can create many types of queries to visualize logs or metrics stored in Elasticsearch, and annotate graphs with log events stored in Elasticsearch.
|
||||
|
||||
The following resources will help you get started with Elasticsearch and Grafana:
|
||||
The following will help you get started working with Elasticsearch and Grafana:
|
||||
|
||||
- [What is Elasticsearch?](https://www.elastic.co/guide/en/elasticsearch/reference/current/elasticsearch-intro.html)
|
||||
- [Configure the Elasticsearch data source](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/configure/)
|
||||
- [Elasticsearch query editor](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/query-editor/)
|
||||
- [Elasticsearch template variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/template-variables/)
|
||||
- [Elasticsearch annotations](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/annotations/)
|
||||
- [Elasticsearch alerting](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/alerting/)
|
||||
- [Troubleshooting issues with the Elasticsearch data source](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/troubleshooting/)
|
||||
|
||||
## Key capabilities
|
||||
|
||||
The Elasticsearch data source supports:
|
||||
|
||||
- **Metrics queries:** Aggregate and visualize numeric data using bucket and metric aggregations.
|
||||
- **Log queries:** Search, filter, and explore log data with Lucene query syntax.
|
||||
- **Annotations:** Overlay Elasticsearch events on your dashboard graphs.
|
||||
- **Alerting:** Create alerts based on Elasticsearch query results.
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before you configure the Elasticsearch data source, you need:
|
||||
|
||||
- An Elasticsearch instance (v7.17+, v8.x, or v9.x)
|
||||
- Network access from Grafana to your Elasticsearch server
|
||||
- Appropriate user credentials or API keys with read access
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
If you use Amazon OpenSearch Service (the successor to Amazon Elasticsearch Service), use the [OpenSearch data source](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/opensearch/) instead.
|
||||
{{< /admonition >}}
|
||||
- [Configure the Elasticsearch data source](/docs/grafana/latest/datasources/elasticsearch/configure-elasticsearch-data-source/)
|
||||
- [Elasticsearch query editor](query-editor/)
|
||||
- [Elasticsearch template variables](template-variables/)
|
||||
|
||||
## Supported Elasticsearch versions
|
||||
|
||||
@@ -77,18 +63,86 @@ This data source supports these versions of Elasticsearch:
|
||||
- v8.x
|
||||
- v9.x
|
||||
|
||||
The Grafana maintenance policy for the Elasticsearch data source aligns with [Elastic Product End of Life Dates](https://www.elastic.co/support/eol). Grafana ensures proper functionality for supported versions only. If you use an EOL version of Elasticsearch, you can still run queries, but the query builder displays a warning. Grafana doesn't guarantee functionality or provide fixes for EOL versions.
|
||||
Our maintenance policy for Elasticsearch data source is aligned with the [Elastic Product End of Life Dates](https://www.elastic.co/support/eol) and we ensure proper functionality for supported versions. If you are using an Elasticsearch with version that is past its end-of-life (EOL), you can still execute queries, but you will receive a notification in the query builder indicating that the version of Elasticsearch you are using is no longer supported. It's important to note that in such cases, we do not guarantee the correctness of the functionality, and we will not be addressing any related issues.
|
||||
|
||||
## Additional resources
|
||||
## Provision the data source
|
||||
|
||||
Once you have configured the Elasticsearch data source, you can:
|
||||
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
|
||||
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-grafana).
|
||||
|
||||
- Use [Explore](ref:explore) to run ad-hoc queries against your Elasticsearch data.
|
||||
- Configure and use [template variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/template-variables/) for dynamic dashboards.
|
||||
- Add [Transformations](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/) to process query results.
|
||||
- [Build dashboards](ref:build-dashboards) to visualize your Elasticsearch data.
|
||||
{{< admonition type="note" >}}
|
||||
The previously used `database` field has now been [deprecated](https://github.com/grafana/grafana/pull/58647).
|
||||
You should now use the `index` field in `jsonData` to store the index name.
|
||||
Please see the examples below.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Related data sources
|
||||
### Provisioning examples
|
||||
|
||||
- [OpenSearch](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/opensearch/) - For Amazon OpenSearch Service.
|
||||
- [Loki](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/loki/) - Grafana's log aggregation system.
|
||||
**Basic provisioning**
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Elastic
|
||||
type: elasticsearch
|
||||
access: proxy
|
||||
url: http://localhost:9200
|
||||
jsonData:
|
||||
index: '[metrics-]YYYY.MM.DD'
|
||||
interval: Daily
|
||||
timeField: '@timestamp'
|
||||
```
|
||||
|
||||
**Provision for logs**
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: elasticsearch-v7-filebeat
|
||||
type: elasticsearch
|
||||
access: proxy
|
||||
url: http://localhost:9200
|
||||
jsonData:
|
||||
index: '[filebeat-]YYYY.MM.DD'
|
||||
interval: Daily
|
||||
timeField: '@timestamp'
|
||||
logMessageField: message
|
||||
logLevelField: fields.level
|
||||
dataLinks:
|
||||
- datasourceUid: my_jaeger_uid # Target UID needs to be known
|
||||
field: traceID
|
||||
url: '$${__value.raw}' # Careful about the double "$$" because of env var expansion
|
||||
```
|
||||
|
||||
## Configure Amazon Elasticsearch Service
|
||||
|
||||
If you use Amazon Elasticsearch Service, you can use Grafana's Elasticsearch data source to visualize data from it.
|
||||
|
||||
If you use an AWS Identity and Access Management (IAM) policy to control access to your Amazon Elasticsearch Service domain, you must use AWS Signature Version 4 (AWS SigV4) to sign all requests to that domain.
|
||||
|
||||
For details on AWS SigV4, refer to the [AWS documentation](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
|
||||
|
||||
### AWS Signature Version 4 authentication
|
||||
|
||||
To sign requests to your Amazon Elasticsearch Service domain, you can enable SigV4 in Grafana's [configuration](ref:configuration).
|
||||
|
||||
Once AWS SigV4 is enabled, you can configure it on the Elasticsearch data source configuration page.
|
||||
For more information about AWS authentication options, refer to [AWS authentication](../aws-cloudwatch/aws-authentication/).
|
||||
|
||||
{{< figure src="/static/img/docs/v73/elasticsearch-sigv4-config-editor.png" max-width="500px" class="docs-image--no-shadow" caption="SigV4 configuration for AWS Elasticsearch Service" >}}
|
||||
|
||||
## Query the data source
|
||||
|
||||
You can select multiple metrics and group by multiple terms or filters when using the Elasticsearch query editor.
|
||||
|
||||
For details, see the [query editor documentation](query-editor/).
|
||||
|
||||
## Use template variables
|
||||
|
||||
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
|
||||
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
|
||||
Grafana refers to such variables as template variables.
|
||||
|
||||
For details, see the [template variables documentation](template-variables/).
|
||||
|
||||
@@ -1,144 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/elasticsearch/alerting/
|
||||
description: Using Grafana Alerting with the Elasticsearch data source
|
||||
keywords:
|
||||
- grafana
|
||||
- elasticsearch
|
||||
- alerting
|
||||
- alerts
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Alerting
|
||||
title: Elasticsearch alerting
|
||||
weight: 550
|
||||
refs:
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/
|
||||
create-alert-rule:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
---
|
||||
|
||||
# Elasticsearch alerting
|
||||
|
||||
You can use Grafana Alerting with Elasticsearch to create alerts based on your Elasticsearch data. This allows you to monitor metrics, detect anomalies, and receive notifications when specific conditions are met.
|
||||
|
||||
For general information about Grafana Alerting, refer to [Grafana Alerting](ref:alerting).
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before creating alerts with Elasticsearch, ensure you have:
|
||||
|
||||
- An Elasticsearch data source configured in Grafana
|
||||
- Appropriate permissions to create alert rules
|
||||
- Understanding of the metrics you want to monitor
|
||||
|
||||
## Supported query types
|
||||
|
||||
Elasticsearch alerting works best with **metrics queries** that return time series data. To create a valid alert query:
|
||||
|
||||
- Use a **Date histogram** as the last bucket aggregation (under **Group by**)
|
||||
- Select appropriate metric aggregations (Count, Average, Sum, Min, Max, etc.)
|
||||
|
||||
Queries that return time series data allow Grafana to evaluate values over time and trigger alerts when thresholds are crossed.
|
||||
|
||||
### Query types and alerting compatibility
|
||||
|
||||
| Query type | Alerting support | Notes |
|
||||
| ------------------------------ | ---------------- | ----------------------------------------------------------- |
|
||||
| Metrics with Date histogram | ✅ Full support | Recommended for alerting |
|
||||
| Metrics without Date histogram | ⚠️ Limited | May not evaluate correctly over time |
|
||||
| Logs | ❌ Not supported | Use metrics queries instead |
|
||||
| Raw data | ❌ Not supported | Use metrics queries instead |
|
||||
| Raw document (deprecated) | ❌ Not supported | Deprecated since Grafana v10.1. Use metrics queries instead |
|
||||
|
||||
## Create an alert rule
|
||||
|
||||
To create an alert rule using Elasticsearch:
|
||||
|
||||
1. Navigate to **Alerting** > **Alert rules**.
|
||||
1. Click **New alert rule**.
|
||||
1. Enter a name for the alert rule.
|
||||
1. Select your **Elasticsearch** data source.
|
||||
1. Build your query using the query editor:
|
||||
- Add metric aggregations (for example, Average, Count, Sum)
|
||||
- Add a Date histogram under **Group by**
|
||||
- Optionally add filters using Lucene query syntax
|
||||
1. Configure the alert condition (for example, when the average is above a threshold).
|
||||
1. Set the evaluation interval and pending period.
|
||||
1. Configure notifications and labels.
|
||||
1. Click **Save rule**.
|
||||
|
||||
For detailed instructions, refer to [Create a Grafana-managed alert rule](ref:create-alert-rule).
|
||||
|
||||
## Example alert queries
|
||||
|
||||
The following examples show common alerting scenarios with Elasticsearch.
|
||||
|
||||
### Alert on high error count
|
||||
|
||||
Monitor the number of error-level log entries:
|
||||
|
||||
1. **Query:** `level:error`
|
||||
1. **Metric:** Count
|
||||
1. **Group by:** Date histogram (interval: 1m)
|
||||
1. **Condition:** When count is above 100
|
||||
|
||||
### Alert on average response time
|
||||
|
||||
Monitor API response times:
|
||||
|
||||
1. **Query:** `type:api_request`
|
||||
1. **Metric:** Average on field `response_time`
|
||||
1. **Group by:** Date histogram (interval: 5m)
|
||||
1. **Condition:** When average is above 500 (milliseconds)
|
||||
|
||||
### Alert on unique user count drop
|
||||
|
||||
Detect drops in active users:
|
||||
|
||||
1. **Query:** `*` (all documents)
|
||||
1. **Metric:** Unique count on field `user_id`
|
||||
1. **Group by:** Date histogram (interval: 1h)
|
||||
1. **Condition:** When unique count is below 100
|
||||
|
||||
## Limitations
|
||||
|
||||
When using Elasticsearch with Grafana Alerting, be aware of the following limitations:
|
||||
|
||||
### Template variables not supported
|
||||
|
||||
Alert queries cannot contain template variables. Grafana evaluates alert rules on the backend without dashboard context, so variables like `$hostname` or `$environment` won't be resolved.
|
||||
|
||||
If your dashboard query uses template variables, create a separate query for alerting with hard-coded values.
|
||||
|
||||
### Logs queries not supported
|
||||
|
||||
Queries using the **Logs** metric type cannot be used for alerting. Convert your query to use metric aggregations with a Date histogram instead.
|
||||
|
||||
### Query complexity
|
||||
|
||||
Complex queries with many nested aggregations may timeout or fail to evaluate. Simplify queries for alerting by:
|
||||
|
||||
- Reducing the number of bucket aggregations
|
||||
- Using appropriate time intervals
|
||||
- Adding filters to limit the data scanned
|
||||
|
||||
## Best practices
|
||||
|
||||
Follow these best practices when creating Elasticsearch alerts:
|
||||
|
||||
- **Use specific filters:** Add Lucene query filters to focus on relevant data and improve query performance.
|
||||
- **Choose appropriate intervals:** Match the Date histogram interval to your evaluation frequency.
|
||||
- **Test queries first:** Verify your query returns expected results in Explore before creating an alert.
|
||||
- **Set realistic thresholds:** Base alert thresholds on historical data patterns.
|
||||
- **Use meaningful names:** Give alert rules descriptive names that indicate what they monitor.
|
||||
@@ -1,124 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/elasticsearch/annotations/
|
||||
description: Using annotations with Elasticsearch in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- elasticsearch
|
||||
- annotations
|
||||
- events
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Annotations
|
||||
title: Elasticsearch annotations
|
||||
weight: 500
|
||||
refs:
|
||||
annotate-visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
---
|
||||
|
||||
# Elasticsearch annotations
|
||||
|
||||
Annotations overlay event data on your dashboard graphs, helping you correlate log events with metrics.
|
||||
You can use Elasticsearch as a data source for annotations to display events such as deployments, alerts, or other significant occurrences on your visualizations.
|
||||
|
||||
For general information about annotations, refer to [Annotate visualizations](ref:annotate-visualizations).
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before creating Elasticsearch annotations, ensure you have:
|
||||
|
||||
- An Elasticsearch data source configured in Grafana
|
||||
- Documents in Elasticsearch containing event data with timestamp fields
|
||||
- Read access to the Elasticsearch index containing your events
|
||||
|
||||
## Create an annotation query
|
||||
|
||||
To add an Elasticsearch annotation to your dashboard:
|
||||
|
||||
1. Navigate to your dashboard and click **Dashboard settings** (gear icon).
|
||||
1. Select **Annotations** in the left menu.
|
||||
1. Click **Add annotation query**.
|
||||
1. Enter a **Name** for the annotation.
|
||||
1. Select your **Elasticsearch** data source from the **Data source** drop-down.
|
||||
1. Configure the annotation query and field mappings.
|
||||
1. Click **Save dashboard**.
|
||||
|
||||
## Query
|
||||
|
||||
Use the query field to filter which Elasticsearch documents appear as annotations. The query uses [Lucene query syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#query-string-syntax).
|
||||
|
||||
**Examples:**
|
||||
|
||||
| Query | Description |
|
||||
| ---------------------------------------- | ---------------------------------------------------- |
|
||||
| `*` | Matches all documents. |
|
||||
| `type:deployment` | Shows only deployment events. |
|
||||
| `level:error OR level:critical` | Shows error and critical events. |
|
||||
| `service:api AND environment:production` | Shows events for a specific service and environment. |
|
||||
| `tags:release` | Shows events tagged as releases. |
|
||||
|
||||
You can use template variables in your annotation queries. For example, `service:$service` filters annotations based on the selected service variable.
|
||||
|
||||
## Field mappings
|
||||
|
||||
Field mappings tell Grafana which Elasticsearch fields contain the annotation data.
|
||||
|
||||
### Time
|
||||
|
||||
The **Time** field specifies which field contains the annotation timestamp.
|
||||
|
||||
- **Default:** `@timestamp`
|
||||
- **Format:** The field must contain a date value that Elasticsearch recognizes.
|
||||
|
||||
### Time End
|
||||
|
||||
The **Time End** field specifies a field containing the end time for range annotations. Range annotations display as a shaded region on the graph instead of a single vertical line.
|
||||
|
||||
- **Default:** Empty (single-point annotations)
|
||||
- **Use case:** Display maintenance windows, incidents, or any event with a duration.
|
||||
|
||||
### Text
|
||||
|
||||
The **Text** field specifies which field contains the annotation description displayed when you hover over the annotation.
|
||||
|
||||
- **Default:** `tags`
|
||||
- **Tip:** Use a descriptive field like `message`, `description`, or `summary`.
|
||||
|
||||
### Tags
|
||||
|
||||
The **Tags** field specifies which field contains tags for the annotation. Tags help categorize and filter annotations.
|
||||
|
||||
- **Default:** Empty
|
||||
- **Format:** The field can contain either a comma-separated string or an array of strings.
|
||||
|
||||
## Example: Deployment annotations
|
||||
|
||||
To display deployment events as annotations:
|
||||
|
||||
1. Create an annotation query with the following settings:
|
||||
- **Query:** `type:deployment`
|
||||
- **Time:** `@timestamp`
|
||||
- **Text:** `message`
|
||||
- **Tags:** `environment`
|
||||
|
||||
This configuration displays deployment events with their messages as the annotation text and environments as tags.
|
||||
|
||||
## Example: Range annotations for incidents
|
||||
|
||||
To display incidents with duration:
|
||||
|
||||
1. Create an annotation query with the following settings:
|
||||
- **Query:** `type:incident`
|
||||
- **Time:** `start_time`
|
||||
- **Time End:** `end_time`
|
||||
- **Text:** `description`
|
||||
- **Tags:** `severity`
|
||||
|
||||
This configuration displays incidents as shaded regions from their start time to end time.
|
||||
@@ -0,0 +1,209 @@
|
||||
---
|
||||
aliases:
|
||||
- ../data-sources/elasticsearch/
|
||||
- ../features/datasources/elasticsearch/
|
||||
description: Guide for configuring the Elasticsearch data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- elasticsearch
|
||||
- guide
|
||||
- data source
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Configure Elasticsearch
|
||||
title: Configure the Elasticsearch data source
|
||||
weight: 200
|
||||
refs:
|
||||
administration-documentation:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
supported-expressions:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#log-level
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#log-level
|
||||
query-and-transform-data:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/panels-visualizations/query-transform-data/
|
||||
provisioning-data-source:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/#provision-the-data-source
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/data-sources/elasticsearch/#provision-the-data-source
|
||||
---
|
||||
|
||||
# Configure the Elasticsearch data source
|
||||
|
||||
Grafana ships with built-in support for Elasticsearch.
|
||||
You can create a variety of queries to visualize logs or metrics stored in Elasticsearch, and annotate graphs with log events stored in Elasticsearch.
|
||||
|
||||
For instructions on how to add a data source to Grafana, refer to the [administration documentation](ref:administration-documentation).
|
||||
|
||||
Only users with the organization `administrator` role can add data sources.
|
||||
Administrators can also [configure the data source via YAML](ref:provisioning-data-source) with Grafana's provisioning system.
|
||||
|
||||
## Configuring permissions
|
||||
|
||||
When Elasticsearch security features are enabled, it is essential to configure the necessary cluster privileges to ensure seamless operation. Below is a list of the required privileges along with their purposes:
|
||||
|
||||
- **monitor** - Necessary to retrieve the version information of the connected Elasticsearch instance.
|
||||
- **view_index_metadata** - Required for accessing mapping definitions of indices.
|
||||
- **read** - Grants the ability to perform search and retrieval operations on indices. This is essential for querying and extracting data from the cluster.
|
||||
|
||||
## Add the data source
|
||||
|
||||
To add the Elasticsearch data source, complete the following steps:
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Under **Connections**, click **Add new connection**.
|
||||
1. Enter `Elasticsearch` in the search bar.
|
||||
1. Click **Elasticsearch** under the **Data source** section.
|
||||
1. Click **Add new data source** in the upper right.
|
||||
|
||||
You will be taken to the **Settings** tab where you will set up your Elasticsearch configuration.
|
||||
|
||||
## Configuration options
|
||||
|
||||
The following is a list of configuration options for Elasticsearch.
|
||||
|
||||
The first option to configure is the name of your connection:
|
||||
|
||||
- **Name** - The data source name. This is how you refer to the data source in panels and queries. Examples: elastic-1, elasticsearch_metrics.
|
||||
|
||||
- **Default** - Toggle to select as the default data source option. When you go to a dashboard panel or Explore, this will be the default selected data source.
|
||||
|
||||
## Connection
|
||||
|
||||
Connect the Elasticsearch data source by specifying a URL.
|
||||
|
||||
- **URL** - The URL of your Elasticsearch server. If your Elasticsearch server is local, use `http://localhost:9200`. If it is on a server within a network, this is the URL with the port where you are running Elasticsearch. Example: `http://elasticsearch.example.orgname:9200`.
|
||||
|
||||
## Authentication
|
||||
|
||||
There are several authentication methods you can choose in the Authentication section.
|
||||
Select one of the following authentication methods from the dropdown menu.
|
||||
|
||||
- **Basic authentication** - The most common authentication method. Use your `data source` user name and `data source` password to connect.
|
||||
|
||||
- **Forward OAuth identity** - Forward the OAuth access token (and the OIDC ID token if available) of the user querying the data source.
|
||||
|
||||
- **No authentication** - Make the data source available without authentication. Grafana recommends using some type of authentication method.
|
||||
|
||||
<!-- - **With credentials** - Toggle to enable credentials such as cookies or auth headers to be sent with cross-site requests. -->
|
||||
|
||||
### TLS settings
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Use TLS (Transport Layer Security) for an additional layer of security when working with Elasticsearch. For information on setting up TLS encryption with Elasticsearch see [Configure TLS](https://www.elastic.co/guide/en/elasticsearch/reference/8.8/configuring-tls.html#configuring-tls). You must add TLS settings to your Elasticsearch configuration file **prior** to setting these options in Grafana.
|
||||
{{< /admonition >}}
|
||||
|
||||
- **Add self-signed certificate** - Check the box to authenticate with a CA certificate. Follow the instructions of the CA (Certificate Authority) to download the certificate file. Required for verifying self-signed TLS certificates.
|
||||
|
||||
- **TLS client authentication** - Check the box to authenticate with the TLS client, where the server authenticates the client. Add the `Server name`, `Client certificate`, and `Client key`. The **Server name** is used to verify the hostname on the returned certificate. The **Client certificate** can be generated from a Certificate Authority (CA) or be self-signed. The **Client key** can also be generated from a Certificate Authority (CA) or be self-signed. The client key encrypts the data between client and server.
|
||||
|
||||
- **Skip TLS certificate validation** - Check the box to bypass TLS certificate validation. Skipping TLS certificate validation is not recommended unless absolutely necessary or for testing purposes.
|
||||
|
||||
### HTTP headers
|
||||
|
||||
Click **+ Add header** to add one or more HTTP headers. HTTP headers pass additional context and metadata about the request/response.
|
||||
|
||||
- **Header** - Add a custom header. This allows custom headers to be passed based on the needs of your Elasticsearch instance.
|
||||
|
||||
- **Value** - The value of the header.
|
||||
|
||||
## Additional settings
|
||||
|
||||
Additional settings are optional settings that can be configured for more control over your data source.
|
||||
|
||||
### Advanced HTTP settings
|
||||
|
||||
- **Allowed cookies** - Specify cookies by name that should be forwarded to the data source. The Grafana proxy deletes all forwarded cookies by default.
|
||||
|
||||
- **Timeout** - The HTTP request timeout. This must be in seconds. There is no default, so this setting is up to you.
|
||||
|
||||
### Elasticsearch details
|
||||
|
||||
The following settings are specific to the Elasticsearch data source.
|
||||
|
||||
- **Index name** - Use the index settings to specify a default for the `time field` and your Elasticsearch index's name. You can use a time pattern, for example `[logstash-]YYYY.MM.DD`, or a wildcard for the index name. When specifying a time pattern, the fixed part(s) of the pattern should be wrapped in square brackets.
|
||||
|
||||
- **Pattern** - Select the matching pattern if using one in your index name. Options include:
|
||||
- no pattern
|
||||
- hourly
|
||||
- daily
|
||||
- weekly
|
||||
- monthly
|
||||
- yearly
|
||||
|
||||
Only select a pattern option if you have specified a time pattern in the Index name field.
|
||||
|
||||
- **Time field name** - Name of the time field. The default value is `@timestamp`. You can enter a different name.
|
||||
|
||||
- **Max concurrent shard requests** - Sets the number of shards being queried at the same time. The default is `5`. For more information on shards see [Elasticsearch's documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/scalability.html#scalability).
|
||||
|
||||
- **Min time interval** - Defines a lower limit for the auto group-by time interval. This value **must** be formatted as a number followed by a valid time identifier:
|
||||
|
||||
| Identifier | Description |
|
||||
| ---------- | ----------- |
|
||||
| `y` | year |
|
||||
| `M` | month |
|
||||
| `w` | week |
|
||||
| `d` | day |
|
||||
| `h` | hour |
|
||||
| `m` | minute |
|
||||
| `s` | second |
|
||||
| `ms` | millisecond |
|
||||
|
||||
We recommend setting this value to match your Elasticsearch write frequency.
|
||||
For example, set this to `1m` if Elasticsearch writes data every minute.
|
||||
|
||||
You can also override this setting in a dashboard panel under its data source options. The default is `10s`.
|
||||
|
||||
- **X-Pack enabled** - Toggle to enable `X-Pack`-specific features and options, which provide the [query editor](../query-editor/) with additional aggregations, such as `Rate` and `Top Metrics`.
|
||||
|
||||
- **Include frozen indices** - Toggle on when the `X-Pack enabled` setting is active. Includes frozen indices in searches. You can configure Grafana to include [frozen indices](https://www.elastic.co/guide/en/elasticsearch/reference/7.13/frozen-indices.html) when performing search requests.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Frozen indices are [deprecated in Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/frozen-indices.html) since v7.14.
|
||||
{{< /admonition >}}
|
||||
|
||||
- **Default query mode** - Specifies which query mode the data source uses by default. Options are `Metrics`, `Logs`, `Raw data`, and `Raw document`. The default is `Metrics`.
|
||||
|
||||
### Logs
|
||||
|
||||
In this section you can configure which fields the data source uses for log messages and log levels.
|
||||
|
||||
- **Message field name:** - Grabs the actual log message from the default source.
|
||||
|
||||
- **Level field name:** - Name of the field with log level/severity information. When a level label is specified, the value of this label is used to determine the log level and update the color of each log line accordingly. If the log doesn’t have a specified level label, we try to determine if its content matches any of the [supported expressions](ref:supported-expressions). The first match always determines the log level. If Grafana cannot infer a log-level field, it will be visualized with an unknown log level.
|
||||
|
||||
### Data links
|
||||
|
||||
Data links create a link from a specified field that can be accessed in Explore's logs view. You can add multiple data links by clicking **+ Add**.
|
||||
|
||||
Each data link configuration consists of:
|
||||
|
||||
- **Field** - Sets the name of the field used by the data link.
|
||||
|
||||
- **URL/query** - Sets the full link URL if the link is external. If the link is internal, this input serves as a query for the target data source.<br/>In both cases, you can interpolate the value from the field with the `${__value.raw}` macro.
|
||||
|
||||
- **URL Label** (Optional) - Sets a custom display label for the link. The link label defaults to the full external URL or name of the linked internal data source and is overridden by this setting.
|
||||
|
||||
- **Internal link** - Toggle on to set an internal link. For an internal link, you can select the target data source with a data source selector. This supports only tracing data sources.
|
||||
|
||||
## Private data source connect (PDC) and Elasticsearch
|
||||
|
||||
Use private data source connect (PDC) to connect to and query data within a secure network without opening that network to inbound traffic from Grafana Cloud. See [Private data source connect](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) for more information on how PDC works and [Configure Grafana private data source connect (PDC)](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc) for steps on setting up a PDC connection.
|
||||
|
||||
If you use PDC with SigV4 (AWS Signature Version 4 authentication), the PDC agent must allow internet egress to `sts.<region>.amazonaws.com:443`.
|
||||
|
||||
- **Private data source connect** - Click in the box to set the default PDC connection from the dropdown menu or create a new connection.
|
||||
|
||||
Once you have configured your Elasticsearch data source options, click **Save & test** at the bottom to test out your data source connection. You can also remove a connection by clicking **Delete**.
|
||||
@@ -1,377 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../configure-elasticsearch-data-source/
|
||||
description: Guide for configuring the Elasticsearch data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- elasticsearch
|
||||
- guide
|
||||
- data source
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Configure
|
||||
title: Configure the Elasticsearch data source
|
||||
weight: 200
|
||||
refs:
|
||||
administration-documentation:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
supported-expressions:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#log-level
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#log-level
|
||||
query-and-transform-data:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/panels-visualizations/query-transform-data/
|
||||
provisioning-data-source:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/configure/#provision-the-data-source
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/data-sources/elasticsearch/configure/#provision-the-data-source
|
||||
configuration:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#sigv4_auth_enabled
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#sigv4_auth_enabled
|
||||
provisioning-grafana:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/
|
||||
---
|
||||
|
||||
# Configure the Elasticsearch data source
|
||||
|
||||
Grafana ships with built-in support for Elasticsearch.
|
||||
You can create a variety of queries to visualize logs or metrics stored in Elasticsearch, and annotate graphs with log events stored in Elasticsearch.
|
||||
|
||||
For instructions on how to add a data source to Grafana, refer to the [administration documentation](ref:administration-documentation).
|
||||
Administrators can also [configure the data source via YAML](ref:provisioning-data-source) with Grafana's provisioning system.
|
||||
|
||||
## Before you begin
|
||||
|
||||
To configure the Elasticsearch data source, you need:
|
||||
|
||||
- **Grafana administrator permissions:** Only users with the organization `administrator` role can add data sources.
|
||||
- **A supported Elasticsearch version:** v7.17 or later, v8.x, or v9.x. Elastic Cloud Serverless isn't supported.
|
||||
- **Elasticsearch server URL:** The HTTP or HTTPS endpoint for your Elasticsearch instance, including the port (default: `9200`).
|
||||
- **Authentication credentials:** Depending on your Elasticsearch security configuration, you need one of the following:
|
||||
- Username and password for basic authentication
|
||||
- API key
|
||||
- No credentials (if Elasticsearch security is disabled)
|
||||
- **Network access:** Grafana must be able to reach your Elasticsearch server. For Grafana Cloud, consider using [Private data source connect (PDC)](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) if your Elasticsearch instance is in a private network.
|
||||
|
||||
## Elasticsearch permissions
|
||||
|
||||
When Elasticsearch security features are enabled, you must configure the following cluster privileges for the user or API key that Grafana uses to connect:
|
||||
|
||||
- **monitor** - Necessary to retrieve the version information of the connected Elasticsearch instance.
|
||||
- **view_index_metadata** - Required for accessing mapping definitions of indices.
|
||||
- **read** - Grants the ability to perform search and retrieval operations on indices. This is essential for querying and extracting data from the cluster.
|
||||
|
||||
## Add the data source
|
||||
|
||||
To add the Elasticsearch data source, complete the following steps:
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Under **Connections**, click **Add new connection**.
|
||||
1. Enter `Elasticsearch` in the search bar.
|
||||
1. Click **Elasticsearch** under the **Data source** section.
|
||||
1. Click **Add new data source** in the upper right.
|
||||
|
||||
You will be taken to the **Settings** tab where you will set up your Elasticsearch configuration.
|
||||
|
||||
## Configuration options
|
||||
|
||||
Configure the following basic settings for the Elasticsearch data source:
|
||||
|
||||
- **Name** - The data source name. This is how you refer to the data source in panels and queries. Examples: `elastic-1`, `elasticsearch_metrics`.
|
||||
|
||||
- **Default** - Toggle on to make this the default data source. New panels and Explore queries use the default data source.
|
||||
|
||||
## Connection
|
||||
|
||||
- **URL** - The URL of your Elasticsearch server, including the port. Examples: `http://localhost:9200`, `http://elasticsearch.example.com:9200`.
|
||||
|
||||
## Authentication
|
||||
|
||||
Select an authentication method from the drop-down menu:
|
||||
|
||||
- **Basic authentication** - Enter the username and password for your Elasticsearch user.
|
||||
|
||||
- **Forward OAuth identity** - Forward the OAuth access token (and the OIDC ID token if available) of the user querying the data source.
|
||||
|
||||
- **No authentication** - Connect without credentials. Only use this option if your Elasticsearch instance doesn't require authentication.
|
||||
|
||||
### API key authentication
|
||||
|
||||
To authenticate using an Elasticsearch API key, select **No authentication** and configure the API key using HTTP headers:
|
||||
|
||||
1. In the **HTTP headers** section, click **+ Add header**.
|
||||
1. Set **Header** to `Authorization`.
|
||||
1. Set **Value** to `ApiKey <your-api-key>`, replacing `<your-api-key>` with your base64-encoded Elasticsearch API key.
|
||||
|
||||
For information about creating API keys, refer to the [Elasticsearch API keys documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html).
|
||||
|
||||
### Amazon Elasticsearch Service
|
||||
|
||||
If you use Amazon Elasticsearch Service, you can use Grafana's Elasticsearch data source to visualize data from it.
|
||||
|
||||
If you use an AWS Identity and Access Management (IAM) policy to control access to your Amazon Elasticsearch Service domain, you must use AWS Signature Version 4 (AWS SigV4) to sign all requests to that domain.
|
||||
|
||||
For details on AWS SigV4, refer to the [AWS documentation](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
|
||||
|
||||
To sign requests to your Amazon Elasticsearch Service domain, you can enable SigV4 in Grafana's [configuration](ref:configuration).
|
||||
|
||||
Once AWS SigV4 is enabled, you can configure it on the Elasticsearch data source configuration page.
|
||||
For more information about AWS authentication options, refer to [AWS authentication](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/).
|
||||
|
||||
{{< figure src="/static/img/docs/v73/elasticsearch-sigv4-config-editor.png" max-width="500px" class="docs-image--no-shadow" caption="SigV4 configuration for AWS Elasticsearch Service" >}}
|
||||
|
||||
### TLS settings
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Use TLS (Transport Layer Security) for an additional layer of security when working with Elasticsearch. For information on setting up TLS encryption with Elasticsearch, refer to [Configure TLS](https://www.elastic.co/guide/en/elasticsearch/reference/8.8/configuring-tls.html#configuring-tls). You must add TLS settings to your Elasticsearch configuration file **prior** to setting these options in Grafana.
|
||||
{{< /admonition >}}
|
||||
|
||||
- **Add self-signed certificate** - Check the box to authenticate with a CA certificate. Follow the instructions of the CA (Certificate Authority) to download the certificate file. Required for verifying self-signed TLS certificates.
|
||||
|
||||
- **TLS client authentication** - Check the box to authenticate with the TLS client, where the server authenticates the client. Add the `Server name`, `Client certificate` and `Client key`. The **ServerName** is used to verify the hostname on the returned certificate. The **Client certificate** can be generated from a Certificate Authority (CA) or be self-signed. The **Client key** can also be generated from a Certificate Authority (CA) or be self-signed. The client key encrypts the data between client and server.
|
||||
|
||||
- **Skip TLS certificate validation** - Check the box to bypass TLS certificate validation. Skipping TLS certificate validation is not recommended unless absolutely necessary or for testing purposes.
|
||||
|
||||
### HTTP headers
|
||||
|
||||
Click **+ Add header** to add one or more HTTP headers. HTTP headers pass additional context and metadata about the request/response.
|
||||
|
||||
- **Header** - Add a custom header. This allows custom headers to be passed based on the needs of your Elasticsearch instance.
|
||||
|
||||
- **Value** - The value of the header.
|
||||
|
||||
## Additional settings
|
||||
|
||||
Additional settings are optional settings that can be configured for more control over your data source.
|
||||
|
||||
### Advanced HTTP settings
|
||||
|
||||
- **Allowed cookies** - Specify cookies by name that should be forwarded to the data source. The Grafana proxy deletes all forwarded cookies by default.
|
||||
|
||||
- **Timeout** - The HTTP request timeout. This must be in seconds. There is no default, so this setting is up to you.
|
||||
|
||||
### Elasticsearch details
|
||||
|
||||
The following settings are specific to the Elasticsearch data source.
|
||||
|
||||
- **Index name** - The name of your Elasticsearch index. You can use the following formats:
|
||||
- **Wildcard patterns** - Use `*` to match multiple indices. Examples: `logs-*`, `metrics-*`, `filebeat-*`.
|
||||
- **Time patterns** - Use date placeholders for time-based indices. Wrap the fixed portion in square brackets. Examples: `[logstash-]YYYY.MM.DD`, `[metrics-]YYYY.MM`.
|
||||
- **Specific index** - Enter the exact index name. Example: `application-logs`.
|
||||
|
||||
- **Pattern** - Select the matching pattern if you use a time pattern in your index name. Options include:
|
||||
- no pattern
|
||||
- hourly
|
||||
- daily
|
||||
- weekly
|
||||
- monthly
|
||||
- yearly
|
||||
|
||||
Only select a pattern option if you have specified a time pattern in the Index name field.
|
||||
|
||||
- **Time field name** - Name of the time field. The default value is `@timestamp`. You can enter a different name.
|
||||
|
||||
- **Max concurrent shard requests** - Sets the number of shards being queried at the same time. The default is `5`. For more information on shards, refer to the [Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/scalability.html#scalability).
|
||||
|
||||
- **Min time interval** - Defines a lower limit for the auto group-by time interval. This value **must** be formatted as a number followed by a valid time identifier:
|
||||
|
||||
| Identifier | Description |
|
||||
| ---------- | ----------- |
|
||||
| `y` | year |
|
||||
| `M` | month |
|
||||
| `w` | week |
|
||||
| `d` | day |
|
||||
| `h` | hour |
|
||||
| `m` | minute |
|
||||
| `s` | second |
|
||||
| `ms` | millisecond |
|
||||
|
||||
We recommend setting this value to match your Elasticsearch write frequency.
|
||||
For example, set this to `1m` if Elasticsearch writes data every minute.
|
||||
|
||||
You can also override this setting in a dashboard panel under its data source options. The default is `10s`.
|
||||
|
||||
- **X-Pack enabled** - Toggle to enable `X-Pack`-specific features and options, which provide the [query editor](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/query-editor/) with additional aggregations, such as `Rate` and `Top Metrics`.
|
||||
|
||||
- **Include frozen indices** - Toggle on when the `X-Pack enabled` setting is active. Includes frozen indices in searches. You can configure Grafana to include [frozen indices](https://www.elastic.co/guide/en/elasticsearch/reference/7.13/frozen-indices.html) when performing search requests.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Frozen indices are [deprecated in Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/frozen-indices.html) since v7.14.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Logs
|
||||
|
||||
Configure which fields the data source uses for log messages and log levels.
|
||||
|
||||
- **Message field name** - The field that contains the log message content.
|
||||
|
||||
- **Level field name** - The field that contains log level or severity information. When specified, Grafana uses this field to determine the log level and color-code each log line. If the log doesn't have a level field, Grafana tries to match the content against [supported expressions](ref:supported-expressions). If Grafana can't determine the log level, it displays as unknown.
|
||||
|
||||
### Data links
|
||||
|
||||
Data links create a link from a specified field that can be accessed in Explore's logs view. You can add multiple data links by clicking **+ Add**.
|
||||
|
||||
Each data link configuration consists of:
|
||||
|
||||
- **Field** - Sets the name of the field used by the data link.
|
||||
|
||||
- **URL/query** - Sets the full link URL if the link is external. If the link is internal, this input serves as a query for the target data source.<br/>In both cases, you can interpolate the value from the field with the `${__value.raw }` macro.
|
||||
|
||||
- **URL Label** (Optional) - Sets a custom display label for the link. The link label defaults to the full external URL or name of the linked internal data source and is overridden by this setting.
|
||||
|
||||
- **Internal link** - Toggle on to set an internal link. For an internal link, you can select the target data source with a data source selector. This supports only tracing data sources.
|
||||
|
||||
## Private data source connect (PDC) and Elasticsearch
|
||||
|
||||
Use private data source connect (PDC) to connect to and query data within a secure network without opening that network to inbound traffic from Grafana Cloud. Refer to [Private data source connect](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) for more information on how PDC works and [Configure Grafana private data source connect (PDC)](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc) for steps on setting up a PDC connection.
|
||||
|
||||
If you use PDC with SigV4 (AWS Signature Version 4 Authentication), the PDC agent must allow internet egress to `sts.<region>.amazonaws.com:443`.
|
||||
|
||||
- **Private data source connect** - Click in the box to set the default PDC connection from the drop-down menu or create a new connection.
|
||||
|
||||
Once you have configured your Elasticsearch data source options, click **Save & test** to test the connection. A successful connection displays the following message:
|
||||
|
||||
`Elasticsearch data source is healthy.`
|
||||
|
||||
## Provision the data source
|
||||
|
||||
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
|
||||
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-grafana).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The previously used `database` field has now been [deprecated](https://github.com/grafana/grafana/pull/58647).
|
||||
Use the `index` field in `jsonData` to store the index name.
|
||||
Refer to the examples below.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Basic provisioning
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Elastic
|
||||
type: elasticsearch
|
||||
access: proxy
|
||||
url: http://localhost:9200
|
||||
jsonData:
|
||||
index: '[metrics-]YYYY.MM.DD'
|
||||
interval: Daily
|
||||
timeField: '@timestamp'
|
||||
```
|
||||
|
||||
### Provision for logs
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: elasticsearch-v7-filebeat
|
||||
type: elasticsearch
|
||||
access: proxy
|
||||
url: http://localhost:9200
|
||||
jsonData:
|
||||
index: '[filebeat-]YYYY.MM.DD'
|
||||
interval: Daily
|
||||
timeField: '@timestamp'
|
||||
logMessageField: message
|
||||
logLevelField: fields.level
|
||||
dataLinks:
|
||||
- datasourceUid: my_jaeger_uid # Target UID needs to be known
|
||||
field: traceID
|
||||
url: '$${__value.raw}' # Careful about the double "$$" because of env var expansion
|
||||
```
|
||||
|
||||
## Provision the data source using Terraform
|
||||
|
||||
You can provision the Elasticsearch data source using [Terraform](https://www.terraform.io/) with the [Grafana Terraform provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs).
|
||||
|
||||
For more information about provisioning resources with Terraform, refer to the [Grafana as code using Terraform](https://grafana.com/docs/grafana-cloud/developer-resources/infrastructure-as-code/terraform/) documentation.
|
||||
|
||||
### Basic Terraform example
|
||||
|
||||
The following example creates a basic Elasticsearch data source for metrics:
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "elasticsearch" {
|
||||
name = "Elasticsearch"
|
||||
type = "elasticsearch"
|
||||
url = "http://localhost:9200"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
index = "[metrics-]YYYY.MM.DD"
|
||||
interval = "Daily"
|
||||
timeField = "@timestamp"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
### Terraform example for logs
|
||||
|
||||
The following example creates an Elasticsearch data source configured for logs with a data link to Jaeger:
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "elasticsearch_logs" {
|
||||
name = "Elasticsearch Logs"
|
||||
type = "elasticsearch"
|
||||
url = "http://localhost:9200"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
index = "[filebeat-]YYYY.MM.DD"
|
||||
interval = "Daily"
|
||||
timeField = "@timestamp"
|
||||
logMessageField = "message"
|
||||
logLevelField = "fields.level"
|
||||
dataLinks = [
|
||||
{
|
||||
datasourceUid = grafana_data_source.jaeger.uid
|
||||
field = "traceID"
|
||||
url = "$${__value.raw}"
|
||||
}
|
||||
]
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
### Terraform example with basic authentication
|
||||
|
||||
The following example includes basic authentication:
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "elasticsearch_auth" {
|
||||
name = "Elasticsearch"
|
||||
type = "elasticsearch"
|
||||
url = "http://localhost:9200"
|
||||
|
||||
basic_auth_enabled = true
|
||||
basic_auth_username = "elastic_user"
|
||||
|
||||
secure_json_data_encoded = jsonencode({
|
||||
basicAuthPassword = var.elasticsearch_password
|
||||
})
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
index = "[metrics-]YYYY.MM.DD"
|
||||
interval = "Daily"
|
||||
timeField = "@timestamp"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
For all available configuration options, refer to the [Grafana provider data source resource documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/data_source).
|
||||
@@ -30,7 +30,7 @@ refs:
|
||||
# Elasticsearch query editor
|
||||
|
||||
Grafana provides a query editor for Elasticsearch. Elasticsearch queries are in Lucene format.
|
||||
For more information about query syntax, refer to [Lucene query syntax](https://www.elastic.co/guide/en/kibana/current/lucene-query.html) and [Query string syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#query-string-syntax).
|
||||
See [Lucene query syntax](https://www.elastic.co/guide/en/kibana/current/lucene-query.html) and [Query string syntax](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/query-dsl-query-string-query.html#query-string-syntax) if you are new to working with Lucene queries in Elasticsearch.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
When composing Lucene queries, ensure that you use uppercase boolean operators: `AND`, `OR`, and `NOT`. Lowercase versions of these operators are not supported by the Lucene query syntax.
|
||||
@@ -38,17 +38,17 @@ When composing Lucene queries, ensure that you use uppercase boolean operators:
|
||||
|
||||
{{< figure src="/static/img/docs/elasticsearch/elastic-query-editor-10.1.png" max-width="800px" class="docs-image--no-shadow" caption="Elasticsearch query editor" >}}
|
||||
|
||||
For general documentation on querying data sources in Grafana, including options and functions common to all query editors, refer to [Query and transform data](ref:query-and-transform-data).
|
||||
For general documentation on querying data sources in Grafana, including options and functions common to all query editors, see [Query and transform data](ref:query-and-transform-data).
|
||||
|
||||
## Aggregation types
|
||||
|
||||
Elasticsearch groups aggregations into three categories:
|
||||
|
||||
- **Bucket** - Bucket aggregations don't calculate metrics, they create buckets of documents based on field values, ranges and a variety of other criteria. Refer to [Bucket aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket.html) for additional information. Use bucket aggregations under `Group by` when creating a metrics query in the query builder.
|
||||
- **Bucket** - Bucket aggregations don't calculate metrics, they create buckets of documents based on field values, ranges and a variety of other criteria. See [Bucket aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket.html) for additional information. Use bucket aggregations under `Group by` when creating a metrics query in the query builder.
|
||||
|
||||
- **Metrics** - Metrics aggregations perform calculations such as sum, average, min, etc. They can be single-value or multi-value. Refer to [Metrics aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html) for additional information. Use metrics aggregations in the metrics query type in the query builder.
|
||||
- **Metrics** - Metrics aggregations perform calculations such as sum, average, min, etc. They can be single-value or multi-value. See [Metrics aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html) for additional information. Use metrics aggregations in the metrics query type in the query builder.
|
||||
|
||||
- **Pipeline** - Pipeline aggregations work on the output of other aggregations rather than on documents or fields. Refer to [Pipeline aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline.html) for additional information.
|
||||
- **Pipeline** - Elasticsearch pipeline aggregations work with inputs or metrics created from other aggregations (not documents or fields). There are parent and sibling and sibling pipeline aggregations. See [Pipeline aggregations](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-pipeline.html) for additional information.
|
||||
|
||||
## Select a query type
|
||||
|
||||
@@ -56,51 +56,44 @@ There are three types of queries you can create with the Elasticsearch query bui
|
||||
|
||||
### Metrics query type
|
||||
|
||||
Metrics queries aggregate data and produce calculations such as count, min, max, and more. Click the metric box to view options in the drop-down menu. The default is `count`.
|
||||
Metrics queries aggregate data and produce a variety of calculations such as count, min, max, etc. Click on the metric box to view a list of options in the dropdown menu. The default is `count`.
|
||||
|
||||
- **Alias** - Aliasing only applies to **time series queries**, where the last group is `date histogram`. This is ignored for any other type of query.
|
||||
|
||||
- **Metric** - Metrics aggregations include:
|
||||
- count - refer to [Value count aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html)
|
||||
- average - refer to [Avg aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html)
|
||||
- sum - refer to [Sum aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html)
|
||||
- max - refer to [Max aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html)
|
||||
- min - refer to [Min aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html)
|
||||
- extended stats - refer to [Extended stats aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html)
|
||||
- percentiles - refer to [Percentiles aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html)
|
||||
- unique count - refer to [Cardinality aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html)
|
||||
- top metrics - refer to [Top metrics aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-metrics.html)
|
||||
- rate - refer to [Rate aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-rate-aggregation.html)
|
||||
|
||||
- **Pipeline aggregations** - Pipeline aggregations work on the output of other aggregations rather than on documents. The following pipeline aggregations are available:
|
||||
- moving function - Calculates a value based on a sliding window of aggregated values. Refer to [Moving function aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-movfn-aggregation.html).
|
||||
- derivative - Calculates the derivative of a metric. Refer to [Derivative aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-derivative-aggregation.html).
|
||||
- cumulative sum - Calculates the cumulative sum of a metric. Refer to [Cumulative sum aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-cumulative-sum-aggregation.html).
|
||||
- serial difference - Calculates the difference between values in a time series. Refer to [Serial differencing aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-serialdiff-aggregation.html).
|
||||
- bucket script - Executes a script on metric values from other aggregations. Refer to [Bucket script aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-pipeline-bucket-script-aggregation.html).
|
||||
- count - see [Value count aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-valuecount-aggregation.html)
|
||||
- average - see [Avg aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-avg-aggregation.html)
|
||||
- sum - see [Sum aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html)
|
||||
- max - see [Max aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-max-aggregation.html)
|
||||
- min - see [Min aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-min-aggregation.html)
|
||||
- extended stats - see [Extended stats aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html)
|
||||
- percentiles - see [Percentiles aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-percentile-aggregation.html)
|
||||
- unique count - see [Cardinality aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-cardinality-aggregation.html)
|
||||
- top metrics - see [Top metrics aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-top-metrics.html)
|
||||
- rate - see [Rate aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/8.9/search-aggregations-metrics-rate-aggregation.html)
|
||||
|
||||
You can select multiple metrics and group by multiple terms or filters when using the Elasticsearch query editor.
|
||||
|
||||
Use the **+ sign** to the right to add multiple metrics to your query. Click on the **eye icon** next to **Metric** to hide metrics, and the **garbage can icon** to remove metrics.
|
||||
|
||||
- **Group by options** - Create multiple group by options when constructing your Elasticsearch query. Date histogram is the default option. The following options are available in the drop-down menu:
|
||||
- terms - refer to [Terms aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html).
|
||||
- filter - refer to [Filter aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html).
|
||||
- geo hash grid - refer to [Geohash grid aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohashgrid-aggregation.html).
|
||||
- date histogram - for time series queries. Refer to [Date histogram aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html).
|
||||
- histogram - Depicts frequency distributions. Refer to [Histogram aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html).
|
||||
- nested (experimental) - Refer to [Nested aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-nested-aggregation.html).
|
||||
- **Group by options** - Create multiple group by options when constructing your Elasticsearch query. Date histogram is the default option. Below is a list of options in the dropdown menu.
|
||||
- terms - see [Terms aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html).
|
||||
- filter - see [Filter aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html).
|
||||
- geo hash grid - see [Geohash grid aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohashgrid-aggregation.html).
|
||||
- date histogram - for time series queries. See [Date histogram aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html).
|
||||
- histogram - Depicts frequency distributions. See [Histogram aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html).
|
||||
- nested (experimental) - See [Nested aggregation](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-nested-aggregation.html).
|
||||
|
||||
Each group by option will have a different subset of options to further narrow your query.
|
||||
|
||||
The following options are specific to the **date histogram** bucket aggregation option.
|
||||
|
||||
- **Time field** - The field used for time-based queries. The default can be set when configuring the data source in the **Time field name** setting under [Elasticsearch details](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/configure/#elasticsearch-details). The default is `@timestamp`.
|
||||
- **Interval** - The time interval for grouping data. Select from the drop-down menu or enter a custom interval such as `30d` (30 days). The default is `Auto`.
|
||||
- **Min doc count** - The minimum number of documents required to include a bucket. The default is `0`.
|
||||
- **Trim edges** - Removes partial buckets at the edges of the time range. The default is `0`.
|
||||
- **Offset** - Shifts the start of each bucket by the specified duration. Use positive (`+`) or negative (`-`) values. Examples: `1h`, `5s`, `1d`.
|
||||
- **Timezone** - The timezone for date calculations. The default is `Coordinated Universal Time`.
|
||||
- **Time field** - Depicts date data options. The default option can be specified when configuring the Elasticsearch data source in the **Time field name** under the [**Elasticsearch details**](/docs/grafana/latest/datasources/elasticsearch/configure-elasticsearch-data-source/#elasticsearch-details) section. Otherwise **@timestamp** field will be used as a default option.
|
||||
- **Interval** - Group by a type of interval. There are options in the dropdown menu to select seconds, minutes, hours, or days. You can also add a custom interval such as `30d` (30 days). `Auto` is the default option.
|
||||
- **Min doc count** - The minimum amount of data to include in your query. The default is `0`.
|
||||
- **Thin edges** - Select to trim edges on the time series data points. The default is `0`.
|
||||
- **Offset** - Changes the start value of each bucket by the specified positive(+) or negative (-) offset duration. Examples include `1h` for 1 hour, `5s` for 5 seconds or `1d` for 1 day.
|
||||
- **Timezone** - Select a timezone from the dropdown menu. The default is `Coordinated Universal Time`.
|
||||
|
||||
Configure the following options for the **terms** bucket aggregation option:
|
||||
|
||||
@@ -108,7 +101,7 @@ Configure the following options for the **terms** bucket aggregation option:
|
||||
- **Size** - Limits the number of documents, or size of the data set. You can set a custom number or `no limit`.
|
||||
- **Min doc count** - The minimum amount of data to include in your query. The default is `0`.
|
||||
- **Order by** - Order terms by `term value`, `doc count` or `count`.
|
||||
- **Missing** - Defines how documents missing a value should be treated. Missing values are ignored by default, but they can be treated as if they had a value. Refer to [Missing value](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html#_missing_value_5) in the Elasticsearch documentation for more information.
|
||||
- **Missing** - Defines how documents missing a value should be treated. Missing values are ignored by default, but they can be treated as if they had a value. See [Missing value](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html#_missing_value_5) in Elasticsearch's documentation for more information.
|
||||
|
||||
Configure the following options for the **filters** bucket aggregation option:
|
||||
|
||||
@@ -121,8 +114,8 @@ Configure the following options for the **geo hash grid** bucket aggregation opt
|
||||
|
||||
Configure the following options for the **histogram** bucket aggregation option:
|
||||
|
||||
- **Interval** - The numeric interval for grouping values into buckets.
|
||||
- **Min doc count** - The minimum number of documents required to include a bucket. The default is `0`.
|
||||
- **Interval** - Group by a type of interval. There are options to choose from the dropdown menu to select seconds, minutes, hours, or days. You can also add a custom interval such as `30d` (30 days). `Auto` is the default option.
|
||||
- **Min doc count** - The minimum amount of data to include in your query. The default is `0`.
|
||||
|
||||
The **nested** group by option is currently experimental. You can select a field and then configure settings specific to that field.
|
||||
|
||||
@@ -148,7 +141,7 @@ The option to run a **raw document query** is deprecated as of Grafana v10.1.
|
||||
|
||||
## Use template variables
|
||||
|
||||
You can also augment queries by using [template variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/template-variables/).
|
||||
You can also augment queries by using [template variables](../template-variables/).
|
||||
|
||||
Queries of `terms` have a 500-result limit by default.
|
||||
To set a custom limit, set the `size` property in your query.
|
||||
|
||||
@@ -22,11 +22,6 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
add-template-variables-add-ad-hoc-filters:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/#add-ad-hoc-filters
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/#add-ad-hoc-filters
|
||||
add-template-variables-multi-value-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/#multi-value-variables
|
||||
@@ -42,29 +37,11 @@ refs:
|
||||
# Elasticsearch template variables
|
||||
|
||||
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
|
||||
Grafana lists these variables in drop-down select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
|
||||
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
|
||||
Grafana refers to such variables as template variables.
|
||||
|
||||
For an introduction to templating and template variables, refer to the [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables) documentation.
|
||||
|
||||
## Use ad hoc filters
|
||||
|
||||
Elasticsearch supports the **Ad hoc filters** variable type.
|
||||
You can use this variable type to specify any number of key/value filters, and Grafana applies them automatically to all of your Elasticsearch queries.
|
||||
|
||||
Ad hoc filters support the following operators:
|
||||
|
||||
| Operator | Description |
|
||||
| -------- | ------------------------------------------------------------- |
|
||||
| `=` | Equals. Adds `AND field:"value"` to the query. |
|
||||
| `!=` | Not equals. Adds `AND -field:"value"` to the query. |
|
||||
| `=~` | Matches regex. Adds `AND field:/value/` to the query. |
|
||||
| `!~` | Does not match regex. Adds `AND -field:/value/` to the query. |
|
||||
| `>` | Greater than. Adds `AND field:>value` to the query. |
|
||||
| `<` | Less than. Adds `AND field:<value` to the query. |
|
||||
|
||||
For more information, refer to [Add ad hoc filters](ref:add-template-variables-add-ad-hoc-filters).
|
||||
|
||||
## Choose a variable syntax
|
||||
|
||||
The Elasticsearch data source supports two variable syntaxes for use in the **Query** field:
|
||||
@@ -73,35 +50,34 @@ The Elasticsearch data source supports two variable syntaxes for use in the **Qu
|
||||
- `[[varname]]`, such as `hostname:[[hostname]]`
|
||||
|
||||
When the _Multi-value_ or _Include all value_ options are enabled, Grafana converts the labels from plain text to a Lucene-compatible condition.
|
||||
For details, refer to the [Multi-value variables](ref:add-template-variables-multi-value-variables) documentation.
|
||||
For details, see the [Multi-value variables](ref:add-template-variables-multi-value-variables) documentation.
|
||||
|
||||
## Use variables in queries
|
||||
|
||||
You can use variables in the Lucene query field, metric aggregation fields, bucket aggregation fields, and the alias field.
|
||||
|
||||
### Variables in Lucene queries
|
||||
|
||||
Use variables to filter your Elasticsearch queries dynamically:
|
||||
You can use other variables inside the query.
|
||||
This example defines a variable named `$host`:
|
||||
|
||||
```
|
||||
hostname:$hostname AND level:$level
|
||||
{"find": "terms", "field": "hostname", "query": "source:$source"}
|
||||
```
|
||||
|
||||
### Chain or nest variables
|
||||
This uses another variable named `$source` inside the query definition.
|
||||
Whenever you change the value of the `$source` variable via the dropdown, Grafana triggers an update of the `$host` variable to contain only hostnames filtered by, in this case, the `source` document property.
|
||||
|
||||
You can create nested variables, where one variable's values depend on another variable's selection.
|
||||
These queries by default return results in term order (which can then be sorted alphabetically or numerically as for any variable).
|
||||
To produce a list of terms sorted by doc count (a top-N values list), add an `orderBy` property of "doc_count".
|
||||
This automatically selects a descending sort.
|
||||
|
||||
This example defines a variable named `$host` that only shows hosts matching the selected `$environment`:
|
||||
{{< admonition type="note" >}}
|
||||
To use an ascending sort (`asc`) with doc_count (a bottom-N list), set `order: "asc"`. However, Elasticsearch [discourages this](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html#search-aggregations-bucket-terms-aggregation-order) because sorting by ascending doc count can return inaccurate results.
|
||||
{{< /admonition >}}
|
||||
|
||||
To keep terms in the doc count order, set the variable's Sort dropdown to **Disabled**.
|
||||
You can alternatively use other sorting criteria, such as **Alphabetical**, to re-sort them.
|
||||
|
||||
```json
|
||||
{ "find": "terms", "field": "hostname", "query": "environment:$environment" }
|
||||
```
|
||||
|
||||
Whenever you change the value of the `$environment` variable via the drop-down, Grafana triggers an update of the `$host` variable to contain only hostnames filtered by the selected environment.
|
||||
|
||||
### Variables in aggregations
|
||||
|
||||
You can use variables in bucket aggregation fields to dynamically change how data is grouped. For example, use a variable in the **Terms** group by field to let users switch between grouping by `hostname`, `service`, or `datacenter`.
|
||||
{"find": "terms", "field": "hostname", "orderBy": "doc_count"}
|
||||
```
|
||||
|
||||
## Template variable examples
|
||||
|
||||
@@ -116,36 +92,11 @@ Write the query using a custom JSON string, with the field mapped as a [keyword]
|
||||
|
||||
If the query is [multi-field](https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-fields.html) with both a `text` and `keyword` type, use `"field":"fieldname.keyword"` (sometimes `fieldname.raw`) to specify the keyword field in your query.
|
||||
|
||||
| Query | Description |
|
||||
| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ |
|
||||
| `{"find": "fields", "type": "keyword"}` | Returns a list of field names with the index type `keyword`. |
|
||||
| `{"find": "fields", "type": "number"}` | Returns a list of numeric field names (includes `float`, `double`, `integer`, `long`, `scaled_float`). |
|
||||
| `{"find": "fields", "type": "date"}` | Returns a list of date field names. |
|
||||
| `{"find": "terms", "field": "hostname.keyword", "size": 1000}` | Returns a list of values for a keyword field. Uses the current dashboard time range. |
|
||||
| `{"find": "terms", "field": "hostname", "query": "<Lucene query>"}` | Returns a list of values filtered by a Lucene query. Uses the current dashboard time range. |
|
||||
| `{"find": "terms", "field": "status", "orderBy": "doc_count"}` | Returns values sorted by document count (descending by default). |
|
||||
| `{"find": "terms", "field": "status", "orderBy": "doc_count", "order": "asc"}` | Returns values sorted by document count in ascending order. |
|
||||
| Query | Description |
|
||||
| ------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `{"find": "fields", "type": "keyword"}` | Returns a list of field names with the index type `keyword`. |
|
||||
| `{"find": "terms", "field": "hostname.keyword", "size": 1000}` | Returns a list of values for a keyword using term aggregation. Query will use current dashboard time range as time range query. |
|
||||
| `{"find": "terms", "field": "hostname", "query": "<Lucene query>"}` | Returns a list of values for a keyword field using term aggregation and a specified Lucene query filter. Query will use current dashboard time range as time range for query. |
|
||||
|
||||
Queries of `terms` have a 500-result limit by default. To set a custom limit, set the `size` property in your query.
|
||||
|
||||
### Sort query results
|
||||
|
||||
By default, queries return results in term order (which can then be sorted alphabetically or numerically using the variable's Sort setting).
|
||||
|
||||
To produce a list of terms sorted by document count (a top-N values list), add an `orderBy` property of `doc_count`. This automatically selects a descending sort:
|
||||
|
||||
```json
|
||||
{ "find": "terms", "field": "status", "orderBy": "doc_count" }
|
||||
```
|
||||
|
||||
You can also use the `order` property to explicitly set ascending or descending sort:
|
||||
|
||||
```json
|
||||
{ "find": "terms", "field": "hostname", "orderBy": "doc_count", "order": "asc" }
|
||||
```
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Elasticsearch [discourages](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html#search-aggregations-bucket-terms-aggregation-order) sorting by ascending doc count because it can return inaccurate results.
|
||||
{{< /admonition >}}
|
||||
|
||||
To keep terms in the document count order, set the variable's Sort drop-down to **Disabled**. You can alternatively use other sorting criteria, such as **Alphabetical**, to re-sort them.
|
||||
Queries of `terms` have a 500-result limit by default.
|
||||
To set a custom limit, set the `size` property in your query.
|
||||
|
||||
@@ -1,266 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/elasticsearch/troubleshooting/
|
||||
description: Troubleshooting the Elasticsearch data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- elasticsearch
|
||||
- troubleshooting
|
||||
- errors
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Troubleshooting
|
||||
title: Troubleshoot issues with the Elasticsearch data source
|
||||
weight: 600
|
||||
---
|
||||
|
||||
# Troubleshoot issues with the Elasticsearch data source
|
||||
|
||||
This document provides troubleshooting information for common errors you may encounter when using the Elasticsearch data source in Grafana.
|
||||
|
||||
## Connection errors
|
||||
|
||||
The following errors occur when Grafana cannot establish or maintain a connection to Elasticsearch.
|
||||
|
||||
### Failed to connect to Elasticsearch
|
||||
|
||||
**Error message:** "Health check failed: Failed to connect to Elasticsearch"
|
||||
|
||||
**Cause:** Grafana cannot establish a network connection to the Elasticsearch server.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify that the Elasticsearch URL is correct in the data source configuration.
|
||||
1. Check that Elasticsearch is running and accessible from the Grafana server.
|
||||
1. Ensure there are no firewall rules blocking the connection.
|
||||
1. If using a proxy, verify the proxy settings are correct.
|
||||
1. For Grafana Cloud, ensure you have configured [Private data source connect](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) if your Elasticsearch instance is not publicly accessible.
|
||||
|
||||
### Request timed out
|
||||
|
||||
**Error message:** "Health check failed: Elasticsearch data source is not healthy. Request timed out"
|
||||
|
||||
**Cause:** The connection to Elasticsearch timed out before receiving a response.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check the network latency between Grafana and Elasticsearch.
|
||||
1. Verify that Elasticsearch is not overloaded or experiencing performance issues.
|
||||
1. Increase the timeout setting in the data source configuration if needed.
|
||||
1. Check if any network devices (load balancers, proxies) are timing out the connection.
|
||||
|
||||
### Failed to parse data source URL
|
||||
|
||||
**Error message:** "Failed to parse data source URL"
|
||||
|
||||
**Cause:** The URL entered in the data source configuration is not valid.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the URL format is correct (for example, `http://localhost:9200` or `https://elasticsearch.example.com:9200`).
|
||||
1. Ensure the URL includes the protocol (`http://` or `https://`).
|
||||
1. Remove any trailing slashes or invalid characters from the URL.
|
||||
|
||||
## Authentication errors
|
||||
|
||||
The following errors occur when there are issues with authentication credentials or permissions.
|
||||
|
||||
### Unauthorized (401)
|
||||
|
||||
**Error message:** "Health check failed: Elasticsearch data source is not healthy. Status: 401 Unauthorized"
|
||||
|
||||
**Cause:** The authentication credentials are invalid or missing.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify that the username and password are correct.
|
||||
1. If using an API key, ensure the key is valid and has not expired.
|
||||
1. Check that the authentication method selected matches your Elasticsearch configuration.
|
||||
1. Verify the user has the required permissions to access the Elasticsearch cluster.
|
||||
|
||||
### Forbidden (403)
|
||||
|
||||
**Error message:** "Health check failed: Elasticsearch data source is not healthy. Status: 403 Forbidden"
|
||||
|
||||
**Cause:** The authenticated user does not have permission to access the requested resource.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the user has read access to the specified index.
|
||||
1. Check Elasticsearch security settings and role mappings.
|
||||
1. Ensure the user has permission to access the `_cluster/health` endpoint.
|
||||
1. If using AWS Elasticsearch Service with SigV4 authentication, verify the IAM policy grants the required permissions.
|
||||
|
||||
## Cluster health errors
|
||||
|
||||
The following errors occur when the Elasticsearch cluster is unhealthy or unavailable.
|
||||
|
||||
### Cluster status is red
|
||||
|
||||
**Error message:** "Health check failed: Elasticsearch data source is not healthy"
|
||||
|
||||
**Cause:** The Elasticsearch cluster health status is red, indicating one or more primary shards are not allocated.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check the Elasticsearch cluster health using `GET /_cluster/health`.
|
||||
1. Review Elasticsearch logs for errors.
|
||||
1. Verify all nodes in the cluster are running and connected.
|
||||
1. Check for unassigned shards using `GET /_cat/shards?v&h=index,shard,prirep,state,unassigned.reason`.
|
||||
1. Consider increasing the cluster's resources or reducing the number of shards.
|
||||
|
||||
### Bad Gateway (502)
|
||||
|
||||
**Error message:** "Health check failed: Elasticsearch data source is not healthy. Status: 502 Bad Gateway"
|
||||
|
||||
**Cause:** A proxy or load balancer between Grafana and Elasticsearch returned an error.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check the health of any proxies or load balancers in the connection path.
|
||||
1. Verify Elasticsearch is running and accepting connections.
|
||||
1. Review proxy/load balancer logs for more details.
|
||||
1. Ensure the proxy timeout is configured appropriately for Elasticsearch requests.
|
||||
|
||||
## Index errors
|
||||
|
||||
The following errors occur when there are issues with the configured index or index pattern.
|
||||
|
||||
### Index not found
|
||||
|
||||
**Error message:** "Error validating index: index_not_found"
|
||||
|
||||
**Cause:** The specified index or index pattern does not match any existing indices.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the index name or pattern in the data source configuration.
|
||||
1. Check that the index exists using `GET /_cat/indices`.
|
||||
1. If using a time-based index pattern (for example, `[logs-]YYYY.MM.DD`), ensure indices exist for the selected time range.
|
||||
1. Verify the user has permission to access the index.
|
||||
|
||||
### Time field not found
|
||||
|
||||
**Error message:** "Could not find time field '@timestamp' with type date in index"
|
||||
|
||||
**Cause:** The specified time field does not exist in the index or is not of type `date`.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the time field name in the data source configuration matches the field in your index.
|
||||
1. Check the field mapping using `GET /<index>/_mapping`.
|
||||
1. Ensure the time field is mapped as a `date` type, not `text` or `keyword`.
|
||||
1. If the field name is different (for example, `timestamp` instead of `@timestamp`), update the data source configuration.
|
||||
|
||||
## Query errors
|
||||
|
||||
The following errors occur when there are issues with query syntax or configuration.
|
||||
|
||||
### Too many buckets
|
||||
|
||||
**Error message:** "Trying to create too many buckets. Must be less than or equal to: [65536]."
|
||||
|
||||
**Cause:** The query is generating more aggregation buckets than Elasticsearch allows.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Reduce the time range of your query.
|
||||
1. Increase the date histogram interval (for example, change from `10s` to `1m`).
|
||||
1. Add filters to reduce the number of documents being aggregated.
|
||||
1. Increase the `search.max_buckets` setting in Elasticsearch (requires cluster admin access).
|
||||
|
||||
### Required field missing
|
||||
|
||||
**Error message:** "Required one of fields [field, script], but none were specified."
|
||||
|
||||
**Cause:** A metric aggregation (such as Average, Sum, or Min) was added without specifying a field.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Select a field for the metric aggregation in the query editor.
|
||||
1. Ensure the selected field exists in your index and contains numeric data.
|
||||
|
||||
### Unsupported interval
|
||||
|
||||
**Error message:** "unsupported interval '<interval>'"
|
||||
|
||||
**Cause:** The interval specified for the index pattern is not valid.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Use a supported interval: `Hourly`, `Daily`, `Weekly`, `Monthly`, or `Yearly`.
|
||||
1. If you don't need a time-based index pattern, use `No pattern` and specify the exact index name.
|
||||
|
||||
## Version errors
|
||||
|
||||
The following errors occur when there are Elasticsearch version compatibility issues.
|
||||
|
||||
### Unsupported Elasticsearch version
|
||||
|
||||
**Error message:** "Support for Elasticsearch versions after their end-of-life (currently versions < 7.16) was removed. Using unsupported version of Elasticsearch may lead to unexpected and incorrect results."
|
||||
|
||||
**Cause:** The Elasticsearch version is no longer supported by the Grafana data source.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Upgrade Elasticsearch to a supported version (7.17+, 8.x, or 9.x).
|
||||
1. Refer to [Elastic Product End of Life Dates](https://www.elastic.co/support/eol) for version support information.
|
||||
1. Note that queries may still work, but Grafana does not guarantee functionality for unsupported versions.
|
||||
|
||||
## Other common issues
|
||||
|
||||
The following issues don't produce specific error messages but are commonly encountered.
|
||||
|
||||
### Empty query results
|
||||
|
||||
**Cause:** The query returns no data.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the time range includes data in your index.
|
||||
1. Check the Lucene query syntax for errors.
|
||||
1. Test the query directly in Elasticsearch using the `_search` API.
|
||||
1. Ensure the index contains documents matching your query filters.
|
||||
|
||||
### Slow query performance
|
||||
|
||||
**Cause:** Queries take a long time to execute.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Reduce the time range of your query.
|
||||
1. Add more specific filters to limit the data scanned.
|
||||
1. Increase the date histogram interval.
|
||||
1. Check Elasticsearch cluster performance and resource utilization.
|
||||
1. Consider using index aliases or data streams for better query routing.
|
||||
|
||||
### CORS errors in browser console
|
||||
|
||||
**Cause:** Cross-Origin Resource Sharing (CORS) is blocking requests from the browser to Elasticsearch.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Use Server (proxy) access mode instead of Browser access mode in the data source configuration.
|
||||
1. If Browser access is required, configure CORS settings in Elasticsearch:
|
||||
|
||||
```yaml
|
||||
http.cors.enabled: true
|
||||
http.cors.allow-origin: '<your-grafana-url>'
|
||||
http.cors.allow-headers: 'Authorization, Content-Type'
|
||||
http.cors.allow-credentials: true
|
||||
```
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Server (proxy) access mode is recommended for security and reliability.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Get additional help
|
||||
|
||||
If you continue to experience issues after following this troubleshooting guide:
|
||||
|
||||
1. Check the [Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) for API-specific guidance.
|
||||
1. Review the [Grafana community forums](https://community.grafana.com/) for similar issues.
|
||||
1. Contact Grafana Support if you have an Enterprise license.
|
||||
@@ -52,7 +52,6 @@ The following documents will help you get started with the InfluxDB data source
|
||||
- [Configure the InfluxDB data source](./configure-influxdb-data-source/)
|
||||
- [InfluxDB query editor](./query-editor/)
|
||||
- [InfluxDB templates and variables](./template-variables/)
|
||||
- [Troubleshoot issues with the InfluxDB data source](./troubleshooting/)
|
||||
|
||||
Once you have configured the data source you can:
|
||||
|
||||
|
||||
@@ -1,291 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/influxdb/troubleshooting/
|
||||
description: Troubleshooting the InfluxDB data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- influxdb
|
||||
- troubleshooting
|
||||
- errors
|
||||
- flux
|
||||
- influxql
|
||||
- sql
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Troubleshooting
|
||||
title: Troubleshoot issues with the InfluxDB data source
|
||||
weight: 600
|
||||
---
|
||||
|
||||
# Troubleshoot issues with the InfluxDB data source
|
||||
|
||||
This document provides troubleshooting information for common errors you may encounter when using the InfluxDB data source in Grafana.
|
||||
|
||||
## Connection errors
|
||||
|
||||
The following errors occur when Grafana cannot establish or maintain a connection to InfluxDB.
|
||||
|
||||
### Failed to connect to InfluxDB
|
||||
|
||||
**Error message:** "error performing influxQL query" or "error performing flux query" or "error performing sql query"
|
||||
|
||||
**Cause:** Grafana cannot establish a network connection to the InfluxDB server.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify that the InfluxDB URL is correct in the data source configuration.
|
||||
1. Check that InfluxDB is running and accessible from the Grafana server.
|
||||
1. Ensure the URL includes the protocol (`http://` or `https://`).
|
||||
1. Verify the port is correct (the InfluxDB default API port is `8086`).
|
||||
1. Ensure there are no firewall rules blocking the connection.
|
||||
1. For Grafana Cloud, ensure you have configured [Private data source connect](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) if your InfluxDB instance is not publicly accessible.
|
||||
|
||||
### Request timed out
|
||||
|
||||
**Error message:** "context deadline exceeded" or "request timeout"
|
||||
|
||||
**Cause:** The connection to InfluxDB timed out before receiving a response.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check the network latency between Grafana and InfluxDB.
|
||||
1. Verify that InfluxDB is not overloaded or experiencing performance issues.
|
||||
1. Increase the timeout setting in the data source configuration under **Advanced HTTP Settings**.
|
||||
1. Reduce the time range or complexity of your query.
|
||||
|
||||
## Authentication errors
|
||||
|
||||
The following errors occur when there are issues with authentication credentials or permissions.
|
||||
|
||||
### Unauthorized (401)
|
||||
|
||||
**Error message:** "401 Unauthorized" or "authorization failed"
|
||||
|
||||
**Cause:** The authentication credentials are invalid or missing.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify that the token or password is correct in the data source configuration.
|
||||
1. For Flux and SQL, ensure the token has not expired.
|
||||
1. For InfluxQL with InfluxDB 2.x, verify the token is set as an `Authorization` header with the value `Token <your-token>`.
|
||||
1. For InfluxDB 1.x, verify the username and password are correct.
|
||||
1. Check that the token has the required permissions to access the specified bucket or database.
|
||||
|
||||
### Forbidden (403)
|
||||
|
||||
**Error message:** "403 Forbidden" or "access denied"
|
||||
|
||||
**Cause:** The authenticated user or token does not have permission to access the requested resource.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the token has read access to the specified bucket or database.
|
||||
1. Check the token's permissions in the InfluxDB UI under **API Tokens**.
|
||||
1. Ensure the organization ID is correct for Flux queries.
|
||||
1. For InfluxQL with InfluxDB 2.x, verify the DBRP mapping is configured correctly.
|
||||
|
||||
## Configuration errors
|
||||
|
||||
The following errors occur when the data source is not configured correctly.
|
||||
|
||||
### Unknown influx version
|
||||
|
||||
**Error message:** "unknown influx version"
|
||||
|
||||
**Cause:** The query language is not properly configured in the data source settings.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Open the data source configuration in Grafana.
|
||||
1. Verify that a valid query language is selected: **Flux**, **InfluxQL**, or **SQL**.
|
||||
1. Ensure the selected query language matches your InfluxDB version:
|
||||
- Flux: InfluxDB 1.8+ and 2.x
|
||||
- InfluxQL: InfluxDB 1.x and 2.x (with DBRP mapping)
|
||||
- SQL: InfluxDB 3.x only
|
||||
|
||||
### Invalid data source info received
|
||||
|
||||
**Error message:** "invalid data source info received"
|
||||
|
||||
**Cause:** The data source configuration is incomplete or corrupted.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Delete and recreate the data source.
|
||||
1. Ensure all required fields are populated based on your query language:
|
||||
- **Flux:** URL, Organization, Token, Default Bucket
|
||||
- **InfluxQL:** URL, Database, User, Password
|
||||
- **SQL:** URL, Database, Token
|
||||
|
||||
### DBRP mapping required
|
||||
|
||||
**Error message:** "database not found" or queries return no data with InfluxQL on InfluxDB 2.x
|
||||
|
||||
**Cause:** InfluxQL queries on InfluxDB 2.x require a Database and Retention Policy (DBRP) mapping.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Create a DBRP mapping in InfluxDB using the CLI or API.
|
||||
1. Refer to [Manage DBRP Mappings](https://docs.influxdata.com/influxdb/cloud/query-data/influxql/dbrp/) for guidance.
|
||||
1. Verify the database name in Grafana matches the DBRP mapping.
|
||||
|
||||
## Query errors
|
||||
|
||||
The following errors occur when there are issues with query syntax or execution.
|
||||
|
||||
### Query syntax error
|
||||
|
||||
**Error message:** "error parsing query: found THING" or "failed to parse query: found WERE, expected ; at line 1, char 38"
|
||||
|
||||
**Cause:** The query contains invalid syntax.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check your query syntax for typos or invalid keywords.
|
||||
1. For InfluxQL, verify the query follows the correct syntax:
|
||||
|
||||
```sql
|
||||
SELECT <field> FROM <measurement> WHERE <condition>
|
||||
```
|
||||
|
||||
1. For Flux, ensure proper pipe-forward syntax and function calls.
|
||||
1. Use the InfluxDB UI or CLI to test your query directly.
|
||||
|
||||
### Query timeout limit exceeded
|
||||
|
||||
**Error message:** "query-timeout limit exceeded"
|
||||
|
||||
**Cause:** The query took longer than the configured timeout limit in InfluxDB.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Reduce the time range of your query.
|
||||
1. Add more specific filters to limit the data scanned.
|
||||
1. Increase the query timeout setting in InfluxDB if you have admin access.
|
||||
1. Optimize your query to reduce complexity.
|
||||
|
||||
### Too many series or data points
|
||||
|
||||
**Error message:** "max-series-per-database limit exceeded" or "A query returned too many data points and the results have been truncated"
|
||||
|
||||
**Cause:** The query is returning more data than the configured limits allow.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Reduce the time range of your query.
|
||||
1. Add filters to limit the number of series returned.
|
||||
1. Increase the **Max series** setting in the data source configuration under **Advanced Database Settings**.
|
||||
1. Use aggregation functions to reduce the number of data points.
|
||||
1. For Flux, use `aggregateWindow()` to downsample data.
|
||||
|
||||
### No time column found
|
||||
|
||||
**Error message:** "no time column found"
|
||||
|
||||
**Cause:** The query result does not include a time column, which is required for time series visualization.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Ensure your query includes a time field.
|
||||
1. For Flux, verify the query includes `_time` in the output.
|
||||
1. For SQL, ensure the query returns a timestamp column.
|
||||
1. Check that the time field is not being filtered out or excluded.
|
||||
|
||||
## Health check errors
|
||||
|
||||
The following errors occur when testing the data source connection.
|
||||
|
||||
### Error getting flux query buckets
|
||||
|
||||
**Error message:** "error getting flux query buckets"
|
||||
|
||||
**Cause:** The health check query `buckets()` failed to return results.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the token has permission to list buckets.
|
||||
1. Check that the organization ID is correct.
|
||||
1. Ensure InfluxDB is running and accessible.
|
||||
|
||||
### Error connecting InfluxDB influxQL
|
||||
|
||||
**Error message:** "error connecting InfluxDB influxQL"
|
||||
|
||||
**Cause:** The health check query `SHOW MEASUREMENTS` failed.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the database name is correct.
|
||||
1. Check that the user has permission to run `SHOW MEASUREMENTS`.
|
||||
1. Ensure the database exists and contains measurements.
|
||||
1. For InfluxDB 2.x, verify DBRP mapping is configured.
|
||||
|
||||
### 0 measurements found
|
||||
|
||||
**Error message:** "data source is working. 0 measurements found"
|
||||
|
||||
**Cause:** The connection is successful, but the database contains no measurements.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify you are connecting to the correct database.
|
||||
1. Check that data has been written to the database.
|
||||
1. If the database is new, add some test data to verify the connection.
|
||||
|
||||
## Other common issues
|
||||
|
||||
The following issues don't produce specific error messages but are commonly encountered.
|
||||
|
||||
### Empty query results
|
||||
|
||||
**Cause:** The query returns no data.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the time range includes data in your database.
|
||||
1. Check that the measurement and field names are correct.
|
||||
1. Test the query directly in the InfluxDB UI or CLI.
|
||||
1. Ensure filters are not excluding all data.
|
||||
1. For InfluxQL, verify the retention policy contains data for the selected time range.
|
||||
|
||||
### Slow query performance
|
||||
|
||||
**Cause:** Queries take a long time to execute.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Reduce the time range of your query.
|
||||
1. Add more specific filters to limit the data scanned.
|
||||
1. Increase the **Min time interval** setting to reduce the number of data points.
|
||||
1. Check InfluxDB server performance and resource utilization.
|
||||
1. For Flux, use `aggregateWindow()` to downsample data before visualization.
|
||||
1. Consider using continuous queries or tasks to pre-aggregate data.
|
||||
|
||||
### Data appears delayed or missing recent points
|
||||
|
||||
**Cause:** The visualization doesn't show the most recent data.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check the dashboard time range and refresh settings.
|
||||
1. Verify the **Min time interval** is not set too high.
|
||||
1. Ensure InfluxDB has finished writing the data.
|
||||
1. Check for clock synchronization issues between Grafana and InfluxDB.
|
||||
|
||||
## Get additional help
|
||||
|
||||
If you continue to experience issues after following this troubleshooting guide:
|
||||
|
||||
1. Check the [InfluxDB documentation](https://docs.influxdata.com/) for API-specific guidance.
|
||||
1. Review the [Grafana community forums](https://community.grafana.com/) for similar issues.
|
||||
1. Contact Grafana Support if you're an Enterprise, Cloud Pro, or Cloud Contracted user.
|
||||
1. When reporting issues, include:
|
||||
- Grafana version
|
||||
- InfluxDB version and product (OSS, Cloud, Enterprise)
|
||||
- Query language (Flux, InfluxQL, or SQL)
|
||||
- Error messages (redact sensitive information)
|
||||
- Steps to reproduce
|
||||
- Relevant configuration such as data source settings, HTTP method, and TLS settings (redact tokens, passwords, and other credentials)
|
||||
@@ -60,7 +60,6 @@ The following documents will help you get started with the PostgreSQL data sourc
|
||||
|
||||
- [Configure the PostgreSQL data source](ref:configure-postgres-data-source)
|
||||
- [PostgreSQL query editor](ref:postgres-query-editor)
|
||||
- [Troubleshooting](troubleshooting/)
|
||||
|
||||
After you have configured the data source you can:
|
||||
|
||||
|
||||
@@ -1,380 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/postgres/troubleshooting/
|
||||
description: Troubleshooting the PostgreSQL data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- postgresql
|
||||
- troubleshooting
|
||||
- errors
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Troubleshooting
|
||||
title: Troubleshoot PostgreSQL data source issues
|
||||
weight: 600
|
||||
---
|
||||
|
||||
# Troubleshoot PostgreSQL data source issues
|
||||
|
||||
This document provides troubleshooting information for common errors you may encounter when using the PostgreSQL data source in Grafana.
|
||||
|
||||
## Connection errors
|
||||
|
||||
The following errors occur when Grafana cannot establish or maintain a connection to PostgreSQL.
|
||||
|
||||
### Failed to connect to PostgreSQL
|
||||
|
||||
**Error message:** `failed to connect to ... : connect: connection refused` or `dial tcp: connect: connection refused`
|
||||
|
||||
**Cause:** Grafana cannot establish a network connection to the PostgreSQL server.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify that the Host URL is correct in the data source configuration.
|
||||
1. Check that PostgreSQL is running and accessible from the Grafana server.
|
||||
1. Verify the port is correct (the PostgreSQL default port is `5432`).
|
||||
1. Ensure there are no firewall rules blocking the connection.
|
||||
1. Check that PostgreSQL is configured to accept connections from the Grafana server in `pg_hba.conf`.
|
||||
1. For Grafana Cloud, ensure you have configured [Private data source connect](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) if your PostgreSQL instance is not publicly accessible.
|
||||
|
||||
### Request timed out
|
||||
|
||||
**Error message:** "context deadline exceeded" or "i/o timeout"
|
||||
|
||||
**Cause:** The connection to PostgreSQL timed out before receiving a response.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check the network latency between Grafana and PostgreSQL.
|
||||
1. Verify that PostgreSQL is not overloaded or experiencing performance issues.
|
||||
1. Increase the **Max lifetime** setting in the data source configuration under **Connection limits**.
|
||||
1. Reduce the time range or complexity of your query.
|
||||
1. Check if any network devices (load balancers, proxies) are timing out the connection.
|
||||
|
||||
### Host not found
|
||||
|
||||
**Error message:** `failed to connect to ... : hostname resolving error` or `lookup hostname: no such host`
|
||||
|
||||
**Cause:** The hostname specified in the data source configuration cannot be resolved.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the hostname is spelled correctly.
|
||||
1. Check that DNS resolution is working on the Grafana server.
|
||||
1. Try using an IP address instead of a hostname.
|
||||
1. Ensure the PostgreSQL server is accessible from the Grafana server's network.
|
||||
|
||||
## Authentication errors
|
||||
|
||||
The following errors occur when there are issues with authentication credentials or permissions.
|
||||
|
||||
### Password authentication failed
|
||||
|
||||
**Error message:** `failed to connect to ... : server error: FATAL: password authentication failed for user "username" (SQLSTATE 28P01)`
|
||||
|
||||
**Cause:** The username or password is incorrect.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify that the username and password are correct in the data source configuration.
|
||||
1. Check that the user exists in PostgreSQL.
|
||||
1. Verify the password has not expired.
|
||||
1. If no password is specified, ensure a [PostgreSQL password file](https://www.postgresql.org/docs/current/static/libpq-pgpass.html) is configured.
|
||||
|
||||
### Permission denied
|
||||
|
||||
**Error message:** `ERROR: permission denied for table table_name (SQLSTATE 42501)` or `ERROR: permission denied for schema schema_name (SQLSTATE 42501)`
|
||||
|
||||
**Cause:** The database user does not have permission to access the requested table or schema.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the user has `SELECT` permissions on the required tables.
|
||||
1. Grant the necessary permissions:
|
||||
|
||||
```sql
|
||||
GRANT USAGE ON SCHEMA schema_name TO grafanareader;
|
||||
GRANT SELECT ON schema_name.table_name TO grafanareader;
|
||||
```
|
||||
|
||||
1. Check that the user has access to the correct database.
|
||||
1. Verify the search path includes the schema containing your tables.
|
||||
|
||||
### No pg_hba.conf entry
|
||||
|
||||
**Error message:** `failed to connect to ... : server error: FATAL: no pg_hba.conf entry for host "ip_address", user "username", database "database_name" (SQLSTATE 28000)`
|
||||
|
||||
**Cause:** PostgreSQL is not configured to accept connections from the Grafana server.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Edit the `pg_hba.conf` file on the PostgreSQL server.
|
||||
1. Add an entry to allow connections from the Grafana server:
|
||||
|
||||
```text
|
||||
host database_name username grafana_ip/32 md5
|
||||
```
|
||||
|
||||
1. Reload PostgreSQL configuration: `SELECT pg_reload_conf();`
|
||||
1. If using SSL, ensure the correct authentication method is specified (for example, `hostssl` instead of `host`).
|
||||
|
||||
## TLS and certificate errors
|
||||
|
||||
The following errors occur when there are issues with TLS configuration.
|
||||
|
||||
### Certificate verification failed
|
||||
|
||||
**Error message:** "x509: certificate signed by unknown authority" or "certificate verify failed"
|
||||
|
||||
**Cause:** Grafana cannot verify the TLS certificate presented by PostgreSQL.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Set the **TLS/SSL Mode** to the appropriate level (`require`, `verify-ca`, or `verify-full`).
|
||||
1. If using a self-signed certificate, add the CA certificate in **TLS/SSL Auth Details**.
|
||||
1. Verify the certificate chain is complete and valid.
|
||||
1. Ensure the certificate has not expired.
|
||||
1. For testing only, set **TLS/SSL Mode** to `disable` (not recommended for production).
|
||||
|
||||
### SSL not supported
|
||||
|
||||
**Error message:** `failed to connect to ... : server refused TLS connection` or `server does not support SSL`
|
||||
|
||||
**Cause:** The PostgreSQL server is not configured for SSL connections, but the data source requires SSL.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Set **TLS/SSL Mode** to `disable` if SSL is not required.
|
||||
1. Alternatively, enable SSL on the PostgreSQL server by configuring `ssl = on` in `postgresql.conf`.
|
||||
1. Ensure the server has valid SSL certificates configured.
|
||||
|
||||
### Client certificate error
|
||||
|
||||
**Error message:** "TLS: failed to find any PEM data in certificate input" or "could not load client certificate"
|
||||
|
||||
**Cause:** The client certificate or key is invalid or incorrectly formatted.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the certificate and key are in PEM format.
|
||||
1. Ensure the certificate file path is correct and readable by the Grafana process.
|
||||
1. Check that the certificate and key match (belong to the same key pair).
|
||||
1. If using certificate content, ensure you've pasted the complete certificate including headers.
|
||||
|
||||
## Database errors
|
||||
|
||||
The following errors occur when there are issues with the database configuration.
|
||||
|
||||
### Database does not exist
|
||||
|
||||
**Error message:** `failed to connect to ... : server error: FATAL: database "database_name" does not exist (SQLSTATE 3D000)`
|
||||
|
||||
**Cause:** The specified database name is incorrect or the database doesn't exist.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the database name in the data source configuration.
|
||||
1. Check that the database exists: `\l` in psql or `SELECT datname FROM pg_database;`
|
||||
1. Note that database names are case-sensitive; ensure the name matches exactly.
|
||||
1. Verify the user has permission to connect to the database.
|
||||
|
||||
### Relation does not exist
|
||||
|
||||
**Error message:** `ERROR: relation "table_name" does not exist (SQLSTATE 42P01)`
|
||||
|
||||
**Cause:** The specified table or view does not exist, or the user cannot access it.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the table name is correct and exists in the database.
|
||||
1. Check the schema name if the table is not in the public schema.
|
||||
1. Use fully qualified names: `schema_name.table_name`.
|
||||
1. Verify the user has `SELECT` permission on the table.
|
||||
1. Check the search path: `SHOW search_path;`
|
||||
|
||||
## Query errors
|
||||
|
||||
The following errors occur when there are issues with SQL syntax or query execution.
|
||||
|
||||
### Query syntax error
|
||||
|
||||
**Error message:** `ERROR: syntax error at or near "keyword" (SQLSTATE 42601)`
|
||||
|
||||
**Cause:** The SQL query contains invalid syntax.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check your query syntax for typos or invalid keywords.
|
||||
1. Verify column and table names are correctly quoted if they contain special characters or are reserved words.
|
||||
1. Use double quotes for identifiers: `"column_name"`.
|
||||
1. Test the query directly in a PostgreSQL client (psql, pgAdmin).
|
||||
|
||||
### Column does not exist
|
||||
|
||||
**Error message:** `ERROR: column "column_name" does not exist (SQLSTATE 42703)`
|
||||
|
||||
**Cause:** The specified column name is incorrect or doesn't exist in the table.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the column name is spelled correctly.
|
||||
1. Note that quoted identifiers are case-sensitive in PostgreSQL, so column names must match exactly when quoted.
|
||||
1. Use the correct quoting for column names: `"Column_Name"` for case-sensitive names.
|
||||
1. Verify the column exists in the table: `\d table_name` in psql.
|
||||
|
||||
### No time column found
|
||||
|
||||
**Error message:** "no time column found" or time series visualization shows no data
|
||||
|
||||
**Cause:** The query result does not include a properly formatted time column.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Ensure your query includes a column named `time` that returns a timestamp or epoch value.
|
||||
1. Use an alias to rename your time column: `SELECT created_at AS time`.
|
||||
1. Ensure the time column is of type `timestamp`, `timestamptz`, or a numeric epoch value.
|
||||
1. Order results by the time column: `ORDER BY time ASC`.
|
||||
|
||||
### Macro expansion error
|
||||
|
||||
**Error message:** "macro '$\_\_timeFilter' not found" or incorrect query results with macros
|
||||
|
||||
**Cause:** Grafana macros are not being properly expanded.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the macro syntax is correct, for example `$__timeFilter(time_column)`.
|
||||
1. Ensure the column name passed to the macro exists in your table.
|
||||
1. Use the **Preview** toggle in Builder mode to see the expanded query.
|
||||
1. For time-based macros, ensure the column contains timestamp data.
|
||||
|
||||
## Performance issues
|
||||
|
||||
The following issues relate to slow query execution or resource constraints.
|
||||
|
||||
### Query timeout
|
||||
|
||||
**Error message:** "canceling statement due to statement timeout" or "query timeout"
|
||||
|
||||
**Cause:** The query took longer than the configured timeout.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Reduce the time range of your query.
|
||||
1. Add indexes to columns used in WHERE clauses and joins.
|
||||
1. Use the `$__timeFilter` macro to limit data to the dashboard time range.
|
||||
1. Increase the statement timeout in PostgreSQL if you have admin access.
|
||||
1. Optimize your query to reduce complexity.
|
||||
|
||||
### Too many connections
|
||||
|
||||
**Error message:** `failed to connect to ... : server error: FATAL: too many connections for role "username" (SQLSTATE 53300)` or `connection pool exhausted`
|
||||
|
||||
**Cause:** The maximum number of connections to PostgreSQL has been reached.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Reduce the **Max open** connections setting in the data source configuration.
|
||||
1. Increase `max_connections` in PostgreSQL's `postgresql.conf` if you have admin access.
|
||||
1. Check for connection leaks in other applications connecting to the same database.
|
||||
1. Enable **Auto max idle** to automatically manage idle connections.
|
||||
|
||||
### Slow query performance
|
||||
|
||||
**Cause:** Queries take a long time to execute.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Reduce the time range of your query.
|
||||
1. Add appropriate indexes to your tables.
|
||||
1. Use the `$__timeFilter` macro to limit the data scanned.
|
||||
1. Increase the **Min time interval** setting to reduce the number of data points.
|
||||
1. Use `EXPLAIN ANALYZE` in PostgreSQL to identify query bottlenecks.
|
||||
1. Consider using materialized views for complex aggregations.
|
||||
|
||||
## Provisioning errors
|
||||
|
||||
The following errors occur when provisioning the data source via YAML.
|
||||
|
||||
### Invalid provisioning configuration
|
||||
|
||||
**Error message:** "metric request error" or data source test fails after provisioning
|
||||
|
||||
**Cause:** The provisioning YAML file contains incorrect configuration.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Ensure parameter names match the expected format exactly.
|
||||
1. Verify the database name is **not** included in the URL.
|
||||
1. Use the correct format for the URL: `hostname:port`.
|
||||
1. Check that string values are properly quoted in the YAML file.
|
||||
1. Refer to the [provisioning example](../configure/#provision-the-data-source) for the correct format.
|
||||
|
||||
Example correct configuration:
|
||||
|
||||
```yaml
|
||||
datasources:
|
||||
- name: Postgres
|
||||
type: postgres
|
||||
url: localhost:5432
|
||||
user: grafana
|
||||
secureJsonData:
|
||||
password: 'Password!'
|
||||
jsonData:
|
||||
database: grafana
|
||||
sslmode: 'disable'
|
||||
```
|
||||
|
||||
## Other common issues
|
||||
|
||||
The following issues don't produce specific error messages but are commonly encountered.
|
||||
|
||||
### Empty query results
|
||||
|
||||
**Cause:** The query returns no data.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the time range includes data in your database.
|
||||
1. Check that table and column names are correct.
|
||||
1. Test the query directly in PostgreSQL.
|
||||
1. Ensure filters are not excluding all data.
|
||||
1. Verify the `$__timeFilter` macro is using the correct time column.
|
||||
|
||||
### TimescaleDB functions not available
|
||||
|
||||
**Cause:** TimescaleDB-specific functions like `time_bucket` are not available in the query builder.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Enable the **TimescaleDB** toggle in the data source configuration under **PostgreSQL Options**.
|
||||
1. Verify TimescaleDB is installed and enabled in your PostgreSQL database.
|
||||
1. Check that the `timescaledb` extension is created: `CREATE EXTENSION IF NOT EXISTS timescaledb;`
|
||||
|
||||
### Data appears delayed or missing recent points
|
||||
|
||||
**Cause:** The visualization doesn't show the most recent data.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check the dashboard time range and refresh settings.
|
||||
1. Verify the **Min time interval** is not set too high.
|
||||
1. Ensure data has been committed to the database (not in an uncommitted transaction).
|
||||
1. Check for clock synchronization issues between Grafana and PostgreSQL.
|
||||
|
||||
## Get additional help
|
||||
|
||||
If you continue to experience issues after following this troubleshooting guide:
|
||||
|
||||
1. Check the [PostgreSQL documentation](https://www.postgresql.org/docs/) for database-specific guidance.
|
||||
1. Review the [Grafana community forums](https://community.grafana.com/) for similar issues.
|
||||
1. Contact Grafana Support if you are a Cloud Pro, Cloud Contracted, or Enterprise user.
|
||||
1. When reporting issues, include:
|
||||
- Grafana version
|
||||
- PostgreSQL version
|
||||
- Error messages (redact sensitive information)
|
||||
- Steps to reproduce
|
||||
- Relevant configuration such as data source settings, TLS mode, and connection limits (redact passwords and other credentials)
|
||||
@@ -83,11 +83,6 @@ This topic lists words and abbreviations that are commonly used in the Grafana d
|
||||
A commonly-used visualization that displays data as points, lines, or bars.
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td style="vertical-align: top"><code>grafanactl</code></td>
|
||||
<td>
|
||||
A command-line tool that enables users to authenticate, manage multiple environments, and perform administrative tasks through Grafana's REST API.
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td style="vertical-align: top">mixin</td>
|
||||
<td>
|
||||
|
||||
@@ -99,7 +99,6 @@ Add links to other dashboards at the top of your current dashboard.
|
||||
- **Include current time range** – Select this option to include the dashboard time range in the link. When the user clicks the link, the linked dashboard opens with the indicated time range already set. **Example:** https://play.grafana.org/d/000000010/annotations?orgId=1&from=now-3h&to=now
|
||||
- **Include current template variable values** – Select this option to include template variables currently used as query parameters in the link. When the user clicks the link, any matching templates in the linked dashboard are set to the values from the link. For more information, see [Dashboard URL variables](ref:dashboard-url-variables).
|
||||
- **Open link in new tab** – Select this option if you want the dashboard link to open in a new tab or window.
|
||||
- **Show in controls menu** – Select this option to display the link in the dashboard controls menu instead of at the top of the dashboard. The dashboard controls menu appears as a button in the dashboard toolbar.
|
||||
|
||||
1. Click **Save dashboard** in the top-right corner.
|
||||
1. Click **Back to dashboard** and then **Exit edit**.
|
||||
@@ -122,7 +121,6 @@ Add a link to a URL at the top of your current dashboard. You can link to any av
|
||||
- **Include current time range** – Select this option to include the dashboard time range in the link. When the user clicks the link, the linked dashboard opens with the indicated time range already set. **Example:** https://play.grafana.org/d/000000010/annotations?orgId=1&from=now-3h&to=now
|
||||
- **Include current template variable values** – Select this option to include template variables currently used as query parameters in the link. When the user clicks the link, any matching templates in the linked dashboard are set to the values from the link.
|
||||
- **Open link in new tab** – Select this option if you want the dashboard link to open in a new tab or window.
|
||||
- **Show in controls menu** – Select this option to display the link in the dashboard controls menu instead of at the top of the dashboard. The dashboard controls menu appears as a button in the dashboard toolbar.
|
||||
|
||||
1. Click **Save dashboard** in the top-right corner.
|
||||
1. Click **Back to dashboard** and then **Exit edit**.
|
||||
|
||||
@@ -123,11 +123,10 @@ To create a variable, follow these steps:
|
||||
|
||||
If you don't enter a display name, then the drop-down list label is the variable name.
|
||||
|
||||
1. Choose a **Display** option:
|
||||
- **Above dashboard** - The variable drop-down list displays above the dashboard with the variable **Name** or **Label** value. This is the default.
|
||||
- **Above dashboard, label hidden** - The variable drop-down list displays above the dashboard, but without showing the name of the variable.
|
||||
- **Controls menu** - The variable is displayed in the dashboard controls menu instead of above the dashboard. The dashboard controls menu appears as a button in the dashboard toolbar.
|
||||
- **Hidden** - No variable drop-down list is displayed on the dashboard.
|
||||
1. Choose a **Show on dashboard** option:
|
||||
- **Label and value** - The variable drop-down list displays the variable **Name** or **Label** value. This is the default.
|
||||
- **Value** - The variable drop-down list only displays the selected variable value and a down arrow.
|
||||
- **Nothing** - No variable drop-down list is displayed on the dashboard.
|
||||
|
||||
1. Click one of the following links to complete the steps for adding your selected variable type:
|
||||
- [Query](#add-a-query-variable)
|
||||
|
||||
@@ -12,13 +12,12 @@ comments: |
|
||||
To build this Markdown, do the following:
|
||||
|
||||
$ cd /docs (from the root of the repository)
|
||||
$ make sources/visualizations/panels-visualizations/query-transform-data/transform-data/index.md
|
||||
$ make sources/panels-visualizations/query-transform-data/transform-data/index.md
|
||||
$ make docs
|
||||
|
||||
Browse to http://localhost:3003/docs/grafana/latest/panels-visualizations/query-transform-data/transform-data/
|
||||
|
||||
Refer to ./docs/README.md "Content guidelines" for more information about editing and building these docs.
|
||||
|
||||
aliases:
|
||||
- ../../../panels/transform-data/ # /docs/grafana/next/panels/transform-data/
|
||||
- ../../../panels/transform-data/about-transformation/ # /docs/grafana/next/panels/transform-data/about-transformation/
|
||||
|
||||
@@ -419,9 +419,6 @@ test.describe(
|
||||
// Select tabs layout
|
||||
await page.getByLabel('layout-selection-option-Tabs').click();
|
||||
|
||||
// confirm layout change
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.ConfirmModal.delete).click();
|
||||
|
||||
await expect(dashboardPage.getByGrafanaSelector(selectors.components.Tab.title('New row'))).toBeVisible();
|
||||
await expect(dashboardPage.getByGrafanaSelector(selectors.components.Tab.title('New row 1'))).toBeVisible();
|
||||
await expect(
|
||||
@@ -760,9 +757,6 @@ test.describe(
|
||||
// Select rows layout
|
||||
await page.getByLabel('layout-selection-option-Rows').click();
|
||||
|
||||
// confirm layout change
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.ConfirmModal.delete).click();
|
||||
|
||||
await dashboardPage
|
||||
.getByGrafanaSelector(selectors.components.DashboardRow.wrapper('New tab 1'))
|
||||
.scrollIntoViewIfNeeded();
|
||||
|
||||
@@ -4,8 +4,6 @@ import { test, expect, E2ESelectorGroups, DashboardPage } from '@grafana/plugin-
|
||||
|
||||
import testV2Dashboard from '../dashboards/TestV2Dashboard.json';
|
||||
|
||||
import { switchToAutoGrid } from './utils';
|
||||
|
||||
test.use({
|
||||
featureToggles: {
|
||||
kubernetesDashboards: true,
|
||||
@@ -35,8 +33,7 @@ test.describe(
|
||||
).toHaveCount(3);
|
||||
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
await page.getByLabel('layout-selection-option-Auto grid').click();
|
||||
|
||||
await expect(
|
||||
dashboardPage.getByGrafanaSelector(selectors.components.Panels.Panel.title('New panel'))
|
||||
@@ -67,8 +64,7 @@ test.describe(
|
||||
).toHaveCount(3);
|
||||
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
await page.getByLabel('layout-selection-option-Auto grid').click();
|
||||
|
||||
// Get initial positions - standard width should have panels on different rows
|
||||
const firstPanelTop = await getPanelTop(dashboardPage, selectors);
|
||||
@@ -128,8 +124,7 @@ test.describe(
|
||||
).toHaveCount(3);
|
||||
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
await page.getByLabel('layout-selection-option-Auto grid').click();
|
||||
|
||||
await dashboardPage
|
||||
.getByGrafanaSelector(selectors.components.PanelEditor.ElementEditPane.AutoGridLayout.minColumnWidth)
|
||||
@@ -186,8 +181,7 @@ test.describe(
|
||||
).toHaveCount(3);
|
||||
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
await page.getByLabel('layout-selection-option-Auto grid').click();
|
||||
|
||||
await dashboardPage
|
||||
.getByGrafanaSelector(selectors.components.PanelEditor.ElementEditPane.AutoGridLayout.maxColumns)
|
||||
@@ -222,8 +216,7 @@ test.describe(
|
||||
).toHaveCount(3);
|
||||
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
await page.getByLabel('layout-selection-option-Auto grid').click();
|
||||
|
||||
const regularRowHeight = await getPanelHeight(dashboardPage, selectors);
|
||||
|
||||
@@ -278,8 +271,7 @@ test.describe(
|
||||
).toHaveCount(3);
|
||||
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
await page.getByLabel('layout-selection-option-Auto grid').click();
|
||||
|
||||
const regularRowHeight = await getPanelHeight(dashboardPage, selectors);
|
||||
|
||||
@@ -336,8 +328,7 @@ test.describe(
|
||||
).toHaveCount(3);
|
||||
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
await page.getByLabel('layout-selection-option-Auto grid').click();
|
||||
|
||||
// Set narrow column width first to ensure panels fit horizontally
|
||||
await dashboardPage
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { Page } from 'playwright-core';
|
||||
|
||||
import { test, expect, DashboardPage } from '@grafana/plugin-e2e';
|
||||
import { test, expect } from '@grafana/plugin-e2e';
|
||||
|
||||
import testV2DashWithRepeats from '../dashboards/V2DashWithRepeats.json';
|
||||
|
||||
@@ -12,7 +12,6 @@ import {
|
||||
getPanelPosition,
|
||||
importTestDashboard,
|
||||
goToEmbeddedPanel,
|
||||
switchToAutoGrid,
|
||||
} from './utils';
|
||||
|
||||
const repeatTitleBase = 'repeat - ';
|
||||
@@ -43,7 +42,7 @@ test.describe(
|
||||
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
await switchToAutoGrid(page);
|
||||
|
||||
await dashboardPage.getByGrafanaSelector(selectors.components.Panels.Panel.title('New panel')).first().click();
|
||||
|
||||
@@ -79,8 +78,7 @@ test.describe(
|
||||
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
|
||||
await switchToAutoGrid(page);
|
||||
await saveDashboard(dashboardPage, page, selectors);
|
||||
await page.reload();
|
||||
|
||||
@@ -119,7 +117,7 @@ test.describe(
|
||||
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
await switchToAutoGrid(page);
|
||||
|
||||
// select first/original repeat panel to activate edit pane
|
||||
await dashboardPage
|
||||
@@ -150,7 +148,7 @@ test.describe(
|
||||
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
await switchToAutoGrid(page);
|
||||
await saveDashboard(dashboardPage, page, selectors);
|
||||
await page.reload();
|
||||
|
||||
@@ -216,7 +214,7 @@ test.describe(
|
||||
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
await switchToAutoGrid(page);
|
||||
await saveDashboard(dashboardPage, page, selectors);
|
||||
|
||||
// loading directly into panel editor
|
||||
@@ -273,7 +271,7 @@ test.describe(
|
||||
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
await switchToAutoGrid(page);
|
||||
|
||||
// this moving repeated panel between two normal panels
|
||||
await movePanel(dashboardPage, selectors, `${repeatTitleBase}${repeatOptions.at(0)}`, 'New panel');
|
||||
@@ -321,7 +319,7 @@ test.describe(
|
||||
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
await switchToAutoGrid(page);
|
||||
await saveDashboard(dashboardPage, page, selectors);
|
||||
await page.reload();
|
||||
|
||||
@@ -384,7 +382,7 @@ test.describe(
|
||||
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
await switchToAutoGrid(page);
|
||||
await saveDashboard(dashboardPage, page, selectors);
|
||||
await page.reload();
|
||||
|
||||
@@ -412,7 +410,7 @@ test.describe(
|
||||
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.Sidebar.optionsButton).click();
|
||||
|
||||
await switchToAutoGrid(page, dashboardPage);
|
||||
await switchToAutoGrid(page);
|
||||
await saveDashboard(dashboardPage, page, selectors);
|
||||
await page.reload();
|
||||
|
||||
@@ -464,3 +462,7 @@ test.describe(
|
||||
});
|
||||
}
|
||||
);
|
||||
|
||||
async function switchToAutoGrid(page: Page) {
|
||||
await page.getByLabel('layout-selection-option-Auto grid').click();
|
||||
}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import { Page } from '@playwright/test';
|
||||
|
||||
import { selectors } from '@grafana/e2e-selectors';
|
||||
import { DashboardPage, E2ESelectorGroups, expect } from '@grafana/plugin-e2e';
|
||||
|
||||
import testV2Dashboard from '../dashboards/TestV2Dashboard.json';
|
||||
@@ -240,12 +239,3 @@ export async function getTabPosition(dashboardPage: DashboardPage, selectors: E2
|
||||
const boundingBox = await tab.boundingBox();
|
||||
return boundingBox;
|
||||
}
|
||||
|
||||
export async function switchToAutoGrid(page: Page, dashboardPage: DashboardPage) {
|
||||
await page.getByLabel('layout-selection-option-Auto grid').click();
|
||||
// confirm layout change if applicable
|
||||
const confirmModal = dashboardPage.getByGrafanaSelector(selectors.pages.ConfirmModal.delete);
|
||||
if (confirmModal) {
|
||||
await confirmModal.click();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2882,6 +2882,11 @@
|
||||
"count": 1
|
||||
}
|
||||
},
|
||||
"public/app/features/panel/components/VizTypePicker/PanelTypeCard.tsx": {
|
||||
"@grafana/no-aria-label-selectors": {
|
||||
"count": 1
|
||||
}
|
||||
},
|
||||
"public/app/features/panel/panellinks/linkSuppliers.ts": {
|
||||
"@typescript-eslint/no-explicit-any": {
|
||||
"count": 1
|
||||
|
||||
@@ -289,7 +289,7 @@
|
||||
"@grafana/google-sdk": "0.3.5",
|
||||
"@grafana/i18n": "workspace:*",
|
||||
"@grafana/lezer-logql": "0.2.9",
|
||||
"@grafana/llm": "1.0.1",
|
||||
"@grafana/llm": "0.22.1",
|
||||
"@grafana/monaco-logql": "^0.0.8",
|
||||
"@grafana/o11y-ds-frontend": "workspace:*",
|
||||
"@grafana/plugin-ui": "^0.11.1",
|
||||
@@ -459,8 +459,7 @@
|
||||
"gitconfiglocal": "2.1.0",
|
||||
"tmp@npm:^0.0.33": "~0.2.1",
|
||||
"js-yaml@npm:4.1.0": "^4.1.0",
|
||||
"js-yaml@npm:=4.1.0": "^4.1.0",
|
||||
"nodemailer": "7.0.7"
|
||||
"js-yaml@npm:=4.1.0": "^4.1.0"
|
||||
},
|
||||
"workspaces": {
|
||||
"packages": [
|
||||
|
||||
@@ -165,19 +165,6 @@ const injectedRtkApi = api
|
||||
}),
|
||||
providesTags: ['Search'],
|
||||
}),
|
||||
getSearchUsers: build.query<GetSearchUsersApiResponse, GetSearchUsersApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/searchUsers`,
|
||||
params: {
|
||||
query: queryArg.query,
|
||||
limit: queryArg.limit,
|
||||
page: queryArg.page,
|
||||
offset: queryArg.offset,
|
||||
sort: queryArg.sort,
|
||||
},
|
||||
}),
|
||||
providesTags: ['Search'],
|
||||
}),
|
||||
listServiceAccount: build.query<ListServiceAccountApiResponse, ListServiceAccountApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/serviceaccounts`,
|
||||
@@ -909,18 +896,6 @@ export type GetSearchTeamsApiArg = {
|
||||
/** page number to start from */
|
||||
page?: number;
|
||||
};
|
||||
export type GetSearchUsersApiResponse = unknown;
|
||||
export type GetSearchUsersApiArg = {
|
||||
query?: string;
|
||||
/** number of results to return */
|
||||
limit?: number;
|
||||
/** page number (starting from 1) */
|
||||
page?: number;
|
||||
/** number of results to skip */
|
||||
offset?: number;
|
||||
/** sortable field */
|
||||
sort?: string;
|
||||
};
|
||||
export type ListServiceAccountApiResponse = /** status 200 OK */ ServiceAccountList;
|
||||
export type ListServiceAccountApiArg = {
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
@@ -2092,9 +2067,6 @@ export type UserSpec = {
|
||||
role: string;
|
||||
title: string;
|
||||
};
|
||||
export type UserStatus = {
|
||||
lastSeenAt: number;
|
||||
};
|
||||
export type User = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
apiVersion?: string;
|
||||
@@ -2103,7 +2075,6 @@ export type User = {
|
||||
metadata: ObjectMeta;
|
||||
/** Spec is the spec of the User */
|
||||
spec: UserSpec;
|
||||
status: UserStatus;
|
||||
};
|
||||
export type UserList = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
@@ -2149,8 +2120,6 @@ export const {
|
||||
useUpdateExternalGroupMappingMutation,
|
||||
useGetSearchTeamsQuery,
|
||||
useLazyGetSearchTeamsQuery,
|
||||
useGetSearchUsersQuery,
|
||||
useLazyGetSearchUsersQuery,
|
||||
useListServiceAccountQuery,
|
||||
useLazyListServiceAccountQuery,
|
||||
useCreateServiceAccountMutation,
|
||||
|
||||
@@ -1138,7 +1138,7 @@ export type JobResourceSummary = {
|
||||
delete?: number;
|
||||
/** Create or update (export) */
|
||||
error?: number;
|
||||
/** Report errors/warnings for this resource type This may not be an exhaustive list and recommend looking at the logs for more info */
|
||||
/** Report errors for this resource type This may not be an exhaustive list and recommend looking at the logs for more info */
|
||||
errors?: string[];
|
||||
group?: string;
|
||||
kind?: string;
|
||||
@@ -1146,9 +1146,6 @@ export type JobResourceSummary = {
|
||||
noop?: number;
|
||||
total?: number;
|
||||
update?: number;
|
||||
/** The error count */
|
||||
warning?: number;
|
||||
warnings?: string[];
|
||||
write?: number;
|
||||
};
|
||||
export type RepositoryUrLs = {
|
||||
@@ -1179,7 +1176,6 @@ export type JobStatus = {
|
||||
summary?: JobResourceSummary[];
|
||||
/** URLs contains URLs for the reference branch or commit if applicable. */
|
||||
url?: RepositoryUrLs;
|
||||
warnings?: string[];
|
||||
};
|
||||
export type Job = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
// (a <button> with clear text, for example, does not need an aria-label as it's already labeled)
|
||||
// but you still might need to select it for testing,
|
||||
// in that case please add the attribute data-testid={selector} in the component and
|
||||
// prefix your selector string with 'data-testid' so that when we create the selectors we know to search for it on the right attribute
|
||||
// prefix your selector string with 'data-testid' so that when create the selectors we know to search for it on the right attribute
|
||||
|
||||
import { VersionedSelectorGroup } from '../types';
|
||||
|
||||
@@ -1057,7 +1057,6 @@ export const versionedComponents = {
|
||||
},
|
||||
PluginVisualization: {
|
||||
item: {
|
||||
'12.4.0': (title: string) => `data-testid Plugin visualization item ${title}`,
|
||||
[MIN_GRAFANA_VERSION]: (title: string) => `Plugin visualization item ${title}`,
|
||||
},
|
||||
current: {
|
||||
|
||||
@@ -17,10 +17,6 @@ export interface Options {
|
||||
* Controls the height of the rows
|
||||
*/
|
||||
cellHeight?: ui.TableCellHeight;
|
||||
/**
|
||||
* If true, disables all keyboard events in the table. this is used when previewing a table (i.e. suggestions)
|
||||
*/
|
||||
disableKeyboardEvents?: boolean;
|
||||
/**
|
||||
* Enable pagination on the table
|
||||
*/
|
||||
|
||||
@@ -13,7 +13,6 @@ import * as common from '@grafana/schema';
|
||||
export const pluginVersion = "12.4.0-pre";
|
||||
|
||||
export interface Options extends common.OptionsWithTimezones, common.OptionsWithAnnotations {
|
||||
disableKeyboardEvents?: boolean;
|
||||
legend: common.VizLegendOptions;
|
||||
orientation?: common.VizOrientation;
|
||||
timeCompare?: common.TimeCompareOptions;
|
||||
|
||||
@@ -106,11 +106,6 @@ export function RadialGauge(props: RadialGaugeProps) {
|
||||
const gaugeId = useId();
|
||||
const styles = useStyles2(getStyles);
|
||||
|
||||
let effectiveTextMode = textMode;
|
||||
if (effectiveTextMode === 'auto') {
|
||||
effectiveTextMode = vizCount === 1 ? 'value' : 'value_and_name';
|
||||
}
|
||||
|
||||
const startAngle = shape === 'gauge' ? 250 : 0;
|
||||
const endAngle = shape === 'gauge' ? 110 : 360;
|
||||
|
||||
@@ -193,7 +188,7 @@ export function RadialGauge(props: RadialGaugeProps) {
|
||||
// These elements are only added for first value / bar
|
||||
if (barIndex === 0) {
|
||||
if (glowBar) {
|
||||
defs.push(<GlowGradient key="glow-filter" id={glowFilterId} barWidth={dimensions.barWidth} />);
|
||||
defs.push(<GlowGradient key="glow-filter" id={glowFilterId} radius={dimensions.radius} />);
|
||||
}
|
||||
|
||||
if (glowCenter) {
|
||||
@@ -203,14 +198,14 @@ export function RadialGauge(props: RadialGaugeProps) {
|
||||
graphics.push(
|
||||
<RadialText
|
||||
key="radial-text"
|
||||
textMode={effectiveTextMode}
|
||||
vizCount={vizCount}
|
||||
textMode={textMode}
|
||||
displayValue={displayValue.display}
|
||||
dimensions={dimensions}
|
||||
theme={theme}
|
||||
valueManualFontSize={props.valueManualFontSize}
|
||||
nameManualFontSize={props.nameManualFontSize}
|
||||
shape={shape}
|
||||
sparkline={displayValue.sparkline}
|
||||
/>
|
||||
);
|
||||
|
||||
@@ -259,7 +254,6 @@ export function RadialGauge(props: RadialGaugeProps) {
|
||||
theme={theme}
|
||||
color={color}
|
||||
shape={shape}
|
||||
textMode={effectiveTextMode}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
import { css } from '@emotion/css';
|
||||
|
||||
import { FieldDisplay, GrafanaTheme2, FieldConfig } from '@grafana/data';
|
||||
import { GraphFieldConfig, GraphGradientMode, LineInterpolation } from '@grafana/schema';
|
||||
|
||||
import { Sparkline } from '../Sparkline/Sparkline';
|
||||
|
||||
import { RadialShape, RadialTextMode } from './RadialGauge';
|
||||
import { RadialShape } from './RadialGauge';
|
||||
import { GaugeDimensions } from './utils';
|
||||
|
||||
interface RadialSparklineProps {
|
||||
@@ -12,22 +14,23 @@ interface RadialSparklineProps {
|
||||
theme: GrafanaTheme2;
|
||||
color?: string;
|
||||
shape?: RadialShape;
|
||||
textMode: Exclude<RadialTextMode, 'auto'>;
|
||||
}
|
||||
export function RadialSparkline({ sparkline, dimensions, theme, color, shape, textMode }: RadialSparklineProps) {
|
||||
const { radius, barWidth } = dimensions;
|
||||
|
||||
export function RadialSparkline({ sparkline, dimensions, theme, color, shape }: RadialSparklineProps) {
|
||||
if (!sparkline) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const showNameAndValue = textMode === 'value_and_name';
|
||||
const height = radius / (showNameAndValue ? 4 : 3);
|
||||
const width = radius * (shape === 'gauge' ? 1.6 : 1.4) - barWidth;
|
||||
const topPos =
|
||||
shape === 'gauge'
|
||||
? `${dimensions.gaugeBottomY - height}px`
|
||||
: `calc(50% + ${radius / (showNameAndValue ? 3.3 : 4)}px)`;
|
||||
const { radius, barWidth } = dimensions;
|
||||
|
||||
const height = radius / 4;
|
||||
const widthFactor = shape === 'gauge' ? 1.6 : 1.4;
|
||||
const width = radius * widthFactor - barWidth;
|
||||
const topPos = shape === 'gauge' ? `${dimensions.gaugeBottomY - height}px` : `calc(50% + ${radius / 2.8}px)`;
|
||||
|
||||
const styles = css({
|
||||
position: 'absolute',
|
||||
top: topPos,
|
||||
});
|
||||
|
||||
const config: FieldConfig<GraphFieldConfig> = {
|
||||
color: {
|
||||
@@ -42,7 +45,7 @@ export function RadialSparkline({ sparkline, dimensions, theme, color, shape, te
|
||||
};
|
||||
|
||||
return (
|
||||
<div style={{ position: 'absolute', top: topPos }}>
|
||||
<div className={styles}>
|
||||
<Sparkline height={height} width={width} sparkline={sparkline} theme={theme} config={config} />
|
||||
</div>
|
||||
);
|
||||
|
||||
@@ -1,12 +1,6 @@
|
||||
import { css } from '@emotion/css';
|
||||
|
||||
import {
|
||||
DisplayValue,
|
||||
DisplayValueAlignmentFactors,
|
||||
FieldSparkline,
|
||||
formattedValueToString,
|
||||
GrafanaTheme2,
|
||||
} from '@grafana/data';
|
||||
import { DisplayValue, DisplayValueAlignmentFactors, formattedValueToString, GrafanaTheme2 } from '@grafana/data';
|
||||
|
||||
import { useStyles2 } from '../../themes/ThemeContext';
|
||||
import { calculateFontSize } from '../../utils/measureText';
|
||||
@@ -14,13 +8,21 @@ import { calculateFontSize } from '../../utils/measureText';
|
||||
import { RadialShape, RadialTextMode } from './RadialGauge';
|
||||
import { GaugeDimensions } from './utils';
|
||||
|
||||
// function toCartesian(centerX: number, centerY: number, radius: number, angleInDegrees: number) {
|
||||
// let radian = ((angleInDegrees - 90) * Math.PI) / 180.0;
|
||||
// return {
|
||||
// x: centerX + radius * Math.cos(radian),
|
||||
// y: centerY + radius * Math.sin(radian),
|
||||
// };
|
||||
// }
|
||||
|
||||
interface RadialTextProps {
|
||||
displayValue: DisplayValue;
|
||||
theme: GrafanaTheme2;
|
||||
dimensions: GaugeDimensions;
|
||||
textMode: Exclude<RadialTextMode, 'auto'>;
|
||||
textMode: RadialTextMode;
|
||||
vizCount: number;
|
||||
shape: RadialShape;
|
||||
sparkline?: FieldSparkline;
|
||||
alignmentFactors?: DisplayValueAlignmentFactors;
|
||||
valueManualFontSize?: number;
|
||||
nameManualFontSize?: number;
|
||||
@@ -31,8 +33,8 @@ export function RadialText({
|
||||
theme,
|
||||
dimensions,
|
||||
textMode,
|
||||
vizCount,
|
||||
shape,
|
||||
sparkline,
|
||||
alignmentFactors,
|
||||
valueManualFontSize,
|
||||
nameManualFontSize,
|
||||
@@ -44,6 +46,10 @@ export function RadialText({
|
||||
return null;
|
||||
}
|
||||
|
||||
if (textMode === 'auto') {
|
||||
textMode = vizCount === 1 ? 'value' : 'value_and_name';
|
||||
}
|
||||
|
||||
const nameToAlignTo = (alignmentFactors ? alignmentFactors.title : displayValue.title) ?? '';
|
||||
const valueToAlignTo = formattedValueToString(alignmentFactors ? alignmentFactors : displayValue);
|
||||
|
||||
@@ -53,7 +59,7 @@ export function RadialText({
|
||||
|
||||
// Not sure where this comes from but svg text is not using body line-height
|
||||
const lineHeight = 1.21;
|
||||
const valueWidthToRadiusFactor = 0.82;
|
||||
const valueWidthToRadiusFactor = 0.85;
|
||||
const nameToHeightFactor = 0.45;
|
||||
const largeRadiusScalingDecay = 0.86;
|
||||
|
||||
@@ -92,23 +98,18 @@ export function RadialText({
|
||||
const valueHeight = valueFontSize * lineHeight;
|
||||
const nameHeight = nameFontSize * lineHeight;
|
||||
|
||||
const valueY = showName ? centerY - nameHeight * 0.3 : centerY;
|
||||
const nameY = showValue ? valueY + valueHeight * 0.7 : centerY;
|
||||
const valueY = showName ? centerY - nameHeight / 2 : centerY;
|
||||
const valueNameSpacing = valueHeight / 3.5;
|
||||
const nameY = showValue ? valueY + valueHeight / 2 + valueNameSpacing : centerY;
|
||||
const nameColor = showValue ? theme.colors.text.secondary : theme.colors.text.primary;
|
||||
const suffixShift = (valueFontSize - unitFontSize * 1.2) / 2;
|
||||
|
||||
// adjust the text up on gauges and when sparklines are present
|
||||
let yOffset = 0;
|
||||
if (shape === 'gauge') {
|
||||
// we render from the center of the gauge, so move up by half of half of the total height
|
||||
yOffset -= (valueHeight + nameHeight) / 4;
|
||||
}
|
||||
if (sparkline) {
|
||||
yOffset -= 8;
|
||||
}
|
||||
// For gauge shape we shift text up a bit
|
||||
const valueDy = shape === 'gauge' ? -valueFontSize * 0.3 : 0;
|
||||
const nameDy = shape === 'gauge' ? -nameFontSize * 0.7 : 0;
|
||||
|
||||
return (
|
||||
<g transform={`translate(0, ${yOffset})`}>
|
||||
<g>
|
||||
{showValue && (
|
||||
<text
|
||||
x={centerX}
|
||||
@@ -118,6 +119,7 @@ export function RadialText({
|
||||
className={styles.text}
|
||||
textAnchor="middle"
|
||||
dominantBaseline="middle"
|
||||
dy={valueDy}
|
||||
>
|
||||
<tspan fontSize={unitFontSize}>{displayValue.prefix ?? ''}</tspan>
|
||||
<tspan>{displayValue.text}</tspan>
|
||||
@@ -131,6 +133,7 @@ export function RadialText({
|
||||
fontSize={nameFontSize}
|
||||
x={centerX}
|
||||
y={nameY}
|
||||
dy={nameDy}
|
||||
textAnchor="middle"
|
||||
dominantBaseline="middle"
|
||||
fill={nameColor}
|
||||
|
||||
@@ -4,12 +4,11 @@ import { GaugeDimensions } from './utils';
|
||||
|
||||
export interface GlowGradientProps {
|
||||
id: string;
|
||||
barWidth: number;
|
||||
radius: number;
|
||||
}
|
||||
|
||||
export function GlowGradient({ id, barWidth }: GlowGradientProps) {
|
||||
// 0.75 is the minimum glow size, and it scales with bar width
|
||||
const glowSize = 0.75 + barWidth * 0.08;
|
||||
export function GlowGradient({ id, radius }: GlowGradientProps) {
|
||||
const glowSize = 0.02 * radius;
|
||||
|
||||
return (
|
||||
<filter id={id} filterUnits="userSpaceOnUse">
|
||||
@@ -83,7 +82,7 @@ export function MiddleCircleGlow({ dimensions, gaugeId, color }: CenterGlowProps
|
||||
<>
|
||||
<defs>
|
||||
<radialGradient id={gradientId} r={'50%'} fr={'0%'}>
|
||||
<stop offset="0%" stopColor={color} stopOpacity={0.15} />
|
||||
<stop offset="0%" stopColor={color} stopOpacity={0.2} />
|
||||
<stop offset="90%" stopColor={color} stopOpacity={0} />
|
||||
</radialGradient>
|
||||
</defs>
|
||||
|
||||
@@ -16,7 +16,7 @@ export interface SparklineProps extends Themeable2 {
|
||||
sparkline: FieldSparkline;
|
||||
}
|
||||
|
||||
const SparklineFn: React.FC<SparklineProps> = memo((props) => {
|
||||
export const Sparkline: React.FC<SparklineProps> = memo((props) => {
|
||||
const { sparkline, config: fieldConfig, theme, width, height } = props;
|
||||
|
||||
const { frame: alignedDataFrame, warning } = prepareSeries(sparkline, fieldConfig);
|
||||
@@ -30,14 +30,4 @@ const SparklineFn: React.FC<SparklineProps> = memo((props) => {
|
||||
return <UPlotChart data={data} config={configBuilder} width={width} height={height} />;
|
||||
});
|
||||
|
||||
SparklineFn.displayName = 'Sparkline';
|
||||
|
||||
// we converted to function component above, but some apps extend Sparkline, so we need
|
||||
// to keep exporting a class component until those apps are all rolled out.
|
||||
// see https://github.com/grafana/app-observability-plugin/pull/2079
|
||||
// eslint-disable-next-line react-prefer-function-component/react-prefer-function-component
|
||||
export class Sparkline extends React.PureComponent<SparklineProps> {
|
||||
render() {
|
||||
return <SparklineFn {...this.props} />;
|
||||
}
|
||||
}
|
||||
Sparkline.displayName = 'Sparkline';
|
||||
|
||||
@@ -119,14 +119,7 @@ describe('Get y range', () => {
|
||||
values: [2, 1.999999999999999, 2.000000000000001, 2, 2],
|
||||
type: FieldType.number,
|
||||
config: {},
|
||||
state: { range: { min: 1.9999999999999999999, max: 2.000000000000000001, delta: 0 } },
|
||||
};
|
||||
const decimalsNotCloseYField: Field = {
|
||||
name: 'y',
|
||||
values: [2, 0.0094, 0.0053, 0.0078, 0.0061],
|
||||
type: FieldType.number,
|
||||
config: {},
|
||||
state: { range: { min: 0.0053, max: 0.0094, delta: 0.0041 } },
|
||||
state: { range: { min: 1.999999999999999, max: 2.000000000000001, delta: 0 } },
|
||||
};
|
||||
const xField: Field = {
|
||||
name: 'x',
|
||||
@@ -190,11 +183,6 @@ describe('Get y range', () => {
|
||||
field: decimalsCloseYField,
|
||||
expected: [2, 4],
|
||||
},
|
||||
{
|
||||
description: 'decimal values which are not close to equal should not be rounded out',
|
||||
field: decimalsNotCloseYField,
|
||||
expected: [0.0053, 0.0094],
|
||||
},
|
||||
])(`should return correct range for $description`, ({ field, expected }) => {
|
||||
const actual = getYRange(getAlignedFrame(field));
|
||||
expect(actual).toEqual(expected);
|
||||
|
||||
@@ -8,7 +8,6 @@ import {
|
||||
FieldType,
|
||||
getFieldColorModeForField,
|
||||
GrafanaTheme2,
|
||||
guessDecimals,
|
||||
isLikelyAscendingVector,
|
||||
nullToValue,
|
||||
roundDecimals,
|
||||
@@ -77,6 +76,8 @@ export function getYRange(alignedFrame: DataFrame): Range.MinMax {
|
||||
min = Math.min(min!, field.config.min ?? Infinity);
|
||||
max = Math.max(max!, field.config.max ?? -Infinity);
|
||||
|
||||
// console.log({ min, max });
|
||||
|
||||
// if noValue is set, ensure that it is included in the range as well
|
||||
const noValue = +field.config?.noValue!;
|
||||
if (!Number.isNaN(noValue)) {
|
||||
@@ -84,11 +85,9 @@ export function getYRange(alignedFrame: DataFrame): Range.MinMax {
|
||||
max = Math.max(max, noValue);
|
||||
}
|
||||
|
||||
const decimals = field.config.decimals ?? Math.max(guessDecimals(min), guessDecimals(max));
|
||||
|
||||
// call roundDecimals to mirror what is going to eventually happen in uplot
|
||||
let roundedMin = roundDecimals(min, decimals);
|
||||
let roundedMax = roundDecimals(max, decimals);
|
||||
let roundedMin = roundDecimals(min, field.config.decimals ?? 0);
|
||||
let roundedMax = roundDecimals(max, field.config.decimals ?? 0);
|
||||
|
||||
// if the rounded min and max are different,
|
||||
// we can return the real min and max.
|
||||
@@ -103,9 +102,11 @@ export function getYRange(alignedFrame: DataFrame): Range.MinMax {
|
||||
roundedMax = 1;
|
||||
} else if (roundedMin < 0) {
|
||||
// both are negative
|
||||
// max = 0;
|
||||
roundedMin *= 2;
|
||||
} else {
|
||||
// both are positive
|
||||
// min = 0;
|
||||
roundedMax *= 2;
|
||||
}
|
||||
|
||||
|
||||
@@ -105,7 +105,6 @@ export function TableNG(props: TableNGProps) {
|
||||
const {
|
||||
cellHeight,
|
||||
data,
|
||||
disableKeyboardEvents,
|
||||
disableSanitizeHtml,
|
||||
enablePagination = false,
|
||||
enableSharedCrosshair = false,
|
||||
@@ -820,9 +819,9 @@ export function TableNG(props: TableNGProps) {
|
||||
}
|
||||
}}
|
||||
onCellKeyDown={
|
||||
hasNestedFrames || disableKeyboardEvents
|
||||
hasNestedFrames
|
||||
? (_, event) => {
|
||||
if (disableKeyboardEvents || event.isDefaultPrevented()) {
|
||||
if (event.isDefaultPrevented()) {
|
||||
// skip parent grid keyboard navigation if nested grid handled it
|
||||
event.preventGridDefault();
|
||||
}
|
||||
|
||||
@@ -138,8 +138,6 @@ export interface BaseTableProps {
|
||||
enableVirtualization?: boolean;
|
||||
// for MarkdownCell, this flag disables sanitization of HTML content. Configured via config.ini.
|
||||
disableSanitizeHtml?: boolean;
|
||||
// if true, disables all keyboard events in the table. this is used when previewing a table (i.e. suggestions)
|
||||
disableKeyboardEvents?: boolean;
|
||||
}
|
||||
|
||||
/* ---------------------------- Table cell props ---------------------------- */
|
||||
|
||||
@@ -187,15 +187,6 @@ func (hs *HTTPServer) registerRoutes() {
|
||||
publicdashboardsapi.CountPublicDashboardRequest(),
|
||||
hs.Index,
|
||||
)
|
||||
|
||||
r.Get("/bootdata/:accessToken",
|
||||
reqNoAuth,
|
||||
hs.PublicDashboardsApi.Middleware.HandleView,
|
||||
publicdashboardsapi.SetPublicDashboardAccessToken,
|
||||
publicdashboardsapi.SetPublicDashboardOrgIdOnContext(hs.PublicDashboardsApi.PublicDashboardService),
|
||||
publicdashboardsapi.CountPublicDashboardRequest(),
|
||||
hs.GetBootdata,
|
||||
)
|
||||
}
|
||||
|
||||
r.Get("/explore", authorize(ac.EvalPermission(ac.ActionDatasourcesExplore)), hs.Index)
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
package auditing
|
||||
|
||||
import (
|
||||
auditinternal "k8s.io/apiserver/pkg/apis/audit"
|
||||
"k8s.io/apiserver/pkg/audit"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
)
|
||||
|
||||
// NoopBackend is a no-op implementation of audit.Backend
|
||||
type NoopBackend struct{}
|
||||
|
||||
func ProvideNoopBackend() audit.Backend { return &NoopBackend{} }
|
||||
|
||||
func (b *NoopBackend) ProcessEvents(k8sEvents ...*auditinternal.Event) bool { return false }
|
||||
|
||||
func (NoopBackend) Run(stopCh <-chan struct{}) error { return nil }
|
||||
|
||||
func (NoopBackend) Shutdown() {}
|
||||
|
||||
func (NoopBackend) String() string { return "" }
|
||||
|
||||
// NoopPolicyRuleEvaluator is a no-op implementation of audit.PolicyRuleEvaluator
|
||||
type NoopPolicyRuleEvaluator struct{}
|
||||
|
||||
func ProvideNoopPolicyRuleEvaluator() audit.PolicyRuleEvaluator { return &NoopPolicyRuleEvaluator{} }
|
||||
|
||||
func (NoopPolicyRuleEvaluator) EvaluatePolicyRule(authorizer.Attributes) audit.RequestAuditConfig {
|
||||
return audit.RequestAuditConfig{Level: auditinternal.LevelNone}
|
||||
}
|
||||
@@ -61,24 +61,20 @@ func (s *legacyStorage) List(ctx context.Context, options *internalversion.ListO
|
||||
}
|
||||
|
||||
func (s *legacyStorage) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
|
||||
if s.dsConfigHandlerRequestsDuration != nil {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Get"), time.Since(start).Seconds())
|
||||
}()
|
||||
}
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Get"), time.Since(start).Seconds())
|
||||
}()
|
||||
|
||||
return s.datasources.GetDataSource(ctx, name)
|
||||
}
|
||||
|
||||
// Create implements rest.Creater.
|
||||
func (s *legacyStorage) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {
|
||||
if s.dsConfigHandlerRequestsDuration != nil {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
|
||||
}()
|
||||
}
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
|
||||
}()
|
||||
|
||||
ds, ok := obj.(*v0alpha1.DataSource)
|
||||
if !ok {
|
||||
@@ -89,12 +85,10 @@ func (s *legacyStorage) Create(ctx context.Context, obj runtime.Object, createVa
|
||||
|
||||
// Update implements rest.Updater.
|
||||
func (s *legacyStorage) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {
|
||||
if s.dsConfigHandlerRequestsDuration != nil {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
|
||||
}()
|
||||
}
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
|
||||
}()
|
||||
|
||||
old, err := s.Get(ctx, name, &metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@@ -132,12 +126,10 @@ func (s *legacyStorage) Update(ctx context.Context, name string, objInfo rest.Up
|
||||
|
||||
// Delete implements rest.GracefulDeleter.
|
||||
func (s *legacyStorage) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) {
|
||||
if s.dsConfigHandlerRequestsDuration != nil {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
|
||||
}()
|
||||
}
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
metricutil.ObserveWithExemplar(ctx, s.dsConfigHandlerRequestsDuration.WithLabelValues("new", "Create"), time.Since(start).Seconds())
|
||||
}()
|
||||
|
||||
err := s.datasources.DeleteDataSource(ctx, name)
|
||||
return nil, false, err
|
||||
|
||||
@@ -3,7 +3,6 @@ package datasource
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"maps"
|
||||
|
||||
@@ -39,14 +38,14 @@ var (
|
||||
// DataSourceAPIBuilder is used just so wire has something unique to return
|
||||
type DataSourceAPIBuilder struct {
|
||||
datasourceResourceInfo utils.ResourceInfo
|
||||
pluginJSON plugins.JSONData
|
||||
client PluginClient // will only ever be called with the same plugin id!
|
||||
datasources PluginDatasourceProvider
|
||||
contextProvider PluginContextWrapper
|
||||
accessControl accesscontrol.AccessControl
|
||||
queryTypes *queryV0.QueryTypeDefinitionList
|
||||
configCrudUseNewApis bool
|
||||
dataSourceCRUDMetric *prometheus.HistogramVec
|
||||
|
||||
pluginJSON plugins.JSONData
|
||||
client PluginClient // will only ever be called with the same plugin id!
|
||||
datasources PluginDatasourceProvider
|
||||
contextProvider PluginContextWrapper
|
||||
accessControl accesscontrol.AccessControl
|
||||
queryTypes *queryV0.QueryTypeDefinitionList
|
||||
configCrudUseNewApis bool
|
||||
}
|
||||
|
||||
func RegisterAPIService(
|
||||
@@ -67,16 +66,6 @@ func RegisterAPIService(
|
||||
var err error
|
||||
var builder *DataSourceAPIBuilder
|
||||
|
||||
dataSourceCRUDMetric := metricutil.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Namespace: "grafana",
|
||||
Name: "ds_config_handler_requests_duration_seconds",
|
||||
Help: "Duration of requests handled by datasource configuration handlers",
|
||||
}, []string{"code_path", "handler"})
|
||||
regErr := reg.Register(dataSourceCRUDMetric)
|
||||
if regErr != nil && !errors.As(regErr, &prometheus.AlreadyRegisteredError{}) {
|
||||
return nil, regErr
|
||||
}
|
||||
|
||||
pluginJSONs, err := getDatasourcePlugins(pluginSources)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting list of datasource plugins: %s", err)
|
||||
@@ -102,7 +91,6 @@ func RegisterAPIService(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
builder.SetDataSourceCRUDMetrics(dataSourceCRUDMetric)
|
||||
|
||||
apiRegistrar.RegisterAPI(builder)
|
||||
}
|
||||
@@ -173,10 +161,6 @@ func (b *DataSourceAPIBuilder) GetGroupVersion() schema.GroupVersion {
|
||||
return b.datasourceResourceInfo.GroupVersion()
|
||||
}
|
||||
|
||||
func (b *DataSourceAPIBuilder) SetDataSourceCRUDMetrics(datasourceCRUDMetric *prometheus.HistogramVec) {
|
||||
b.dataSourceCRUDMetric = datasourceCRUDMetric
|
||||
}
|
||||
|
||||
func addKnownTypes(scheme *runtime.Scheme, gv schema.GroupVersion) {
|
||||
scheme.AddKnownTypes(gv,
|
||||
&datasourceV0.DataSource{},
|
||||
@@ -234,9 +218,13 @@ func (b *DataSourceAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver
|
||||
|
||||
if b.configCrudUseNewApis {
|
||||
legacyStore := &legacyStorage{
|
||||
datasources: b.datasources,
|
||||
resourceInfo: &ds,
|
||||
dsConfigHandlerRequestsDuration: b.dataSourceCRUDMetric,
|
||||
datasources: b.datasources,
|
||||
resourceInfo: &ds,
|
||||
dsConfigHandlerRequestsDuration: metricutil.NewHistogramVec(prometheus.HistogramOpts{
|
||||
Namespace: "grafana",
|
||||
Name: "ds_config_handler_requests_duration_seconds",
|
||||
Help: "Duration of requests handled by datasource configuration handlers",
|
||||
}, []string{"code_path", "handler"}),
|
||||
}
|
||||
unified, err := grafanaregistry.NewRegistryStore(opts.Scheme, ds, opts.OptsGetter)
|
||||
if err != nil {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user