Compare commits
13 Commits
sriram/SQL
...
docs/loki-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0f1de558e2 | ||
|
|
ba2ef712d8 | ||
|
|
a45400de8d | ||
|
|
0c7936111f | ||
|
|
7c55021b94 | ||
|
|
8b7bd6f646 | ||
|
|
b889d23c29 | ||
|
|
e014d9a000 | ||
|
|
3eab41557a | ||
|
|
db96b6c1e3 | ||
|
|
3f78facce0 | ||
|
|
361fadf3be | ||
|
|
f244c9d483 |
@@ -13,7 +13,7 @@ import (
|
||||
// schema is unexported to prevent accidental overwrites
|
||||
var (
|
||||
schemaReceiver = resource.NewSimpleSchema("notifications.alerting.grafana.app", "v0alpha1", NewReceiver(), &ReceiverList{}, resource.WithKind("Receiver"),
|
||||
resource.WithPlural("receivers"), resource.WithScope(resource.NamespacedScope), resource.WithSelectableFields([]resource.SelectableField{resource.SelectableField{
|
||||
resource.WithPlural("receivers"), resource.WithScope(resource.NamespacedScope), resource.WithSelectableFields([]resource.SelectableField{{
|
||||
FieldSelector: "spec.title",
|
||||
FieldValueFunc: func(o resource.Object) (string, error) {
|
||||
cast, ok := o.(*Receiver)
|
||||
|
||||
@@ -129,7 +129,7 @@ DashboardLink: {
|
||||
placement?: DashboardLinkPlacement
|
||||
}
|
||||
|
||||
// Dashboard Link placement. Defines where the link should be displayed.
|
||||
// Dashboard Link placement. Defines where the link should be displayed.
|
||||
// - "inControlsMenu" renders the link in bottom part of the dashboard controls dropdown menu
|
||||
DashboardLinkPlacement: "inControlsMenu"
|
||||
|
||||
@@ -790,8 +790,6 @@ VariableOption: {
|
||||
text: string | [...string]
|
||||
// Value of the option
|
||||
value: string | [...string]
|
||||
// Additional properties for multi-props variables
|
||||
properties?: {[string]: string}
|
||||
}
|
||||
|
||||
// Query variable specification
|
||||
@@ -934,7 +932,6 @@ CustomVariableSpec: {
|
||||
skipUrlSync: bool | *false
|
||||
description?: string
|
||||
allowCustomValue: bool | *true
|
||||
valuesFormat?: "csv" | "json"
|
||||
}
|
||||
|
||||
// Custom variable kind
|
||||
|
||||
@@ -794,8 +794,6 @@ VariableOption: {
|
||||
text: string | [...string]
|
||||
// Value of the option
|
||||
value: string | [...string]
|
||||
// Additional properties for multi-props variables
|
||||
properties?: {[string]: string}
|
||||
}
|
||||
|
||||
// Query variable specification
|
||||
@@ -937,7 +935,6 @@ CustomVariableSpec: {
|
||||
skipUrlSync: bool | *false
|
||||
description?: string
|
||||
allowCustomValue: bool | *true
|
||||
valuesFormat?: "csv" | "json"
|
||||
}
|
||||
|
||||
// Custom variable kind
|
||||
|
||||
@@ -222,10 +222,8 @@ lineage: schemas: [{
|
||||
// Optional field, if you want to extract part of a series name or metric node segment.
|
||||
// Named capture groups can be used to separate the display text and value.
|
||||
regex?: string
|
||||
// Optional, indicates whether a custom type variable uses CSV or JSON to define its values
|
||||
valuesFormat?: "csv" | "json" | *"csv"
|
||||
// Determine whether regex applies to variable value or display text
|
||||
regexApplyTo?: #VariableRegexApplyTo
|
||||
// Determine whether regex applies to variable value or display text
|
||||
regexApplyTo?: #VariableRegexApplyTo
|
||||
// Additional static options for query variable
|
||||
staticOptions?: [...#VariableOption]
|
||||
// Ordering of static options in relation to options returned from data source for query variable
|
||||
|
||||
@@ -301,8 +301,6 @@ var _ resource.ListObject = &DashboardList{}
|
||||
|
||||
// Copy methods for all subresource types
|
||||
|
||||
|
||||
|
||||
// DeepCopy creates a full deep copy of DashboardStatus
|
||||
func (s *DashboardStatus) DeepCopy() *DashboardStatus {
|
||||
cpy := &DashboardStatus{}
|
||||
|
||||
@@ -222,10 +222,8 @@ lineage: schemas: [{
|
||||
// Optional field, if you want to extract part of a series name or metric node segment.
|
||||
// Named capture groups can be used to separate the display text and value.
|
||||
regex?: string
|
||||
// Optional, indicates whether a custom type variable uses CSV or JSON to define its values
|
||||
valuesFormat?: "csv" | "json" | *"csv"
|
||||
// Determine whether regex applies to variable value or display text
|
||||
regexApplyTo?: #VariableRegexApplyTo
|
||||
// Determine whether regex applies to variable value or display text
|
||||
regexApplyTo?: #VariableRegexApplyTo
|
||||
// Additional static options for query variable
|
||||
staticOptions?: [...#VariableOption]
|
||||
// Ordering of static options in relation to options returned from data source for query variable
|
||||
|
||||
@@ -301,8 +301,6 @@ var _ resource.ListObject = &DashboardList{}
|
||||
|
||||
// Copy methods for all subresource types
|
||||
|
||||
|
||||
|
||||
// DeepCopy creates a full deep copy of DashboardStatus
|
||||
func (s *DashboardStatus) DeepCopy() *DashboardStatus {
|
||||
cpy := &DashboardStatus{}
|
||||
|
||||
@@ -133,7 +133,7 @@ DashboardLink: {
|
||||
placement?: DashboardLinkPlacement
|
||||
}
|
||||
|
||||
// Dashboard Link placement. Defines where the link should be displayed.
|
||||
// Dashboard Link placement. Defines where the link should be displayed.
|
||||
// - "inControlsMenu" renders the link in bottom part of the dashboard controls dropdown menu
|
||||
DashboardLinkPlacement: "inControlsMenu"
|
||||
|
||||
@@ -794,8 +794,6 @@ VariableOption: {
|
||||
text: string | [...string]
|
||||
// Value of the option
|
||||
value: string | [...string]
|
||||
// Additional properties for multi-props variables
|
||||
properties?: {[string]: string}
|
||||
}
|
||||
|
||||
// Query variable specification
|
||||
@@ -938,7 +936,6 @@ CustomVariableSpec: {
|
||||
skipUrlSync: bool | *false
|
||||
description?: string
|
||||
allowCustomValue: bool | *true
|
||||
valuesFormat?: "csv" | "json"
|
||||
}
|
||||
|
||||
// Custom variable kind
|
||||
|
||||
@@ -1411,8 +1411,6 @@ type DashboardVariableOption struct {
|
||||
Text DashboardStringOrArrayOfString `json:"text"`
|
||||
// Value of the option
|
||||
Value DashboardStringOrArrayOfString `json:"value"`
|
||||
// Additional properties for multi-props variables
|
||||
Properties map[string]string `json:"properties,omitempty"`
|
||||
}
|
||||
|
||||
// NewDashboardVariableOption creates a new DashboardVariableOption object.
|
||||
@@ -1705,19 +1703,18 @@ func NewDashboardCustomVariableKind() *DashboardCustomVariableKind {
|
||||
// Custom variable specification
|
||||
// +k8s:openapi-gen=true
|
||||
type DashboardCustomVariableSpec struct {
|
||||
Name string `json:"name"`
|
||||
Query string `json:"query"`
|
||||
Current DashboardVariableOption `json:"current"`
|
||||
Options []DashboardVariableOption `json:"options"`
|
||||
Multi bool `json:"multi"`
|
||||
IncludeAll bool `json:"includeAll"`
|
||||
AllValue *string `json:"allValue,omitempty"`
|
||||
Label *string `json:"label,omitempty"`
|
||||
Hide DashboardVariableHide `json:"hide"`
|
||||
SkipUrlSync bool `json:"skipUrlSync"`
|
||||
Description *string `json:"description,omitempty"`
|
||||
AllowCustomValue bool `json:"allowCustomValue"`
|
||||
ValuesFormat *DashboardCustomVariableSpecValuesFormat `json:"valuesFormat,omitempty"`
|
||||
Name string `json:"name"`
|
||||
Query string `json:"query"`
|
||||
Current DashboardVariableOption `json:"current"`
|
||||
Options []DashboardVariableOption `json:"options"`
|
||||
Multi bool `json:"multi"`
|
||||
IncludeAll bool `json:"includeAll"`
|
||||
AllValue *string `json:"allValue,omitempty"`
|
||||
Label *string `json:"label,omitempty"`
|
||||
Hide DashboardVariableHide `json:"hide"`
|
||||
SkipUrlSync bool `json:"skipUrlSync"`
|
||||
Description *string `json:"description,omitempty"`
|
||||
AllowCustomValue bool `json:"allowCustomValue"`
|
||||
}
|
||||
|
||||
// NewDashboardCustomVariableSpec creates a new DashboardCustomVariableSpec object.
|
||||
@@ -2101,14 +2098,6 @@ const (
|
||||
DashboardQueryVariableSpecStaticOptionsOrderSorted DashboardQueryVariableSpecStaticOptionsOrder = "sorted"
|
||||
)
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type DashboardCustomVariableSpecValuesFormat string
|
||||
|
||||
const (
|
||||
DashboardCustomVariableSpecValuesFormatCsv DashboardCustomVariableSpecValuesFormat = "csv"
|
||||
DashboardCustomVariableSpecValuesFormatJson DashboardCustomVariableSpecValuesFormat = "json"
|
||||
)
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type DashboardPanelKindOrLibraryPanelKind struct {
|
||||
PanelKind *DashboardPanelKind `json:"PanelKind,omitempty"`
|
||||
|
||||
@@ -1548,12 +1548,6 @@ func schema_pkg_apis_dashboard_v2alpha1_DashboardCustomVariableSpec(ref common.R
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"valuesFormat": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"name", "query", "current", "options", "multi", "includeAll", "hide", "skipUrlSync", "allowCustomValue"},
|
||||
},
|
||||
|
||||
@@ -798,8 +798,6 @@ VariableOption: {
|
||||
text: string | [...string]
|
||||
// Value of the option
|
||||
value: string | [...string]
|
||||
// Additional properties for multi-props variables
|
||||
properties?: {[string]: string}
|
||||
}
|
||||
|
||||
// Query variable specification
|
||||
@@ -941,7 +939,6 @@ CustomVariableSpec: {
|
||||
skipUrlSync: bool | *false
|
||||
description?: string
|
||||
allowCustomValue: bool | *true
|
||||
valuesFormat?: "csv" | "json"
|
||||
}
|
||||
|
||||
// Custom variable kind
|
||||
|
||||
@@ -1414,8 +1414,6 @@ type DashboardVariableOption struct {
|
||||
Text DashboardStringOrArrayOfString `json:"text"`
|
||||
// Value of the option
|
||||
Value DashboardStringOrArrayOfString `json:"value"`
|
||||
// Additional properties for multi-props variables
|
||||
Properties map[string]string `json:"properties,omitempty"`
|
||||
}
|
||||
|
||||
// NewDashboardVariableOption creates a new DashboardVariableOption object.
|
||||
@@ -1709,19 +1707,18 @@ func NewDashboardCustomVariableKind() *DashboardCustomVariableKind {
|
||||
// Custom variable specification
|
||||
// +k8s:openapi-gen=true
|
||||
type DashboardCustomVariableSpec struct {
|
||||
Name string `json:"name"`
|
||||
Query string `json:"query"`
|
||||
Current DashboardVariableOption `json:"current"`
|
||||
Options []DashboardVariableOption `json:"options"`
|
||||
Multi bool `json:"multi"`
|
||||
IncludeAll bool `json:"includeAll"`
|
||||
AllValue *string `json:"allValue,omitempty"`
|
||||
Label *string `json:"label,omitempty"`
|
||||
Hide DashboardVariableHide `json:"hide"`
|
||||
SkipUrlSync bool `json:"skipUrlSync"`
|
||||
Description *string `json:"description,omitempty"`
|
||||
AllowCustomValue bool `json:"allowCustomValue"`
|
||||
ValuesFormat *DashboardCustomVariableSpecValuesFormat `json:"valuesFormat,omitempty"`
|
||||
Name string `json:"name"`
|
||||
Query string `json:"query"`
|
||||
Current DashboardVariableOption `json:"current"`
|
||||
Options []DashboardVariableOption `json:"options"`
|
||||
Multi bool `json:"multi"`
|
||||
IncludeAll bool `json:"includeAll"`
|
||||
AllValue *string `json:"allValue,omitempty"`
|
||||
Label *string `json:"label,omitempty"`
|
||||
Hide DashboardVariableHide `json:"hide"`
|
||||
SkipUrlSync bool `json:"skipUrlSync"`
|
||||
Description *string `json:"description,omitempty"`
|
||||
AllowCustomValue bool `json:"allowCustomValue"`
|
||||
}
|
||||
|
||||
// NewDashboardCustomVariableSpec creates a new DashboardCustomVariableSpec object.
|
||||
@@ -2136,14 +2133,6 @@ const (
|
||||
DashboardQueryVariableSpecStaticOptionsOrderSorted DashboardQueryVariableSpecStaticOptionsOrder = "sorted"
|
||||
)
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type DashboardCustomVariableSpecValuesFormat string
|
||||
|
||||
const (
|
||||
DashboardCustomVariableSpecValuesFormatCsv DashboardCustomVariableSpecValuesFormat = "csv"
|
||||
DashboardCustomVariableSpecValuesFormatJson DashboardCustomVariableSpecValuesFormat = "json"
|
||||
)
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type DashboardPanelKindOrLibraryPanelKind struct {
|
||||
PanelKind *DashboardPanelKind `json:"PanelKind,omitempty"`
|
||||
|
||||
@@ -1560,12 +1560,6 @@ func schema_pkg_apis_dashboard_v2beta1_DashboardCustomVariableSpec(ref common.Re
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"valuesFormat": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"name", "query", "current", "options", "multi", "includeAll", "hide", "skipUrlSync", "allowCustomValue"},
|
||||
},
|
||||
|
||||
4
apps/dashboard/pkg/apis/dashboard_manifest.go
generated
4
apps/dashboard/pkg/apis/dashboard_manifest.go
generated
File diff suppressed because one or more lines are too long
@@ -1336,17 +1336,6 @@ func buildCustomVariable(varMap map[string]interface{}, commonProps CommonVariab
|
||||
customVar.Spec.AllValue = &allValue
|
||||
}
|
||||
|
||||
if valuesFormat := schemaversion.GetStringValue(varMap, "valuesFormat"); valuesFormat != "" {
|
||||
switch valuesFormat {
|
||||
case string(dashv2alpha1.DashboardCustomVariableSpecValuesFormatJson):
|
||||
format := dashv2alpha1.DashboardCustomVariableSpecValuesFormatJson
|
||||
customVar.Spec.ValuesFormat = &format
|
||||
case string(dashv2alpha1.DashboardCustomVariableSpecValuesFormatCsv):
|
||||
format := dashv2alpha1.DashboardCustomVariableSpecValuesFormatCsv
|
||||
customVar.Spec.ValuesFormat = &format
|
||||
}
|
||||
}
|
||||
|
||||
return dashv2alpha1.DashboardVariableKind{
|
||||
CustomVariableKind: customVar,
|
||||
}, nil
|
||||
|
||||
@@ -685,7 +685,6 @@ func convertVariable_V2alpha1_to_V2beta1(in *dashv2alpha1.DashboardVariableKind,
|
||||
SkipUrlSync: in.CustomVariableKind.Spec.SkipUrlSync,
|
||||
Description: in.CustomVariableKind.Spec.Description,
|
||||
AllowCustomValue: in.CustomVariableKind.Spec.AllowCustomValue,
|
||||
ValuesFormat: convertCustomValuesFormat_V2alpha1_to_V2beta1(in.CustomVariableKind.Spec.ValuesFormat),
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -759,23 +758,6 @@ func convertVariable_V2alpha1_to_V2beta1(in *dashv2alpha1.DashboardVariableKind,
|
||||
return nil
|
||||
}
|
||||
|
||||
func convertCustomValuesFormat_V2alpha1_to_V2beta1(in *dashv2alpha1.DashboardCustomVariableSpecValuesFormat) *dashv2beta1.DashboardCustomVariableSpecValuesFormat {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch *in {
|
||||
case dashv2alpha1.DashboardCustomVariableSpecValuesFormatJson:
|
||||
v := dashv2beta1.DashboardCustomVariableSpecValuesFormatJson
|
||||
return &v
|
||||
case dashv2alpha1.DashboardCustomVariableSpecValuesFormatCsv:
|
||||
v := dashv2beta1.DashboardCustomVariableSpecValuesFormatCsv
|
||||
return &v
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func convertQueryVariableSpec_V2alpha1_to_V2beta1(in *dashv2alpha1.DashboardQueryVariableSpec, out *dashv2beta1.DashboardQueryVariableSpec, scope conversion.Scope) error {
|
||||
out.Name = in.Name
|
||||
out.Current = convertVariableOption_V2alpha1_to_V2beta1(in.Current)
|
||||
|
||||
@@ -18,8 +18,6 @@ import (
|
||||
v1beta1 "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1"
|
||||
)
|
||||
|
||||
var ()
|
||||
|
||||
var appManifestData = app.ManifestData{
|
||||
AppName: "folder",
|
||||
Group: "folder.grafana.app",
|
||||
|
||||
@@ -82,8 +82,8 @@ cloud.google.com/go/storage v1.55.0 h1:NESjdAToN9u1tmhVqhXCaCwYBuvEhZLLv0gBr+2zn
|
||||
cloud.google.com/go/storage v1.55.0/go.mod h1:ztSmTTwzsdXe5syLVS0YsbFxXuvEmEyZj7v7zChEmuY=
|
||||
cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4=
|
||||
cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=
|
||||
connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14=
|
||||
connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
|
||||
connectrpc.com/connect v1.18.1 h1:PAg7CjSAGvscaf6YZKUefjoih5Z/qYkyaTrBW8xvYPw=
|
||||
connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8=
|
||||
cuelabs.dev/go/oci/ociregistry v0.0.0-20251212221603-3adeb8663819 h1:Zh+Ur3OsoWpvALHPLT45nOekHkgOt+IOfutBbPqM17I=
|
||||
cuelabs.dev/go/oci/ociregistry v0.0.0-20251212221603-3adeb8663819/go.mod h1:WjmQxb+W6nVNCgj8nXrF24lIz95AHwnSl36tpjDZSU8=
|
||||
cuelang.org/go v0.11.1 h1:pV+49MX1mmvDm8Qh3Za3M786cty8VKPWzQ1Ho4gZRP0=
|
||||
@@ -749,8 +749,6 @@ github.com/google/cel-go v0.26.1 h1:iPbVVEdkhTX++hpe3lzSk7D3G3QSYqLGoHOcEio+UXQ=
|
||||
github.com/google/cel-go v0.26.1/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/gnostic v0.7.1 h1:t5Kc7j/8kYr8t2u11rykRrPPovlEMG4+xdc/SpekATs=
|
||||
github.com/google/gnostic v0.7.1/go.mod h1:KSw6sxnxEBFM8jLPfJd46xZP+yQcfE8XkiqfZx5zR28=
|
||||
github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c=
|
||||
github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
@@ -889,8 +887,8 @@ github.com/grafana/prometheus-alertmanager v0.25.1-0.20250911094103-5456b6e45604
|
||||
github.com/grafana/prometheus-alertmanager v0.25.1-0.20250911094103-5456b6e45604/go.mod h1:O/QP1BCm0HHIzbKvgMzqb5sSyH88rzkFk84F4TfJjBU=
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og=
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU=
|
||||
github.com/grafana/pyroscope/api v1.2.1-0.20251118081820-ace37f973a0f h1:fTlIj5n4x5dU63XHItug7GLjtnaeJdPqBlqg4zlABq0=
|
||||
github.com/grafana/pyroscope/api v1.2.1-0.20251118081820-ace37f973a0f/go.mod h1:VBNcIhunCZsJ3/mcYx+j7uFf0P/108eiWa+8+Z9ll3o=
|
||||
github.com/grafana/pyroscope/api v1.2.1-0.20250415190842-3ff7247547ae h1:35W3Wjp9KWnSoV/DuymmyIj5aHE0CYlDQ5m2KeXUPAc=
|
||||
github.com/grafana/pyroscope/api v1.2.1-0.20250415190842-3ff7247547ae/go.mod h1:6CJ1uXmLZ13ufpO9xE4pST+DyaBt0uszzrV0YnoaVLQ=
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
|
||||
github.com/grafana/sqlds/v5 v5.0.3 h1:+yUMUxfa0WANQsmS9xtTFSRX1Q55Iv1B9EjlrW4VlBU=
|
||||
|
||||
@@ -217,13 +217,6 @@ metaV0Alpha1: {
|
||||
title: string
|
||||
description?: string
|
||||
}]
|
||||
// +listType=atomic
|
||||
addedFunctions?: [...{
|
||||
// +listType=set
|
||||
targets: [...string]
|
||||
title: string
|
||||
description?: string
|
||||
}]
|
||||
// +listType=set
|
||||
// +listMapKey=id
|
||||
exposedComponents?: [...{
|
||||
|
||||
@@ -193,8 +193,6 @@ type MetaExtensions struct {
|
||||
AddedComponents []MetaV0alpha1ExtensionsAddedComponents `json:"addedComponents,omitempty"`
|
||||
// +listType=atomic
|
||||
AddedLinks []MetaV0alpha1ExtensionsAddedLinks `json:"addedLinks,omitempty"`
|
||||
// +listType=atomic
|
||||
AddedFunctions []MetaV0alpha1ExtensionsAddedFunctions `json:"addedFunctions,omitempty"`
|
||||
// +listType=set
|
||||
// +listMapKey=id
|
||||
ExposedComponents []MetaV0alpha1ExtensionsExposedComponents `json:"exposedComponents,omitempty"`
|
||||
@@ -398,21 +396,6 @@ func NewMetaV0alpha1ExtensionsAddedLinks() *MetaV0alpha1ExtensionsAddedLinks {
|
||||
}
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type MetaV0alpha1ExtensionsAddedFunctions struct {
|
||||
// +listType=set
|
||||
Targets []string `json:"targets"`
|
||||
Title string `json:"title"`
|
||||
Description *string `json:"description,omitempty"`
|
||||
}
|
||||
|
||||
// NewMetaV0alpha1ExtensionsAddedFunctions creates a new MetaV0alpha1ExtensionsAddedFunctions object.
|
||||
func NewMetaV0alpha1ExtensionsAddedFunctions() *MetaV0alpha1ExtensionsAddedFunctions {
|
||||
return &MetaV0alpha1ExtensionsAddedFunctions{
|
||||
Targets: []string{},
|
||||
}
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type MetaV0alpha1ExtensionsExposedComponents struct {
|
||||
Id string `json:"id"`
|
||||
|
||||
2
apps/plugins/pkg/apis/plugins_manifest.go
generated
2
apps/plugins/pkg/apis/plugins_manifest.go
generated
File diff suppressed because one or more lines are too long
@@ -367,8 +367,7 @@ func jsonDataToMetaJSONData(jsonData plugins.JSONData) pluginsv0alpha1.MetaJSOND
|
||||
|
||||
// Map Extensions
|
||||
if len(jsonData.Extensions.AddedLinks) > 0 || len(jsonData.Extensions.AddedComponents) > 0 ||
|
||||
len(jsonData.Extensions.ExposedComponents) > 0 || len(jsonData.Extensions.ExtensionPoints) > 0 ||
|
||||
len(jsonData.Extensions.AddedFunctions) > 0 {
|
||||
len(jsonData.Extensions.ExposedComponents) > 0 || len(jsonData.Extensions.ExtensionPoints) > 0 {
|
||||
extensions := &pluginsv0alpha1.MetaExtensions{}
|
||||
|
||||
if len(jsonData.Extensions.AddedLinks) > 0 {
|
||||
@@ -399,20 +398,6 @@ func jsonDataToMetaJSONData(jsonData plugins.JSONData) pluginsv0alpha1.MetaJSOND
|
||||
}
|
||||
}
|
||||
|
||||
if len(jsonData.Extensions.AddedFunctions) > 0 {
|
||||
extensions.AddedFunctions = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsAddedFunctions, 0, len(jsonData.Extensions.AddedFunctions))
|
||||
for _, comp := range jsonData.Extensions.AddedFunctions {
|
||||
v0Comp := pluginsv0alpha1.MetaV0alpha1ExtensionsAddedFunctions{
|
||||
Targets: comp.Targets,
|
||||
Title: comp.Title,
|
||||
}
|
||||
if comp.Description != "" {
|
||||
v0Comp.Description = &comp.Description
|
||||
}
|
||||
extensions.AddedFunctions = append(extensions.AddedFunctions, v0Comp)
|
||||
}
|
||||
}
|
||||
|
||||
if len(jsonData.Extensions.ExposedComponents) > 0 {
|
||||
extensions.ExposedComponents = make([]pluginsv0alpha1.MetaV0alpha1ExtensionsExposedComponents, 0, len(jsonData.Extensions.ExposedComponents))
|
||||
for _, comp := range jsonData.Extensions.ExposedComponents {
|
||||
|
||||
@@ -48,14 +48,6 @@ Recording rules can be helpful in various scenarios, such as:
|
||||
|
||||
The evaluation group of the recording rule determines how often the metric is pre-computed.
|
||||
|
||||
## Recommendations
|
||||
|
||||
- **Use frequent evaluation intervals**. Set frequent evaluation intervals for recording rules. Long intervals, such as an hour, can cause the recorded metric to be stale and lead to misaligned alert rule evaluations, especially when combined with a long pending period.
|
||||
- **Align alert evaluation with recording frequency**. The evaluation interval of an alert rule that depends on a recorded metric should be aligned with the recording rule's interval. If a recording rule runs every 3 minutes, the alert rule should also be evaluated at a similar frequency to ensure it acts on fresh data.
|
||||
- **Use `_over_time` functions for instant queries**. Since all alert rules are ultimately executed as an instant query, you can use functions like `max_over_time(my_metric[5m])` as an instant query. This allows you to get an aggregated value over a period without using a range query and a reduce expression.
|
||||
|
||||
## Types of recording rules
|
||||
|
||||
Similar to alert rules, Grafana supports two types of recording rules:
|
||||
|
||||
1. [Grafana-managed recording rules](ref:grafana-managed-recording-rules), which can query any Grafana data source supported by alerting. It's the recommended option.
|
||||
|
||||
57
docs/sources/alerting/best-practices/_index.md
Normal file
57
docs/sources/alerting/best-practices/_index.md
Normal file
@@ -0,0 +1,57 @@
|
||||
---
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/best-practices/
|
||||
description: This section provides a set of guides for useful alerting practices and recommendations
|
||||
keywords:
|
||||
- grafana
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Best practices
|
||||
title: Grafana Alerting best practices
|
||||
weight: 170
|
||||
---
|
||||
|
||||
# Grafana Alerting best practices
|
||||
|
||||
This section provides a set of guides and examples of best practices for Grafana Alerting. Here you can learn more about how to handle common alert management problems and you can see examples of more advanced usage of Grafana Alerting.
|
||||
|
||||
{{< section >}}
|
||||
|
||||
Designing and configuring an alert management set up that works takes time. Here are some additional tips on how to create an effective alert management set up:
|
||||
|
||||
{{< shared id="alert-planning-fundamentals" >}}
|
||||
|
||||
**Which are the key metrics for your business that you want to monitor and alert on?**
|
||||
|
||||
- Find events that are important to know about and not so trivial or frequent that recipients ignore them.
|
||||
- Alerts should only be created for big events that require immediate attention or intervention.
|
||||
- Consider quality over quantity.
|
||||
|
||||
**How do you want to organize your alerts and notifications?**
|
||||
|
||||
- Be selective about who you set to receive alerts. Consider sending them to the right teams, whoever is on call, and the specific channels.
|
||||
- Think carefully about priority and severity levels.
|
||||
- Automate as far as possible provisioning Alerting resources with the API or Terraform.
|
||||
|
||||
**Which information should you include in notifications?**
|
||||
|
||||
- Consider who the alert receivers and responders are.
|
||||
- Share information that helps responders identify and address potential issues.
|
||||
- Link alerts to dashboards to guide responders on which data to investigate.
|
||||
|
||||
**How can you reduce alert fatigue?**
|
||||
|
||||
- Avoid noisy, unnecessary alerts by using silences, mute timings, or pausing alert rule evaluation.
|
||||
- Continually tune your alert rules to review effectiveness. Remove alert rules to avoid duplication or ineffective alerts.
|
||||
- Continually review your thresholds and evaluation rules.
|
||||
|
||||
**How should you configure recording rules?**
|
||||
|
||||
- Use frequent evaluation intervals. It is recommended to set a frequent evaluation interval for recording rules. Long intervals, such as an hour, can cause the recorded metric to be stale and lead to misaligned alert rule evaluations, especially when combined with a long pending period.
|
||||
- Understand query types. Grafana Alerting uses both **Instant** and **Range** queries. Instant queries fetch a single data point, while Range queries fetch a series of data points over time. When using a Range query in an alert condition, you must use a Reduce expression to aggregate the series into a single value.
|
||||
- Align alert evaluation with recording frequency. The evaluation interval of an alert rule that depends on a recorded metric should be aligned with the recording rule's interval. If a recording rule runs every 3 minutes, the alert rule should also be evaluated at a similar frequency to ensure it acts on fresh data.
|
||||
- Use `_over_time` functions for instant queries. Since all alert rules are ultimately executed as an instant query, you can use functions like `max_over_time(my_metric[1h])` as an instant query. This allows you to get an aggregated value over a period without using a range query and a reduce expression.
|
||||
|
||||
{{< /shared >}}
|
||||
@@ -1,7 +1,5 @@
|
||||
---
|
||||
aliases:
|
||||
- ../best-practices/connectivity-errors/ # /docs/grafana/<GRAFANA_VERSION>/alerting/best-practices/connectivity-errors/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/guides/connectivity-errors/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/best-practices/connectivity-errors/
|
||||
description: Learn how to detect and handle connectivity issues in alerts using Prometheus, Grafana Alerting, or both.
|
||||
keywords:
|
||||
- grafana
|
||||
@@ -16,7 +14,7 @@ labels:
|
||||
- oss
|
||||
menuTitle: Handle connectivity errors
|
||||
title: Handle connectivity errors in alerts
|
||||
weight: 1020
|
||||
weight: 1010
|
||||
refs:
|
||||
pending-period:
|
||||
- pattern: /docs/grafana/
|
||||
@@ -1,7 +1,5 @@
|
||||
---
|
||||
aliases:
|
||||
- ../best-practices/dynamic-labels/ # /docs/grafana/<GRAFANA_VERSION>/alerting/best-practices/dynamic-labels/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/examples/dynamic-labels
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/best-practices/dynamic-labels
|
||||
description: This example shows how to define dynamic labels based on query values, along with important behavior to keep in mind when using them.
|
||||
keywords:
|
||||
- grafana
|
||||
@@ -12,7 +10,7 @@ labels:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Dynamic labels
|
||||
menuTitle: Examples of dynamic labels
|
||||
title: Example of dynamic labels in alert instances
|
||||
weight: 1104
|
||||
refs:
|
||||
@@ -1,7 +1,5 @@
|
||||
---
|
||||
aliases:
|
||||
- ../best-practices/dynamic-thresholds/ # /docs/grafana/<GRAFANA_VERSION>/alerting/best-practices/dynamic-thresholds/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/examples/dynamic-thresholds
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/best-practices/dynamic-thresholds
|
||||
description: This example shows how to use a distinct threshold value per dimension using multi-dimensional alerts and a Math expression.
|
||||
keywords:
|
||||
- grafana
|
||||
@@ -12,7 +10,7 @@ labels:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Dynamic thresholds
|
||||
menuTitle: Examples of dynamic thresholds
|
||||
title: Example of dynamic thresholds per dimension
|
||||
weight: 1105
|
||||
refs:
|
||||
@@ -1,7 +1,5 @@
|
||||
---
|
||||
aliases:
|
||||
- ../best-practices/high-cardinality-alerts/ # /docs/grafana/<GRAFANA_VERSION>/alerting/best-practices/high-cardinality-alerts/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/examples/high-cardinality-alerts/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/best-practices/high-cardinality-alerts/
|
||||
description: Learn how to detect and alert on high-cardinality metrics that can overload your metrics backend and increase observability costs.
|
||||
keywords:
|
||||
- grafana
|
||||
@@ -10,7 +8,7 @@ labels:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: High-cardinality alerts
|
||||
menuTitle: Examples of high-cardinality alerts
|
||||
title: Examples of high-cardinality alerts
|
||||
weight: 1105
|
||||
refs:
|
||||
@@ -1,7 +1,5 @@
|
||||
---
|
||||
aliases:
|
||||
- ../best-practices/missing-data/ # /docs/grafana/<GRAFANA_VERSION>/alerting/best-practices/missing-data/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/guides/missing-data/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/best-practices/missing-data/
|
||||
description: Learn how to detect missing metrics and design alerts that handle gaps in data in Prometheus and Grafana Alerting.
|
||||
keywords:
|
||||
- grafana
|
||||
@@ -16,7 +14,7 @@ labels:
|
||||
- oss
|
||||
menuTitle: Handle missing data
|
||||
title: Handle missing data in Grafana Alerting
|
||||
weight: 1030
|
||||
weight: 1020
|
||||
refs:
|
||||
connectivity-errors-guide:
|
||||
- pattern: /docs/grafana/
|
||||
@@ -1,7 +1,5 @@
|
||||
---
|
||||
aliases:
|
||||
- ../best-practices/multi-dimensional-alerts/ # /docs/grafana/<GRAFANA_VERSION>/alerting/best-practices/multi-dimensional-alerts/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/examples/multi-dimensional-alerts/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/best-practices/multi-dimensional-alerts/
|
||||
description: This example shows how a single alert rule can generate multiple alert instances using time series data.
|
||||
keywords:
|
||||
- grafana
|
||||
@@ -10,7 +8,7 @@ labels:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Multi-dimensional alerts
|
||||
menuTitle: Examples of multi-dimensional alerts
|
||||
title: Example of multi-dimensional alerts on time series data
|
||||
weight: 1101
|
||||
refs:
|
||||
@@ -1,7 +1,5 @@
|
||||
---
|
||||
aliases:
|
||||
- ../best-practices/table-data/ # /docs/grafana/<GRAFANA_VERSION>/alerting/best-practices/table-data/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/examples/table-data
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/best-practices/table-data
|
||||
description: This example shows how to create an alert rule using table data.
|
||||
keywords:
|
||||
- grafana
|
||||
@@ -10,7 +8,7 @@ labels:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Table data
|
||||
menuTitle: Examples of table data
|
||||
title: Example of alerting on tabular data
|
||||
weight: 1102
|
||||
refs:
|
||||
@@ -1,7 +1,5 @@
|
||||
---
|
||||
aliases:
|
||||
- ../best-practices/trace-based-alerts/ # /docs/grafana/<GRAFANA_VERSION>/alerting/best-practices/trace-based-alerts/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/examples/trace-based-alerts/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/best-practices/trace-based-alerts/
|
||||
description: This guide provides introductory examples and distinct approaches for setting up trace-based alerts in Grafana.
|
||||
keywords:
|
||||
- grafana
|
||||
@@ -10,7 +8,7 @@ labels:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
title: Trace-based alerts
|
||||
title: Examples of trace-based alerts
|
||||
weight: 1103
|
||||
refs:
|
||||
testdata-data-source:
|
||||
@@ -1,7 +1,5 @@
|
||||
---
|
||||
aliases:
|
||||
- ../best-practices/tutorials/ # /docs/grafana/<GRAFANA_VERSION>/alerting/best-practices/tutorials/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/examples/tutorials/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/best-practices/tutorials/
|
||||
description: This section provides a set of step-by-step tutorials to get started with Grafana Alerting.
|
||||
keywords:
|
||||
- grafana
|
||||
@@ -1,22 +0,0 @@
|
||||
---
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/examples/
|
||||
description: This section provides a set of guides for useful alerting practices and recommendations
|
||||
keywords:
|
||||
- grafana
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Examples
|
||||
title: Examples
|
||||
weight: 180
|
||||
---
|
||||
|
||||
# Examples
|
||||
|
||||
This section provides practical examples that show how to work with different types of alerting data, apply alert design patterns, reuse alert logic, and take advantage of specific Grafana Alerting features.
|
||||
|
||||
This section includes:
|
||||
|
||||
{{< section >}}
|
||||
@@ -1,35 +0,0 @@
|
||||
---
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/guides/
|
||||
description: This section provides a set of guides for useful alerting practices and recommendations
|
||||
keywords:
|
||||
- grafana
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Guides
|
||||
title: Guides
|
||||
weight: 170
|
||||
refs:
|
||||
examples:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/examples/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/examples/
|
||||
tutorials:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/examples/tutorials/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/examples/tutorials/
|
||||
---
|
||||
|
||||
# Guides
|
||||
|
||||
Guides in the Grafana Alerting documentation provide best practices and practical recommendations to help you move from a basic alerting setup to real-world use cases.
|
||||
|
||||
These guides cover topics such as:
|
||||
|
||||
{{< section >}}
|
||||
|
||||
For more hands-on examples, refer to [Examples](ref:examples) and [Tutorials](ref:tutorials).
|
||||
@@ -1,201 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../best-practices/ # /docs/grafana/<GRAFANA_VERSION>/alerting/best-practices/
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/guides/best-practices/
|
||||
description: Designing and configuring an effective alerting system takes time. This guide focuses on building alerting systems that scale with real-world operations.
|
||||
keywords:
|
||||
- grafana
|
||||
- alerting
|
||||
- guide
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Best practices
|
||||
title: Best practices
|
||||
weight: 1010
|
||||
refs:
|
||||
recovery-threshold:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/alert-rules/queries-conditions/#recovery-threshold
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/fundamentals/alert-rules/queries-conditions/#recovery-threshold
|
||||
keep-firing-for:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/alert-rule-evaluation/#keep-firing-for
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/fundamentals/alert-rule-evaluation/#keep-firing-for
|
||||
pending-period:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/alert-rule-evaluation/#pending-period
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/fundamentals/alert-rule-evaluation/#pending-period
|
||||
silences:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/configure-notifications/create-silence/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/configure-notifications/create-silence/
|
||||
timing-options:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/notifications/group-alert-notifications/#timing-options
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/fundamentals/notifications/group-alert-notifications/#timing-options
|
||||
group-alert-notifications:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/notifications/group-alert-notifications/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/fundamentals/notifications/group-alert-notifications/
|
||||
notification-policies:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/notifications/notification-policies/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/fundamentals/notifications/notification-policies/
|
||||
annotations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/alert-rules/annotation-label/#annotations
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/fundamentals/alert-rules/annotation-label/#annotations
|
||||
multi-dimensional-alerts:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/examples/multi-dimensional-alerts/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/examples/multi-dimensional-alerts/
|
||||
---
|
||||
|
||||
# Alerting best practices
|
||||
|
||||
Designing and configuring an effective alerting system takes time. This guide focuses on building alerting systems that scale with real-world operations.
|
||||
|
||||
The practices described here are intentionally high-level and apply regardless of tooling. Whether you use Prometheus, Grafana Alerting, or another stack, the same constraints apply: complex systems, imperfect signals, and humans on call.
|
||||
|
||||
Alerting is never finished. It evolves with incidents, organizational changes, and the systems it’s meant to protect.
|
||||
|
||||
{{< shared id="alert-planning-fundamentals" >}}
|
||||
|
||||
## Prioritize symptoms, but don’t ignore infrastructure signals
|
||||
|
||||
Alerts should primarily detect user-facing failures, not internal component behavior. Users don't care that a pod restarted; they care when the application is slow or failing. Symptom-based alerts tie directly to user impact.
|
||||
|
||||
Reliability metrics that impact users—latency, errors, availability—are better paging signals than infrastructure events or internal errors.
|
||||
|
||||
That said, infrastructure signals still matter. They can act as early warning indicators and are often useful when alerting maturity is low. A sustained spike in CPU or memory usage might not justify a page, but it can help explain or anticipate symptom-based failures.
|
||||
|
||||
Infrastructure alerts tend to be noisy and are often ignored when treated like paging signals. They are usually better suited for lower-severity channels such as dashboards, alert lists, or non-paging destinations like a dedicated Slack channel, where they can be monitored without interrupting on-call.
|
||||
|
||||
The key is balance as your alerting matures. Use infrastructure alerts to support diagnosis and prevention, not as a replacement for symptom-based alerts.
|
||||
|
||||
## Escalate priority based on confidence
|
||||
|
||||
Alert priority is often tied to user impact and the urgency to respond, but confidence should determine when escalation is necessary.
|
||||
|
||||
In this context, escalation defines how responders are notified as confidence grows. This can include increasing alert priority, widening notification, paging additional responders, or opening an incident once intervention is clearly required.
|
||||
|
||||
Early signals are often ambiguous, and confidence in a non-transient failure is usually low. Paging too early creates noise; paging too late means users are impacted for longer before anyone acts. A small or sudden increase in latency may not justify immediate action, but it can indicate a failure in progress.
|
||||
|
||||
Confidence increases as signals become stronger or begin to correlate.
|
||||
|
||||
Escalation is justified when issues are sustained or reinforced by multiple signals. For example, high latency combined with a rising error rate, or the same event firing over a sustained period. These patterns reduce the chance of transient noise and increase the likelihood of real impact.
|
||||
|
||||
Use confidence in user impact to drive escalation and avoid unnecessary pages.
|
||||
|
||||
## Scope alerts for scalability and actionability
|
||||
|
||||
In distributed systems, avoid creating separate alert rules for every host, service, or endpoint. Instead, define alert rules that scale automatically using [multi-dimensional alert rules](ref:multi-dimensional-alerts). This reduces rule duplication and allows alerting to scale as the system grows.
|
||||
|
||||
Start simple. Default to a single dimension such as `service` or `endpoint` to keep alerts manageable. Add dimensions only when they improve actionability. For example, when missing a dimension like `region` hides failures or doesn't provide enough information to act quickly.
|
||||
|
||||
Additional dimensions like `region` or `instance` can help identify the root cause, but more isn't always better.
|
||||
|
||||
## Design alerts for first responders and clear actions
|
||||
|
||||
Alerts should be designed for the first responder, not the person who created the alert. Anyone on call should be able to understand what's wrong and what to do next without deep knowledge of the system or alert configuration.
|
||||
|
||||
Avoid vague alerts that force responders to spend time figuring out context. Every alert should clearly explain why it exists, what triggered it, and how to investigate. Use [annotations](ref:annotations) to link to relevant dashboards and runbooks, which are essential for faster resolution.
|
||||
|
||||
Alerts should indicate a real problem and be actionable, even if the impact is low. Informational alerts add noise without improving reliability.
|
||||
|
||||
If no action is possible, it shouldn't be an alert—consider using a dashboard instead. Over time, alerts behave like technical debt: easy to create, costly to maintain, and hard to remove.
|
||||
|
||||
Review alerts often and remove those that don’t lead to action.
|
||||
|
||||
## Alerts should have an owner and system scope
|
||||
|
||||
Alerts without ownership are often ignored. Every alert must have an owner: a team responsible for maintaining the alert and responding when it fires.
|
||||
|
||||
Alerts must also define a system scope, such as a service or infrastructure component. Scope provides organizational context and connects alerts with ownership. Defining clear scopes is easier when services are treated as first-class entities, and organizations are built around service ownership.
|
||||
|
||||
> [Service Center in Grafana Cloud](/docs/grafana-cloud/alerting-and-irm/service-center/) can help operate a service-oriented view of your system and align alert scope with ownership.
|
||||
|
||||
After scope, ownership, and alert priority are defined, routing determines where alerts go and how they escalate. **Notification routing is as important as the alerts**.
|
||||
|
||||
Alerts should be delivered to the right team and channel based on priority, ownership, and team workflows. Use [notification policies](ref:notification-policies) to define a routing tree that matches the context of your service or scope:
|
||||
|
||||
- Define a parent policy for default routing within the scope.
|
||||
- Define nested policies for specific cases or higher-priority issues.
|
||||
|
||||
## Prevent notification overload with alert grouping
|
||||
|
||||
Without alert grouping, responders can receive many notifications for the same underlying problem.
|
||||
|
||||
For example, a database failure can trigger several alerts at the same time like increased latency, higher error rates, and internal errors. Paging separately for each symptom quickly turns into notification spam, even though there is a single root cause.
|
||||
|
||||
[Notification grouping](ref:group-alert-notifications) consolidates related alerts into a single notification. Instead of receiving multiple pages for the same issue, responders get one alert that represents the incident and includes all related firing alerts.
|
||||
|
||||
Grouping should follow operational boundaries such as service or owner, as defined by notification policies. Downstream or cascading failures should be grouped together so they surface as one issue rather than many.
|
||||
|
||||
## Mitigate flapping alerts
|
||||
|
||||
Short-lived failure spikes often trigger alerts that auto-resolve quickly. Alerting on transient failures creates noise and leads responders to ignore them.
|
||||
|
||||
Require issues to persist before alerting. Set a [pending period](ref:pending-period) to define how long a condition must remain true before firing. For example, instead of alerting immediately on high error rate, require it to stay above the threshold for some minutes.
|
||||
|
||||
Also, stabilize alerts by tuning query ranges and aggregations. Using raw data makes alerts sensitive to noise. Instead, evaluate over a time window and aggregate the data to smooth short spikes.
|
||||
|
||||
```promql
|
||||
# Reacts to transient spikes. Avoid this.
|
||||
cpu_usage > 90
|
||||
|
||||
# Smooth fluctuations.
|
||||
avg_over_time(cpu_usage[5m]) > 90
|
||||
```
|
||||
|
||||
For latency and error-based alerts, percentiles are often more useful than averages:
|
||||
|
||||
```promql
|
||||
quantile_over_time(0.95, http_duration_seconds[5m]) > 3
|
||||
```
|
||||
|
||||
Finally, avoid rapid resolve-and-fire notifications by using [`keep_firing_for`](ref:keep-firing-for) or [recovery thresholds](ref:recovery-threshold) to keep alerts active briefly during recovery. Both options reduce flapping and unnecessary notifications.
|
||||
|
||||
## Graduate symptom-based alerts into SLOs
|
||||
|
||||
When a symptom-based alert fires frequently, it usually indicates a reliability concern that should be measured and managed more deliberately. This is often a sign that the alert could evolve into an [SLO](/docs/grafana-cloud/alerting-and-irm/slo/).
|
||||
|
||||
Traditional alerts create pressure to react immediately, while error budgets introduce a buffer of time to act, changing how urgency is handled. Alerts can then be defined in terms of error budget burn rate rather than reacting to every minor deviation.
|
||||
|
||||
SLOs also align distinct teams around common reliability goals by providing a shared definition of what "good" looks like. They help consolidate multiple symptom alerts into a single user-facing objective.
|
||||
|
||||
For example, instead of several teams alerting on high latency, a single SLO can be used across teams to capture overall API performance.
|
||||
|
||||
## Integrate alerting into incident post-mortems
|
||||
|
||||
Every incident is an opportunity to improve alerting. After each incident, evaluate whether alerts helped responders act quickly or added unnecessary noise.
|
||||
|
||||
Assess which alerts fired, and how they influenced incident response. Review whether alerts triggered too late, too early, or without enough context, and adjust thresholds, priority, or escalation based on what actually happened.
|
||||
|
||||
Use [silences](ref:silences) during active incidents to reduce repeated notifications, but scope them carefully to avoid silencing unrelated alerts.
|
||||
|
||||
Post-mortems should evaluate alerts with root causes and lessons learned. If responders lacked key information during the incident, enrich alerts with additional context, dashboards, or better guidance.
|
||||
|
||||
## Alerts should be continuously improved
|
||||
|
||||
Alerting is an iterative process. Alerts that aren’t reviewed and refined lose effectiveness as systems and traffic patterns change.
|
||||
|
||||
Schedule regular reviews of existing alerts. Remove alerts that don’t lead to action, and tune alerts or thresholds that fire too often without providing useful signal. Reduce false positives to combat alert fatigue.
|
||||
|
||||
Prioritize clarity and simplicity in alert design. Simpler alerts are easier to understand, maintain, and trust under pressure. Favor fewer high-quality, actionable alerts over a large number of low-value ones.
|
||||
|
||||
Use dashboards and observability tools for investigation, not alerts.
|
||||
|
||||
{{< /shared >}}
|
||||
@@ -41,13 +41,9 @@ Select a group to expand it and view the list of alert rules within that group.
|
||||
|
||||
The list view includes a number of filters to simplify managing large volumes of alerts.
|
||||
|
||||
## Filter and save searches
|
||||
|
||||
Click the **Filter** button to open the filter popup. You can filter by name, label, folder/namespace, evaluation group, data source, contact point, rule source, rule state, rule type, and the health of the alert rule from the popup menu. Click **Apply** at the bottom of the filter popup to enact the filters as you search.
|
||||
|
||||
Click the **Saved searches** button to open the list of previously saved searches, or click **+ Save current search** to add your current search to the saved searches list. You can also rename a saved search or set it as a default search. When you set a saved search as the default search, the Alert rules page opens with the search applied.
|
||||
|
||||
{{< figure src="/media/docs/alerting/alerting-saved-searches.png" max-width="750px" alt="Alert rule filter options" >}}
|
||||
{{< figure src="/media/docs/alerting/alerting-list-view-filter.png" max-width="750px" alt="Alert rule filter options" >}}
|
||||
|
||||
## Change alert rules list view
|
||||
|
||||
|
||||
@@ -14,147 +14,140 @@ labels:
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Loki
|
||||
title: Configure the Loki data source
|
||||
title: Loki data source
|
||||
weight: 800
|
||||
refs:
|
||||
data-source-management:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
build-dashboards:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
provisioning-data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
logs-integration-labels-and-detected-fields:
|
||||
visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#labels-and-detected-fields
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#labels-and-detected-fields
|
||||
destination: /docs/grafana-cloud/visualizations/panels-visualizations/visualizations/
|
||||
variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/dashboards/variables/
|
||||
transformations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/panels-visualizations/query-transform-data/transform-data/
|
||||
loki-alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/alerting/
|
||||
loki-annotations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/annotations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/annotations/
|
||||
import-dashboard:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/import-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/dashboards/build-dashboards/import-dashboards/
|
||||
loki-troubleshooting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/troubleshooting/
|
||||
configure-loki:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
loki-query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/query-editor/
|
||||
loki-template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/template-variables/
|
||||
configure-loki-derived-fields:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/#derived-fields
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/#derived-fields
|
||||
---
|
||||
|
||||
# Loki data source
|
||||
|
||||
Grafana Loki is a set of components that can be combined into a fully featured logging stack.
|
||||
Unlike other logging systems, Loki is built around the idea of only indexing metadata about your logs: labels (just like Prometheus labels). Log data itself is then compressed and stored in chunks in object stores such as S3 or GCS, or even locally on a filesystem.
|
||||
Grafana Loki is a log aggregation system that stores and queries logs from your applications and infrastructure. Unlike traditional logging systems, Loki indexes only metadata (labels) about your logs rather than the full text. Log data is compressed and stored in object stores such as Amazon S3 or Google Cloud Storage, or locally on a filesystem.
|
||||
|
||||
The following guides will help you get started with Loki:
|
||||
|
||||
- [Getting started with Loki](/docs/loki/latest/get-started/)
|
||||
- [Install Loki](/docs/loki/latest/installation/)
|
||||
- [Loki best practices](/docs/loki/latest/best-practices/#best-practices)
|
||||
- [Configure the Loki data source](/docs/grafana/latest/datasources/loki/configure-loki-data-source/)
|
||||
- [LogQL](/docs/loki/latest/logql/)
|
||||
- [Loki query editor](query-editor/)
|
||||
You can use this data source to query, visualize, and alert on log data stored in Loki.
|
||||
|
||||
## Supported Loki versions
|
||||
|
||||
This data source supports these versions of Loki:
|
||||
This data source supports Loki v2.9 and later.
|
||||
|
||||
- v2.9+
|
||||
## Key capabilities
|
||||
|
||||
## Adding a data source
|
||||
The Loki data source provides the following capabilities:
|
||||
|
||||
For instructions on how to add a data source to Grafana, refer to the [administration documentation](ref:data-source-management)
|
||||
Only users with the organization administrator role can add data sources.
|
||||
Administrators can also [configure the data source via YAML](#provision-the-data-source) with Grafana's provisioning system.
|
||||
- **Log queries:** Query and filter logs using [LogQL](https://grafana.com/docs/loki/latest/logql/), Loki's query language inspired by PromQL.
|
||||
- **Metric queries:** Extract metrics from log data using LogQL metric queries, enabling you to count log events, calculate rates, and aggregate values.
|
||||
- **Live tailing:** Stream logs in real time as they're ingested into Loki.
|
||||
- **Derived fields:** Create links from log lines to external systems such as tracing backends, allowing you to jump directly from a log entry to a related trace.
|
||||
- **Annotations:** Overlay log events on time series graphs to correlate logs with metrics.
|
||||
- **Alerting:** Create alert rules based on log queries to notify you when specific patterns or thresholds are detected.
|
||||
|
||||
Once you've added the Loki data source, you can [configure it](#configure-the-data-source) so that your Grafana instance's users can create queries in its [query editor](query-editor/) when they [build dashboards](ref:build-dashboards), use [Explore](ref:explore), and [annotate visualizations](query-editor/#apply-annotations).
|
||||
## Get started
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
To troubleshoot configuration and other issues, check the log file located at `/var/log/grafana/grafana.log` on Unix systems, or in `<grafana_install_dir>/data/log` on other platforms and manual installations.
|
||||
{{< /admonition >}}
|
||||
The following documentation helps you get started with the Loki data source:
|
||||
|
||||
## Provision the data source
|
||||
- [Configure the Loki data source](ref:configure-loki)
|
||||
- [Loki query editor](ref:loki-query-editor)
|
||||
- [Loki template variables](ref:loki-template-variables)
|
||||
- [Troubleshoot the Loki data source](ref:loki-troubleshooting)
|
||||
|
||||
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
|
||||
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-data-sources).
|
||||
For more information about Loki itself, refer to the [Loki documentation](https://grafana.com/docs/loki/latest/):
|
||||
|
||||
### Provisioning examples
|
||||
- [Get started with Loki](https://grafana.com/docs/loki/latest/get-started/)
|
||||
- [Install Loki](https://grafana.com/docs/loki/latest/installation/)
|
||||
- [Loki best practices](https://grafana.com/docs/loki/latest/best-practices/#best-practices)
|
||||
- [LogQL query language](https://grafana.com/docs/loki/latest/logql/)
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
## Additional features
|
||||
|
||||
datasources:
|
||||
- name: Loki
|
||||
type: loki
|
||||
access: proxy
|
||||
url: http://localhost:3100
|
||||
jsonData:
|
||||
timeout: 60
|
||||
maxLines: 1000
|
||||
```
|
||||
After you configure the Loki data source, you can:
|
||||
|
||||
**Using basic authorization and a derived field:**
|
||||
- Create [visualizations](ref:visualizations) to display your log data
|
||||
- Configure and use [templates and variables](ref:variables) for dynamic dashboards
|
||||
- Add [transformations](ref:transformations) to process query results
|
||||
- Add [annotations](ref:loki-annotations) to overlay log events on graphs
|
||||
- Set up [alerting](ref:loki-alerting) to monitor your log data
|
||||
- Use [Explore](ref:explore) for ad-hoc log queries and analysis
|
||||
- Configure [derived fields](ref:configure-loki-derived-fields) to link logs to traces or other data sources
|
||||
|
||||
You must escape the dollar (`$`) character in YAML values because it can be used to interpolate environment variables:
|
||||
If you encounter issues, refer to [Troubleshoot issues with the Loki data source](ref:loki-troubleshooting).
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
## Community dashboards
|
||||
|
||||
datasources:
|
||||
- name: Loki
|
||||
type: loki
|
||||
access: proxy
|
||||
url: http://localhost:3100
|
||||
basicAuth: true
|
||||
basicAuthUser: my_user
|
||||
jsonData:
|
||||
maxLines: 1000
|
||||
derivedFields:
|
||||
# Field with internal link pointing to data source in Grafana.
|
||||
# datasourceUid value can be anything, but it should be unique across all defined data source uids.
|
||||
- datasourceUid: my_jaeger_uid
|
||||
matcherRegex: "traceID=(\\w+)"
|
||||
name: TraceID
|
||||
# url will be interpreted as query for the datasource
|
||||
url: '$${__value.raw}'
|
||||
# optional for URL Label to set a custom display label for the link.
|
||||
urlDisplayLabel: 'View Trace'
|
||||
Grafana doesn't ship pre-configured dashboards with the Loki data source, but you can find community-contributed dashboards on [Grafana Dashboards](https://grafana.com/grafana/dashboards/?dataSource=loki). These dashboards provide ready-made visualizations for common Loki use cases.
|
||||
|
||||
# Field with external link.
|
||||
- matcherRegex: "traceID=(\\w+)"
|
||||
name: TraceID
|
||||
url: 'http://localhost:16686/trace/$${__value.raw}'
|
||||
secureJsonData:
|
||||
basicAuthPassword: test_password
|
||||
```
|
||||
To import a community dashboard:
|
||||
|
||||
**Using a Jaeger data source:**
|
||||
1. Find a dashboard on [grafana.com/grafana/dashboards](https://grafana.com/grafana/dashboards/?dataSource=loki).
|
||||
1. Copy the dashboard ID.
|
||||
1. In Grafana, go to **Dashboards** > **New** > **Import**.
|
||||
1. Paste the dashboard ID and click **Load**.
|
||||
|
||||
In this example, the Jaeger data source's `uid` value should match the Loki data source's `datasourceUid` value.
|
||||
For more information, refer to [Import a dashboard](ref:import-dashboard).
|
||||
|
||||
```
|
||||
datasources:
|
||||
- name: Jaeger
|
||||
type: jaeger
|
||||
url: http://jaeger-tracing-query:16686/
|
||||
access: proxy
|
||||
# UID should match the datasourceUid in derivedFields.
|
||||
uid: my_jaeger_uid
|
||||
```
|
||||
## Related data sources
|
||||
|
||||
## Query the data source
|
||||
Loki integrates with other Grafana data sources to provide full observability across logs, metrics, and traces:
|
||||
|
||||
The Loki data source's query editor helps you create log and metric queries that use Loki's query language, [LogQL](/docs/loki/latest/logql/).
|
||||
- **Tempo:** Use [derived fields](ref:configure-loki-derived-fields) to create links from log lines to traces in Tempo, enabling seamless navigation from logs to distributed traces.
|
||||
- **Prometheus and Mimir:** Display logs alongside metrics on the same dashboard to correlate application behavior with performance data.
|
||||
|
||||
For details, refer to the [query editor documentation](query-editor/).
|
||||
|
||||
## Use template variables
|
||||
|
||||
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
|
||||
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
|
||||
Grafana refers to such variables as template variables.
|
||||
|
||||
For details, see the [template variables documentation](template-variables/).
|
||||
For more information about building observability workflows, refer to the [Grafana Tempo documentation](https://grafana.com/docs/tempo/latest/) and [Grafana Mimir documentation](https://grafana.com/docs/mimir/latest/).
|
||||
|
||||
226
docs/sources/datasources/loki/alerting/index.md
Normal file
226
docs/sources/datasources/loki/alerting/index.md
Normal file
@@ -0,0 +1,226 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/loki/alerting/
|
||||
description: Use Grafana Alerting with the Loki data source
|
||||
keywords:
|
||||
- grafana
|
||||
- loki
|
||||
- alerting
|
||||
- alerts
|
||||
- logs
|
||||
- recording rules
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Alerting
|
||||
title: Loki alerting
|
||||
weight: 450
|
||||
refs:
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/
|
||||
create-alert-rule:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
configure-loki:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
recording-rules:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-recording-rules/
|
||||
grafana-managed-recording-rules:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
data-source-managed-recording-rules:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-data-source-managed-recording-rules/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-recording-rules/create-data-source-managed-recording-rules/
|
||||
---
|
||||
|
||||
# Loki alerting
|
||||
|
||||
You can use Grafana Alerting with Loki to create alerts based on your log data. This allows you to monitor error rates, detect patterns, and receive notifications when specific conditions are met in your logs.
|
||||
|
||||
For general information about Grafana Alerting, refer to [Grafana Alerting](ref:alerting).
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before creating alerts with Loki, ensure you have:
|
||||
|
||||
- A [Loki data source configured](ref:configure-loki) in Grafana.
|
||||
- Appropriate permissions to create alert rules.
|
||||
- Understanding of the log patterns you want to monitor.
|
||||
- The **Manage alert rules in Alerting UI** toggle enabled in the Loki data source settings.
|
||||
|
||||
## Supported query types
|
||||
|
||||
Loki alerting requires **metric queries** that return numeric time series data. You must use LogQL metric queries that wrap log stream selectors with aggregation functions.
|
||||
|
||||
### Query types and alerting compatibility
|
||||
|
||||
| Query type | Alerting support | Notes |
|
||||
| ------------- | ---------------- | ----------------------------------------------- |
|
||||
| Metric query | ✅ Full support | Use range aggregation functions like `rate()` |
|
||||
| Log query | ❌ Not supported | Convert to metric query using aggregations |
|
||||
| Instant query | ⚠️ Limited | Range queries recommended for time-based alerts |
|
||||
|
||||
### Common metric functions for alerting
|
||||
|
||||
Use these LogQL functions to convert log queries into metric queries suitable for alerting:
|
||||
|
||||
| Function | Description | Example |
|
||||
| -------------------- | ---------------------------------------------- | --------------------------------------------------- |
|
||||
| `rate()` | Rate of log entries per second | `rate({job="app"}[5m])` |
|
||||
| `count_over_time()` | Count of log entries in the specified interval | `count_over_time({job="app"}[5m])` |
|
||||
| `sum_over_time()` | Sum of extracted numeric values | `sum_over_time({job="app"} \| unwrap latency [5m])` |
|
||||
| `avg_over_time()` | Average of extracted numeric values | `avg_over_time({job="app"} \| unwrap latency [5m])` |
|
||||
| `max_over_time()` | Maximum extracted value in the interval | `max_over_time({job="app"} \| unwrap latency [5m])` |
|
||||
| `bytes_rate()` | Rate of bytes per second | `bytes_rate({job="app"}[5m])` |
|
||||
| `absent_over_time()` | Returns 1 if no logs exist in the interval | `absent_over_time({job="app"}[5m])` |
|
||||
|
||||
## Create an alert rule
|
||||
|
||||
To create an alert rule using Loki:
|
||||
|
||||
1. Navigate to **Alerting** > **Alert rules**.
|
||||
1. Click **New alert rule**.
|
||||
1. Enter a name for the alert rule.
|
||||
1. Select your **Loki** data source.
|
||||
1. Build your metric query:
|
||||
- Start with a log stream selector (for example, `{job="app"}`)
|
||||
- Add filters if needed (for example, `|= "error"`)
|
||||
- Wrap with a metric function (for example, `rate(...[5m])`)
|
||||
1. Configure the alert condition (for example, when the rate is above a threshold).
|
||||
1. Set the evaluation interval and pending period.
|
||||
1. Configure notifications and labels.
|
||||
1. Click **Save rule**.
|
||||
|
||||
For detailed instructions, refer to [Create a Grafana-managed alert rule](ref:create-alert-rule).
|
||||
|
||||
## Example alert queries
|
||||
|
||||
The following examples show common alerting scenarios with Loki.
|
||||
|
||||
### Alert on high error rate
|
||||
|
||||
Monitor the rate of error logs:
|
||||
|
||||
```logql
|
||||
rate({job="app"} |= "error" [5m]) > 0.1
|
||||
```
|
||||
|
||||
This query calculates the rate of log lines containing "error" per second over the last 5 minutes and alerts when it exceeds 0.1 errors per second.
|
||||
|
||||
### Alert on error count threshold
|
||||
|
||||
Monitor the count of errors in a time window:
|
||||
|
||||
```logql
|
||||
sum(count_over_time({job="app", level="error"}[15m])) > 100
|
||||
```
|
||||
|
||||
This query counts error-level logs over 15 minutes and alerts when the count exceeds 100.
|
||||
|
||||
### Alert on high latency
|
||||
|
||||
Monitor request latency extracted from logs:
|
||||
|
||||
```logql
|
||||
avg_over_time({job="api"} | logfmt | unwrap duration [5m]) > 500
|
||||
```
|
||||
|
||||
This query extracts the `duration` field from logfmt-formatted logs and alerts when the average exceeds 500 milliseconds.
|
||||
|
||||
### Alert on missing logs
|
||||
|
||||
Detect when a service stops sending logs:
|
||||
|
||||
```logql
|
||||
absent_over_time({job="critical-service"}[10m])
|
||||
```
|
||||
|
||||
This query alerts when no logs are received from the critical service for 10 minutes.
|
||||
|
||||
### Alert by label grouping
|
||||
|
||||
Monitor errors grouped by service:
|
||||
|
||||
```logql
|
||||
sum by (service) (rate({namespace="production"} |= "error" [5m])) > 0.05
|
||||
```
|
||||
|
||||
This query calculates error rates per service and alerts when any service exceeds the threshold.
|
||||
|
||||
## Recording rules
|
||||
|
||||
Recording rules pre-compute frequently used or expensive LogQL queries and save the results as new time series metrics. This improves query performance and reduces load on your Loki instance.
|
||||
|
||||
For detailed information about recording rules, refer to [Create recording rules](ref:recording-rules).
|
||||
|
||||
### Use cases for Loki recording rules
|
||||
|
||||
Recording rules are useful when you need to:
|
||||
|
||||
- **Pre-aggregate expensive queries:** Convert complex log aggregations into simple metric queries.
|
||||
- **Track trends over time:** Create metrics from log data that would otherwise be too expensive to query repeatedly.
|
||||
- **Reuse queries across dashboards:** Compute a metric once and reference it in multiple dashboards and alerts.
|
||||
- **Reduce query latency:** Query precomputed results instead of scanning logs in real time.
|
||||
|
||||
### Types of recording rules
|
||||
|
||||
Loki supports two types of recording rules:
|
||||
|
||||
- **Grafana-managed recording rules:** Query Loki using LogQL and store results in a Prometheus-compatible data source. This is the recommended option. Refer to [Create Grafana-managed recording rules](ref:grafana-managed-recording-rules).
|
||||
- **Data source-managed recording rules:** Define recording rules directly in Loki using the Loki ruler. Refer to [Create data source-managed recording rules](ref:data-source-managed-recording-rules).
|
||||
|
||||
### Example recording rule
|
||||
|
||||
The following example creates a metric that tracks the error rate per service:
|
||||
|
||||
```logql
|
||||
sum by (service) (rate({namespace="production"} |= "error" [5m]))
|
||||
```
|
||||
|
||||
This query runs on a schedule (for example, every minute) and stores the result as a new metric. You can then query this precomputed metric in dashboards and alert rules instead of running the full LogQL query each time.
|
||||
|
||||
## Limitations
|
||||
|
||||
When using Loki with Grafana Alerting, be aware of the following limitations:
|
||||
|
||||
### Template variables not supported
|
||||
|
||||
Alert queries cannot contain template variables. Grafana evaluates alert rules on the backend without dashboard context, so variables like `$job` or `$namespace` are not resolved.
|
||||
|
||||
If your dashboard query uses template variables, create a separate query for alerting with hard-coded values.
|
||||
|
||||
### Log queries not supported
|
||||
|
||||
Queries that return log lines cannot be used for alerting. You must convert log queries to metric queries using aggregation functions like `rate()` or `count_over_time()`.
|
||||
|
||||
### Query time range
|
||||
|
||||
Alert queries use the evaluation interval to determine the time range, not the dashboard time picker. Ensure your metric function intervals (for example, `[5m]`) align with your alert evaluation frequency.
|
||||
|
||||
## Best practices
|
||||
|
||||
Follow these best practices when creating Loki alerts:
|
||||
|
||||
- **Use metric queries:** Always wrap log stream selectors with metric functions for alerting.
|
||||
- **Match intervals:** Align the LogQL time interval (for example, `[5m]`) with your alert evaluation interval.
|
||||
- **Be specific with selectors:** Use precise label selectors to reduce the amount of data scanned.
|
||||
- **Test queries first:** Verify your query returns expected numeric results in Explore before creating an alert.
|
||||
- **Use meaningful thresholds:** Base alert thresholds on historical patterns in your log data.
|
||||
- **Add context with labels:** Include relevant labels in your alert to help with triage.
|
||||
143
docs/sources/datasources/loki/annotations/index.md
Normal file
143
docs/sources/datasources/loki/annotations/index.md
Normal file
@@ -0,0 +1,143 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/loki/annotations/
|
||||
description: Use Loki log events as annotations in Grafana dashboards
|
||||
keywords:
|
||||
- grafana
|
||||
- loki
|
||||
- annotations
|
||||
- events
|
||||
- logs
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Annotations
|
||||
title: Loki annotations
|
||||
weight: 400
|
||||
refs:
|
||||
annotate-visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
configure-loki:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
---
|
||||
|
||||
# Loki annotations
|
||||
|
||||
Annotations overlay event data on your dashboard graphs, helping you correlate log events with metrics. You can use Loki as a data source for annotations to display events such as deployments, errors, or other significant occurrences on your visualizations.
|
||||
|
||||
For general information about annotations, refer to [Annotate visualizations](ref:annotate-visualizations).
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before creating Loki annotations, ensure you have:
|
||||
|
||||
- A [Loki data source configured](ref:configure-loki) in Grafana.
|
||||
- Logs in Loki containing the events you want to display as annotations.
|
||||
- Read access to the Loki logs you want to query.
|
||||
|
||||
## Create an annotation query
|
||||
|
||||
To add a Loki annotation to your dashboard:
|
||||
|
||||
1. Navigate to your dashboard and click **Dashboard settings** (gear icon).
|
||||
1. Select **Annotations** in the left menu.
|
||||
1. Click **Add annotation query**.
|
||||
1. Enter a **Name** for the annotation.
|
||||
1. Select your **Loki** data source from the **Data source** dropdown.
|
||||
1. Enter a LogQL query in the query field.
|
||||
1. Configure the optional formatting fields (Title, Tags, Text).
|
||||
1. Click **Save dashboard**.
|
||||
|
||||
## Query
|
||||
|
||||
Use the query field to enter a LogQL expression that filters the log events to display as annotations. Only log queries are supported for annotations; metric queries are not supported.
|
||||
|
||||
**Examples:**
|
||||
|
||||
| Query | Description |
|
||||
| ----------------------------------------- | ------------------------------------------------- |
|
||||
| `{job="app"}` | Shows all logs from the "app" job. |
|
||||
| `{job="app"} \|= "error"` | Shows logs containing "error" from the "app" job. |
|
||||
| `{namespace="production"} \|= "deployed"` | Shows deployment events in production. |
|
||||
| `{job="app"} \| logfmt \| level="error"` | Shows error-level logs using logfmt parsing. |
|
||||
| `{job="$job"}` | Uses a template variable to filter by job. |
|
||||
|
||||
You can use template variables in your annotation queries to make them dynamic based on dashboard selections.
|
||||
|
||||
## Formatting options
|
||||
|
||||
Loki annotations support optional formatting fields to customize how annotations are displayed.
|
||||
|
||||
### Title
|
||||
|
||||
The **Title** field specifies a pattern for the annotation title. You can use label values by wrapping the label name in double curly braces.
|
||||
|
||||
- **Default:** Empty (uses the log line as the title)
|
||||
- **Pattern example:** `{{instance}}` displays the value of the `instance` label
|
||||
- **Pattern example:** `{{job}} - {{level}}` combines multiple labels
|
||||
|
||||
### Tags
|
||||
|
||||
The **Tags** field specifies which labels to use as annotation tags. Enter label names as a comma-separated list.
|
||||
|
||||
- **Default:** All labels are used as tags
|
||||
- **Example:** `job,instance,level` uses only these three labels as tags
|
||||
|
||||
Tags help categorize and filter annotations in the dashboard.
|
||||
|
||||
### Text
|
||||
|
||||
The **Text** field specifies a pattern for the annotation text displayed when you hover over the annotation. You can use label values by wrapping the label name in double curly braces.
|
||||
|
||||
- **Default:** The log line content
|
||||
- **Pattern example:** `{{message}}` displays the value of a parsed `message` label
|
||||
- **Pattern example:** `Error on {{instance}}: {{error}}` creates a descriptive message
|
||||
|
||||
### Line limit
|
||||
|
||||
The **Line limit** field controls the maximum number of log lines returned for annotations. This helps prevent performance issues when querying logs with many results.
|
||||
|
||||
- **Default:** Uses the data source's configured maximum lines setting
|
||||
|
||||
## Example: Deployment annotations
|
||||
|
||||
To display deployment events as annotations:
|
||||
|
||||
1. Create an annotation query with the following settings:
|
||||
- **Query:** `{job="deploy-service"} |= "deployed"`
|
||||
- **Title:** `Deployment: {{app}}`
|
||||
- **Tags:** `app,environment`
|
||||
- **Text:** `{{message}}`
|
||||
|
||||
This configuration displays deployment logs with the application name in the title and environment as a tag.
|
||||
|
||||
## Example: Error annotations
|
||||
|
||||
To overlay error events on your metrics graphs:
|
||||
|
||||
1. Create an annotation query with the following settings:
|
||||
- **Query:** `{namespace="production"} | logfmt | level="error"`
|
||||
- **Title:** `{{job}} error`
|
||||
- **Tags:** `job,instance`
|
||||
|
||||
This configuration displays error logs from production, grouped by job and instance.
|
||||
|
||||
## Example: Filter annotations with template variables
|
||||
|
||||
To create dynamic annotations that respond to dashboard variable selections:
|
||||
|
||||
1. Create a template variable named `job` that queries Loki label values.
|
||||
1. Create an annotation query with the following settings:
|
||||
- **Query:** `{job="$job"} |= "alert"`
|
||||
- **Title:** `Alert: {{alertname}}`
|
||||
- **Tags:** `severity`
|
||||
|
||||
This configuration displays only alerts for the selected job, making the annotations relevant to the current dashboard context.
|
||||
@@ -1,146 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../data-sources/loki/
|
||||
- ../features/datasources/loki/
|
||||
description: Configure the Loki data source
|
||||
keywords:
|
||||
- grafana
|
||||
- loki
|
||||
- logging
|
||||
- guide
|
||||
- data source
|
||||
menuTitle: Configure Loki
|
||||
title: Configure the Loki data source
|
||||
weight: 200
|
||||
refs:
|
||||
log-details:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#labels-and-detected-fields
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#labels-and-detected-fields
|
||||
---
|
||||
|
||||
# Loki data source
|
||||
|
||||
Grafana ships with built-in support for [Loki](/docs/loki/latest/), an open-source log aggregation system by Grafana Labs. If you are new to Loki, the following documentation will help you get started:
|
||||
|
||||
- [Getting started](/docs/loki/latest/get-started/)
|
||||
- [Best practices](/docs/loki/latest/best-practices/#best-practices)
|
||||
|
||||
## Configure the Loki data source
|
||||
|
||||
To add the Loki data source, complete the following steps:
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Under **Connections**, click **Add new connection**.
|
||||
1. Enter `Loki` in the search bar.
|
||||
1. Select **Loki data source**.
|
||||
1. Click **Create a Loki data source** in the upper right.
|
||||
|
||||
You will be taken to the **Settings** tab where you will set up your Loki configuration.
|
||||
|
||||
## Configuration options
|
||||
|
||||
The following is a list of configuration options for Loki.
|
||||
|
||||
The first option to configure is the name of your connection:
|
||||
|
||||
- **Name** - The data source name. This is how you refer to the data source in panels and queries. Examples: loki-1, loki_logs.
|
||||
|
||||
- **Default** - Toggle to select as the default name in dashboard panels. When you go to a dashboard panel, this will be the default selected data source.
|
||||
|
||||
### HTTP section
|
||||
|
||||
- **URL** - The URL of your Loki server. Loki uses port 3100. If your Loki server is local, use `http://localhost:3100`. If it is on a server within a network, this is the URL with port where you are running Loki. Example: `http://loki.example.orgname:3100`.
|
||||
|
||||
- **Allowed cookies** - Specify cookies by name that should be forwarded to the data source. The Grafana proxy deletes all forwarded cookies by default.
|
||||
|
||||
- **Timeout** - The HTTP request timeout. This must be in seconds. There is no default, so this setting is up to you.
|
||||
|
||||
### Auth section
|
||||
|
||||
There are several authentication methods you can choose in the Authentication section.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Use TLS (Transport Layer Security) for an additional layer of security when working with Loki. For information on setting up TLS encryption with Loki, see [Grafana Loki configuration parameters](/docs/loki/latest/configuration/).
|
||||
{{< /admonition >}}
|
||||
|
||||
- **Basic authentication** - The most common authentication method. Use your `data source` user name and `data source` password to connect.
|
||||
|
||||
- **With credentials** - Toggle on to enable credentials such as cookies or auth headers to be sent with cross-site requests.
|
||||
|
||||
- **TLS client authentication** - Toggle on to use client authentication. When enabled, add the `Server name`, `Client cert` and `Client key`. The client provides a certificate that is validated by the server to establish the client's trusted identity. The client key encrypts the data between client and server.
|
||||
|
||||
- **With CA cert** - Authenticate with a CA certificate. Follow the instructions of the CA (Certificate Authority) to download the certificate file.
|
||||
|
||||
- **Skip TLS verify** - Toggle on to bypass TLS certificate validation.
|
||||
|
||||
- **Forward OAuth identity** - Forward the OAuth access token (and also the OIDC ID token if available) of the user querying the data source.
|
||||
|
||||
### Custom HTTP headers
|
||||
|
||||
- **Header** - Add a custom header. This allows custom headers to be passed based on the needs of your Loki instance.
|
||||
|
||||
- **Value** - The value of the header.
|
||||
|
||||
### Alerting
|
||||
|
||||
- **Manage alert rules in Alerting UI** - Toggle on to manage alert rules for the Loki data source. To manage other alerting resources add an `Alertmanager` data source.
|
||||
|
||||
### Queries
|
||||
|
||||
- **Maximum lines** - Sets the maximum number of log lines returned by Loki. Increase the limit to have a bigger result set for ad-hoc analysis. Decrease the limit if your browser is sluggish when displaying log results. The default is `1000`.
|
||||
|
||||
<!-- {{< admonition type="note" >}}
|
||||
To troubleshoot configuration and other issues, check the log file located at `/var/log/grafana/grafana.log` on Unix systems, or in `<grafana_install_dir>/data/log` on other platforms and manual installations.
|
||||
{{< /admonition >}} -->
|
||||
|
||||
### Derived fields
|
||||
|
||||
Derived Fields are used to extract new fields from your logs and create a link from the value of the field.
|
||||
|
||||
For example, you can link to your tracing backend directly from your logs, or link to a user profile page if the log line contains a corresponding `userId`.
|
||||
These links appear in the [log details](ref:log-details).
|
||||
|
||||
You can add multiple derived fields.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
If you use Grafana Cloud, you can request modifications to this feature by clicking **Open a Support Ticket** from the Grafana Cloud Portal.
|
||||
{{< /admonition >}}
|
||||
|
||||
Each derived field consists of the following:
|
||||
|
||||
- **Name** - Sets the field name. Displayed as a label in the log details.
|
||||
|
||||
- **Type** - Defines the type of the derived field. It can be either:
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
Using complex regular expressions in either type can impact browser performance when processing large volumes of logs. Consider using simpler patterns when possible.
|
||||
{{< /admonition >}}
|
||||
|
||||
- **Regex**: A regular expression to parse a part of the log message and capture it as the value of the new field. Can contain only one capture group.
|
||||
|
||||
- **Label**: A label from the selected log line. This can be any type of label - indexed, parsed or structured metadata. When using this type, the input will match as a regular expression against label keys, allowing you to match variations like `traceid` and `trace_id` with a single regex pattern like `trace[_]?id`. The value of the matched label will be used as the value of the derived field.
|
||||
|
||||
- **URL/query** - Sets the full link URL if the link is external, or a query for the target data source if the link is internal. You can interpolate the value from the field with the `${__value.raw}` macro.
|
||||
|
||||
- **URL Label** - Sets a custom display label for the link. This setting overrides the link label, which defaults to the full external URL or name of the linked internal data source.
|
||||
|
||||
- **Internal link** - Toggle on to define an internal link. For internal links, you can select the target data source from a selector. This supports only tracing data sources.
|
||||
|
||||
- **Open in new tab** - Toggle on to open the link in a new tab or window.
|
||||
|
||||
- **Show example log message** - Click to paste an example log line to test the regular expression of your derived fields.
|
||||
|
||||
Click **Save & test** to test your connection.
|
||||
|
||||
#### Troubleshoot interpolation
|
||||
|
||||
You can use a debug section to see what your fields extract and how the URL is interpolated.
|
||||
Select **Show example log message** to display a text area where you can enter a log message.
|
||||
|
||||
{{< figure src="/static/img/docs/v75/loki_derived_fields_settings.png" class="docs-image--no-shadow" max-width="800px" caption="Screenshot of the derived fields debugging" >}}
|
||||
|
||||
The new field with the link shown in log details:
|
||||
|
||||
{{< figure src="/static/img/docs/explore/data-link-9-4.png" max-width="800px" caption="Data link in Explore" >}}
|
||||
363
docs/sources/datasources/loki/configure/index.md
Normal file
363
docs/sources/datasources/loki/configure/index.md
Normal file
@@ -0,0 +1,363 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/loki/configure/
|
||||
description: Configure the Loki data source
|
||||
keywords:
|
||||
- grafana
|
||||
- loki
|
||||
- logging
|
||||
- guide
|
||||
- data source
|
||||
menuTitle: Configure
|
||||
title: Configure the Loki data source
|
||||
weight: 200
|
||||
refs:
|
||||
log-details:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#labels-and-detected-fields
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#labels-and-detected-fields
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
configure-pdc:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
provisioning-data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
data-source-management:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
loki-query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/query-editor/
|
||||
loki-template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/template-variables/
|
||||
---
|
||||
|
||||
# Configure the Loki data source
|
||||
|
||||
This document provides instructions for configuring the Loki data source and explains available configuration options. For general information about data sources, refer to [Data source management](ref:data-source-management).
|
||||
|
||||
Grafana ships with built-in support for [Loki](https://grafana.com/docs/loki/latest/), an open-source log aggregation system by Grafana Labs. If you are new to Loki, the following documentation will help you get started:
|
||||
|
||||
- [Getting started](https://grafana.com/docs/loki/latest/get-started/)
|
||||
- [Best practices](https://grafana.com/docs/loki/latest/best-practices/#best-practices)
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before configuring the Loki data source, ensure you have the following:
|
||||
|
||||
- **Grafana permissions:** You must have the `Organization administrator` role to configure data sources. Organization administrators can also [configure the data source via YAML](#provision-the-data-source) with the Grafana provisioning system or [using Terraform](#provision-the-data-source-using-terraform).
|
||||
|
||||
- **Loki instance:** You need a running Loki instance and its URL. If you don't have one, refer to the [Loki installation documentation](https://grafana.com/docs/loki/latest/setup/install/).
|
||||
|
||||
- **Authentication details (if applicable):** If your Loki instance requires authentication, gather the necessary credentials such as username and password for basic authentication, or any required certificates for TLS authentication.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The Loki data source plugin is built into Grafana. No additional installation is required.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Add the Loki data source
|
||||
|
||||
To add the Loki data source, complete the following steps:
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Under **Connections**, click **Add new connection**.
|
||||
1. Enter `Loki` in the search bar.
|
||||
1. Select **Loki data source**.
|
||||
1. Click **Create a Loki data source** in the upper right.
|
||||
|
||||
You are taken to the **Settings** tab where you will set up your Loki configuration.
|
||||
|
||||
## Configure Loki using the UI
|
||||
|
||||
The following are the configuration options for Loki.
|
||||
|
||||
| Name | Description |
|
||||
| ----------- | ---------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Name** | The data source name. This is how you refer to the data source in panels and queries. Examples: `loki-1`, `loki_logs`. |
|
||||
| **Default** | Toggle to set this data source as the default. When enabled, new panels automatically use this data source. |
|
||||
|
||||
### Connection section
|
||||
|
||||
| Name | Description |
|
||||
| ------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **URL** | The URL of your Loki server, including the port. The default Loki port is `3100`. Examples: `http://localhost:3100`, `http://loki.example.org:3100`. |
|
||||
|
||||
### Authentication section
|
||||
|
||||
Select an authentication method from the **Authentication** dropdown.
|
||||
|
||||
| Setting | Description |
|
||||
| -------------------------- | ---------------------------------------------------------------------------------------------------------- |
|
||||
| **No authentication** | No authentication is required to access the data source. |
|
||||
| **Basic authentication** | Authenticate using a username and password. Enter the credentials in the **User** and **Password** fields. |
|
||||
| **Forward OAuth identity** | Forward the OAuth access token (and the OIDC ID token if available) of the user querying the data source. |
|
||||
|
||||
### TLS settings
|
||||
|
||||
Use TLS (Transport Layer Security) for an additional layer of security when working with Loki. For more information on setting up TLS encryption with Loki, refer to [Grafana Loki configuration parameters](https://grafana.com/docs/loki/latest/configuration/).
|
||||
|
||||
| Setting | Description |
|
||||
| ----------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Add self-signed certificate** | Enable to add a self-signed CA certificate. When enabled, enter the certificate in the **CA Certificate** field. The certificate must begin with `-----BEGIN CERTIFICATE-----`. |
|
||||
| **TLS Client Authentication** | Enable to use client certificate authentication. When enabled, enter the **ServerName** (for example, `domain.example.com`), **Client Certificate** (begins with `-----BEGIN CERTIFICATE-----`), and **Client Key** (begins with `-----BEGIN RSA PRIVATE KEY-----`). |
|
||||
| **Skip TLS certificate validation** | Enable to bypass TLS certificate validation. Use this option only for testing or when connecting to Loki instances with self-signed certificates. |
|
||||
|
||||
### HTTP headers
|
||||
|
||||
Use HTTP headers to pass along additional context and metadata about the request/response.
|
||||
|
||||
| Setting | Description |
|
||||
| ---------- | -------------------------------------------------------------- |
|
||||
| **Header** | The name of the custom header. For example, `X-Custom-Header`. |
|
||||
| **Value** | The value of the custom header. For example, `Header value`. |
|
||||
|
||||
Click **+ Add another header** to add additional headers.
|
||||
|
||||
## Additional settings
|
||||
|
||||
Additional settings are optional settings that you can configure for more control over your data source.
|
||||
|
||||
### Advanced HTTP settings
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Allowed cookies** | Specify cookies by name that should be forwarded to the data source. The Grafana proxy deletes all forwarded cookies by default. |
|
||||
| **Timeout** | The HTTP request timeout in seconds. If not set, the default Grafana timeout is used. |
|
||||
|
||||
### Alerting
|
||||
|
||||
Manage alert rules for the Loki data source. For more information, refer to [Alerting](ref:alerting).
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------------------------- | ---------------------------------------------------------------------------------- |
|
||||
| **Manage alert rules in Alerting UI** | Toggle to manage alert rules for this Loki data source in the Grafana Alerting UI. |
|
||||
|
||||
### Queries
|
||||
|
||||
Configure options to customize your querying experience.
|
||||
|
||||
| Setting | Description |
|
||||
| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Maximum lines** | The maximum number of log lines returned by Loki. The default is `1000`. Increase for larger result sets during ad-hoc analysis. Decrease if your browser is sluggish when displaying log results. |
|
||||
|
||||
### Derived fields
|
||||
|
||||
Derived fields can be used to extract new fields from a log message and create a link from its value. For example, you can link to your tracing backend directly from your logs. These links appear in the [log details](ref:log-details).
|
||||
|
||||
Click **+ Add** to add a derived field. Each derived field has the following settings:
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Name** | The field name. Displayed as a label in the log details. |
|
||||
| **Type** | The type of derived field. Select **Regex in log line** to extract values using a regular expression, or **Label** to use an existing label value. |
|
||||
| **Regex** | A regular expression to parse a part of the log message and capture it as the value of the new field. Can contain only one capture group. |
|
||||
| **URL** | The full link URL if the link is external, or a query for the target data source if the link is internal. You can interpolate the value from the field with the `${__value.raw}` macro. For example, `http://example.com/${__value.raw}`. |
|
||||
| **URL Label** | A custom display label for the link. This setting overrides the link label, which defaults to the full external URL or name of the linked internal data source. |
|
||||
| **Internal link** | Toggle to define an internal link. When enabled, you can select the target data source from a selector. This supports only tracing data sources. |
|
||||
| **Open in new tab** | Toggle to open the link in a new browser tab or window. |
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
Using complex regular expressions can impact browser performance when processing large volumes of logs. Consider using simpler patterns when possible.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Test derived fields
|
||||
|
||||
To test your derived field configuration:
|
||||
|
||||
1. Click **Show example log message** to display the debug section.
|
||||
1. In the **Debug log message** field, paste an example log line to test the regular expressions of your derived fields.
|
||||
1. Verify that the field extracts the expected value and the URL is interpolated correctly.
|
||||
|
||||
### Private data source connect
|
||||
|
||||
_Only for Grafana Cloud users._
|
||||
|
||||
Private data source connect, or PDC, allows you to establish a private, secured connection between a Grafana Cloud instance, or stack, and data sources secured within a private network. Click the drop-down to locate the URL for PDC. For more information regarding Grafana PDC, refer to [Private data source connect (PDC)](ref:private-data-source-connect) and [Configure Grafana private data source connect (PDC)](ref:configure-pdc) for instructions on setting up a PDC connection.
|
||||
|
||||
Click **Manage private data source connect** to open your PDC connection page and view your configuration details.
|
||||
|
||||
## Verify the connection
|
||||
|
||||
After configuring the data source, click **Save & test** to save your settings and verify the connection. A successful connection displays the following message:
|
||||
|
||||
**Data source successfully connected.**
|
||||
|
||||
If the test fails, verify:
|
||||
|
||||
- The Loki URL is correct and accessible from the Grafana server.
|
||||
- Any required authentication credentials are correct.
|
||||
- Network connectivity and firewall rules allow the connection.
|
||||
- TLS certificates are valid (if using HTTPS).
|
||||
|
||||
## Provision the data source
|
||||
|
||||
You can define and configure the data source in YAML files as part of the Grafana provisioning system.
|
||||
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-data-sources).
|
||||
|
||||
### Provisioning examples
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Loki
|
||||
type: loki
|
||||
access: proxy
|
||||
url: http://localhost:3100
|
||||
jsonData:
|
||||
timeout: 60
|
||||
maxLines: 1000
|
||||
```
|
||||
|
||||
**Using basic authorization and a derived field:**
|
||||
|
||||
You must escape the dollar (`$`) character in YAML values because it can be used to interpolate environment variables:
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Loki
|
||||
type: loki
|
||||
access: proxy
|
||||
url: http://localhost:3100
|
||||
basicAuth: true
|
||||
basicAuthUser: my_user
|
||||
jsonData:
|
||||
maxLines: 1000
|
||||
derivedFields:
|
||||
# Field with internal link pointing to data source in Grafana.
|
||||
# datasourceUid value can be anything, but it should be unique across all defined data source uids.
|
||||
- datasourceUid: my_jaeger_uid
|
||||
matcherRegex: "traceID=(\\w+)"
|
||||
name: TraceID
|
||||
# url will be interpreted as query for the datasource
|
||||
url: '$${__value.raw}'
|
||||
# optional for URL Label to set a custom display label for the link.
|
||||
urlDisplayLabel: 'View Trace'
|
||||
|
||||
# Field with external link.
|
||||
- matcherRegex: "traceID=(\\w+)"
|
||||
name: TraceID
|
||||
url: 'http://localhost:16686/trace/$${__value.raw}'
|
||||
secureJsonData:
|
||||
basicAuthPassword: test_password
|
||||
```
|
||||
|
||||
**Using a Jaeger data source:**
|
||||
|
||||
In this example, the Jaeger data source's `uid` value should match the Loki data source's `datasourceUid` value.
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Jaeger
|
||||
type: jaeger
|
||||
url: http://jaeger-tracing-query:16686/
|
||||
access: proxy
|
||||
# UID should match the datasourceUid in derivedFields.
|
||||
uid: my_jaeger_uid
|
||||
```
|
||||
|
||||
## Provision the data source using Terraform
|
||||
|
||||
You can provision the Loki data source using [Terraform](https://www.terraform.io/) with the [Grafana Terraform provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs).
|
||||
|
||||
For more information about provisioning resources with Terraform, refer to the [Grafana as code using Terraform](https://grafana.com/docs/grafana-cloud/developer-resources/infrastructure-as-code/terraform/) documentation.
|
||||
|
||||
### Basic Terraform example
|
||||
|
||||
The following example creates a basic Loki data source:
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "loki" {
|
||||
name = "Loki"
|
||||
type = "loki"
|
||||
url = "http://localhost:3100"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
maxLines = 1000
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
### Terraform example with derived fields
|
||||
|
||||
The following example creates a Loki data source with a derived field that links to a Jaeger data source for trace correlation:
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "loki_with_tracing" {
|
||||
name = "Loki"
|
||||
type = "loki"
|
||||
url = "http://localhost:3100"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
maxLines = 1000
|
||||
derivedFields = [
|
||||
{
|
||||
datasourceUid = grafana_data_source.jaeger.uid
|
||||
matcherRegex = "traceID=(\\w+)"
|
||||
name = "TraceID"
|
||||
url = "$${__value.raw}"
|
||||
urlDisplayLabel = "View Trace"
|
||||
}
|
||||
]
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
### Terraform example with basic authentication
|
||||
|
||||
The following example includes basic authentication:
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "loki_auth" {
|
||||
name = "Loki"
|
||||
type = "loki"
|
||||
url = "http://localhost:3100"
|
||||
|
||||
basic_auth_enabled = true
|
||||
basic_auth_username = "loki_user"
|
||||
|
||||
secure_json_data_encoded = jsonencode({
|
||||
basicAuthPassword = var.loki_password
|
||||
})
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
maxLines = 1000
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
For all available configuration options, refer to the [Grafana provider data source resource documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/data_source).
|
||||
|
||||
## Next steps
|
||||
|
||||
After configuring your Loki data source, explore these resources:
|
||||
|
||||
- [Query the Loki data source](ref:loki-query-editor) to learn how to build LogQL queries in Grafana
|
||||
- [Use template variables](ref:loki-template-variables) to create dynamic, reusable dashboards
|
||||
- [LogQL documentation](https://grafana.com/docs/loki/latest/query/) to learn more about the Loki query language
|
||||
@@ -16,11 +16,6 @@ menuTitle: Query editor
|
||||
title: Loki query editor
|
||||
weight: 300
|
||||
refs:
|
||||
annotate-visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
logs:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/logs/
|
||||
@@ -36,231 +31,247 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/template-variables/
|
||||
configure-loki:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
loki-troubleshooting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/troubleshooting/
|
||||
---
|
||||
|
||||
# Loki query editor
|
||||
|
||||
The Loki data source's query editor helps you create [log](#create-a-log-query) and [metric](#create-a-metric-query) queries that use Loki's query language, [LogQL](/docs/loki/latest/logql/).
|
||||
The Loki data source query editor helps you create [log](#create-a-log-query) and [metric](#create-a-metric-query) queries using [LogQL](https://grafana.com/docs/loki/latest/logql/), Loki's query language.
|
||||
|
||||
You can query and display log data from Loki in [Explore](ref:explore) and in dashboards using the [Logs panel](ref:logs).
|
||||
|
||||
For general documentation on querying data sources in Grafana, refer to [Query and transform data](ref:query-transform-data).
|
||||
|
||||
## Before you begin
|
||||
|
||||
- [Configure the Loki data source](ref:configure-loki).
|
||||
- Familiarize yourself with [LogQL](https://grafana.com/docs/loki/latest/logql/).
|
||||
|
||||
## Choose a query editing mode
|
||||
|
||||
The Loki query editor has two modes:
|
||||
|
||||
- [Builder mode](#builder-mode), which provides a visual query designer.
|
||||
- [Code mode](#code-mode), which provides a feature-rich editor for writing queries.
|
||||
- **Builder mode** - Build queries using a visual interface without manually entering LogQL. Best for users new to Loki and LogQL.
|
||||
- **Code mode** - Write queries using a text editor with autocompletion, syntax highlighting, and query validation.
|
||||
|
||||
To switch between the editor modes, select the corresponding **Builder** and **Code** tabs.
|
||||
To switch between modes, select the **Builder** or **Code** tab at the top of the editor.
|
||||
|
||||
To run a query, select **Run queries** located at the top of the editor.
|
||||
Both modes are synchronized, so you can switch between them without losing your work. However, Builder mode doesn't support some complex queries. When switching from Code mode to Builder mode with an unsupported query, the editor displays a warning explaining which parts of the query might be lost.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
To run Loki queries in [Explore](ref:explore), select **Run query**.
|
||||
{{< /admonition >}}
|
||||
## Toolbar features
|
||||
|
||||
Each mode is synchronized, so you can switch between them without losing your work, although there are some limitations. Builder mode doesn't support some complex queries.
|
||||
When you switch from Code mode to Builder mode with such a query, the editor displays a warning message that explains how you might lose parts of the query if you continue.
|
||||
You can then decide whether you still want to switch to Builder mode.
|
||||
The query editor toolbar provides features available in both Builder and Code mode.
|
||||
|
||||
You can also augment queries by using [template variables](../template-variables/).
|
||||
### Kick start your query
|
||||
|
||||
## Toolbar elements
|
||||
|
||||
The query editor toolbar contains the following elements:
|
||||
|
||||
- **Kick start your query** - Click to see a list of queries that help you quickly get started creating LogQL queries. You can then continue to complete your query.
|
||||
|
||||
These include:
|
||||
Click **Kick start your query** to see a list of example queries that help you get started quickly. These include:
|
||||
|
||||
- Log query starters
|
||||
- Metric query starters
|
||||
|
||||
Click the arrow next to each to see available query options.
|
||||
Click the arrow next to each category to see available query templates. Selecting a template populates the query editor with a starting query you can customize.
|
||||
|
||||
- **Label browser** - Use the Loki label browser to navigate through your labels and values, and build queries.
|
||||
### Label browser
|
||||
|
||||
To navigate Loki and build a query:
|
||||
Use the label browser to explore available labels and values in your Loki instance:
|
||||
|
||||
1. Choose labels to locate.
|
||||
1. Search for the values of your selected labels.
|
||||
1. Click **Label browser** in the toolbar.
|
||||
1. Select labels to filter.
|
||||
1. Search for values using the search field, which supports fuzzy matching.
|
||||
|
||||
The search field supports fuzzy search, and the label browser also supports faceting to list only possible label combinations.
|
||||
The label browser supports faceting to show only valid label combinations.
|
||||
|
||||
1. Select the **Show logs** button to display log lines based on the selected labels, or select the **Show logs rate** button to show the rate based on metrics such as requests per second. Additionally, you can validate the selector by clicking the **Validate selector** button. Click **Clear** to start from the beginning.
|
||||
Click **Show logs** to display log lines based on the selected labels, or **Show logs rate** to show a rate metric. Use **Validate selector** to check your selection, or **Clear** to start over.
|
||||
|
||||
{{< figure src="/static/img/docs/explore/Loki_label_browser.png" class="docs-image--no-shadow" max-width="800px" caption="The Loki label browser" >}}
|
||||
|
||||
- **Explain query** - Toggle to display a step-by-step explanation of all query components and operations.
|
||||
### Explain query
|
||||
|
||||
{{< figure src="/static/img/docs/prometheus/explain-results.png" max-width="500px" class="docs-image--no-shadow" caption="Explain results" >}}
|
||||
Toggle **Explain query** to display a step-by-step explanation of all query components and operations. This helps you understand how your query works and learn LogQL syntax.
|
||||
|
||||
- **Builder/Code** - Click the corresponding **Builder** or **Code** tab on the toolbar to select an editor mode.
|
||||
{{< figure src="/static/img/docs/prometheus/explain-results.png" max-width="500px" class="docs-image--no-shadow" caption="Explain query results" >}}
|
||||
|
||||
## Builder mode
|
||||
## Build a query in Builder mode
|
||||
|
||||
Builder mode helps you build queries using a visual interface without needing to manually enter LogQL. This option is best for users who have limited or no previous experience working with Loki and LogQL.
|
||||
Builder mode provides a visual interface for constructing LogQL queries without writing code.
|
||||
|
||||
### Label filters
|
||||
### Select labels
|
||||
|
||||
Select labels and their values from the dropdown list.
|
||||
When you select a label, Grafana retrieves available values from the server.
|
||||
Start by selecting labels to filter your log streams:
|
||||
|
||||
Use the `+` button to add a label and the `x` button to remove a label. You can add multiple labels.
|
||||
1. Select a label from the **Label** dropdown.
|
||||
1. Choose a comparison operator:
|
||||
- `=` - equals
|
||||
- `!=` - does not equal
|
||||
- `=~` - matches regex
|
||||
- `!~` - does not match regex
|
||||
1. Select a value from the **Value** dropdown, which displays available values for the selected label.
|
||||
|
||||
Select comparison operators from the following options:
|
||||
Use the `+` button to add additional label filters and the `x` button to remove them.
|
||||
|
||||
- `=` - equal to
|
||||
- `!=` - is not equal
|
||||
- `=~` - matches regex
|
||||
- `!~` - does not match regex
|
||||
### Add operations
|
||||
|
||||
Select values by using the dropdown, which displays all possible values based on the label selected.
|
||||
Select the **+ Operations** button to add operations to your query. The query editor groups operations into the following categories:
|
||||
|
||||
### Operations
|
||||
- **Aggregations** - refer to [Built-in aggregation operators](https://grafana.com/docs/loki/latest/logql/metric_queries/#built-in-aggregation-operators)
|
||||
- **Range functions** - refer to [Range Vector aggregation](https://grafana.com/docs/loki/latest/logql/metric_queries/#range-vector-aggregation)
|
||||
- **Formats** - refer to [Log queries](https://grafana.com/docs/loki/latest/logql/log_queries/#log-queries)
|
||||
- **Binary operations** - refer to [Binary operators](https://grafana.com/docs/loki/latest/logql/#binary-operators)
|
||||
- **Label filters** - refer to [Label filter expression](https://grafana.com/docs/loki/latest/logql/log_queries/#label-filter-expression)
|
||||
- **Line filters** - refer to [Line filter expression](https://grafana.com/docs/loki/latest/logql/log_queries/#line-filter-expression)
|
||||
|
||||
Select the `+ Operations` button to add operations to your query.
|
||||
The query editor groups operations into related sections, and you can type while the operations dropdown is open to search and filter the list.
|
||||
You can type while the operations dropdown is open to search and filter the list.
|
||||
|
||||
The query editor displays a query's operations as boxes in the operations section.
|
||||
Each operation's header displays its name, and additional action buttons appear when you hover your cursor over the header:
|
||||
Each operation appears as a box in the query editor. Hover over an operation's header to reveal action buttons:
|
||||
|
||||
| Button | Action |
|
||||
| ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
|
||||
| {{< figure src="/static/img/docs/v95/loki_operation_replace.png" class="docs-image--no-shadow" max-width="30px" >}} | Replaces the operation with a different operation of the same type. |
|
||||
| {{< figure src="/static/img/docs/v95/loki_operation_description.png" class="docs-image--no-shadow" max-width="30px" >}} | Opens the operation's description tooltip. |
|
||||
| {{< figure src="/static/img/docs/v95/loki_operation_remove.png" class="docs-image--no-shadow" max-width="30px" >}} | Removes the operation. |
|
||||
| Button | Action |
|
||||
| ----------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------ |
|
||||
| {{< figure src="/static/img/docs/v95/loki_operation_replace.png" class="docs-image--no-shadow" max-width="30px" >}} | Replace the operation with a different operation of the same type. |
|
||||
| {{< figure src="/static/img/docs/v95/loki_operation_description.png" class="docs-image--no-shadow" max-width="30px" >}} | Open the operation's description tooltip. |
|
||||
| {{< figure src="/static/img/docs/v95/loki_operation_remove.png" class="docs-image--no-shadow" max-width="30px" >}} | Remove the operation. |
|
||||
|
||||
The query editor groups operations into the following sections:
|
||||
Some operations only make sense in a specific order. If adding an operation would result in an invalid query, the editor automatically places it in the correct position. To re-order operations manually, drag the operation box by its name and drop it in the desired location.
|
||||
|
||||
- Aggregations - see [Built-in aggregation operators](/docs/loki/latest/logql/metric_queries/#built-in-aggregation-operators)
|
||||
- Range functions - see [Range Vector aggregation](/docs/loki/latest/logql/metric_queries/#range-vector-aggregation)
|
||||
- Formats - see [Log queries](/docs/loki/latest/logql/log_queries/#log-queries)
|
||||
- Binary operations - see [Binary operators](/docs/loki/latest/logql/#binary-operators)
|
||||
- Label filters - see [Label filter expression](/docs/loki/latest/logql/log_queries/#label-filter-expression)
|
||||
- Line filters - see [Line filter expression](/docs/loki/latest/logql/log_queries/#line-filter-expression)
|
||||
For more information, refer to [Order of operations](https://grafana.com/docs/loki/latest/logql/#order-of-operations).
|
||||
|
||||
Some operations make sense only when used in a specific order. If adding an operation would result in a nonsensical query, the query editor adds the operation to the correct place.
|
||||
To re-order operations manually, drag the operation box by its name and drop it into the desired place. For additional information see [Order of operations](/docs/loki/latest/logql/#order-of-operations).
|
||||
### Query preview
|
||||
|
||||
As you build your query, the editor displays a visual preview of the query structure. Each step is numbered and includes a description:
|
||||
|
||||
- **Step 1** typically shows your label selector (for example, `{}` with "Fetch all log lines matching label filters")
|
||||
- **Subsequent steps** show operations you've added (for example, `|= ""` with "Return log lines that contain string")
|
||||
|
||||
The raw LogQL query is displayed at the bottom of the query editor, showing the complete syntax that will be executed.
|
||||
|
||||
### Hints
|
||||
|
||||
In some cases the query editor can detect which operations would be most appropriate for a selected log stream. In such cases it will show a hint next to the `+ Operations` button. Click on the hint to add the operations to your query.
|
||||
The query editor can detect which operations would be most appropriate for a selected log stream. When available, a hint appears next to the **+ Operations** button. Click the hint to add the suggested operations to your query.
|
||||
|
||||
## Code mode
|
||||
## Write a query in Code mode
|
||||
|
||||
In **Code mode**, you can write complex queries using a text editor with autocompletion feature, syntax highlighting, and query validation.
|
||||
It also contains a [label browser](#label-browser) to further help you write queries.
|
||||
Code mode provides a text editor for writing LogQL queries directly. This mode is ideal for complex queries or users familiar with LogQL syntax.
|
||||
|
||||
For more information about Loki's query language, refer to the [Loki documentation](/docs/loki/latest/logql/).
|
||||
### Autocompletion
|
||||
|
||||
### Use autocompletion
|
||||
Autocompletion works automatically as you type. The editor can autocomplete:
|
||||
|
||||
Code mode's autocompletion feature works automatically while typing.
|
||||
- Static functions, aggregations, and keywords
|
||||
- Dynamic items like labels and label values
|
||||
|
||||
The query editor can autocomplete static functions, aggregations, and keywords, and also dynamic items like labels.
|
||||
The autocompletion dropdown includes documentation for the suggested items where available.
|
||||
The autocompletion dropdown includes documentation for suggested items where available.
|
||||
|
||||
## Options
|
||||
## Configure query options
|
||||
|
||||
The following options are the same for both **Builder** and **Code** mode:
|
||||
The following options are available in both Builder and Code mode. Expand the **Options** section to configure them.
|
||||
|
||||
- **Legend** - Controls the time series name, using a name or pattern. For example, `{{hostname}}` is replaced with the label value for the label `hostname`.
|
||||
| Option | Description |
|
||||
| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Legend** | Controls the time series name using a name or pattern. For example, `{{hostname}}` is replaced with the label value for the label `hostname`. |
|
||||
| **Type** | Selects the query type. `instant` queries a single point in time (uses the "To" time from the time range). `range` queries over the selected time range. |
|
||||
| **Line limit** | Defines the maximum number of log lines returned by a query. Default is `1000`. |
|
||||
| **Direction** | Determines the search order. **Backward** searches from the end of the time range (default). **Forward** searches from the beginning. |
|
||||
| **Step** | Sets the step parameter for metric queries. Default is `$__auto`, calculated using the time range and graph width. |
|
||||
|
||||
- **Type** - Selects the query type to run. The `instant` type queries against a single point in time. We use the "To" time from the time range. The `range` type queries over the selected range of time.
|
||||
### Query stats
|
||||
|
||||
- **Line limit** - Defines the upper limit for the number of log lines returned by a query. The default is `1000`.
|
||||
The Options section displays query statistics to help you estimate the size and cost of your query before running it. Stats include:
|
||||
|
||||
- **Direction** - Determines the search order. **Backward** is a backward search starting at the end of the time range. **Forward** is a forward search starting at the beginning of the time range. The default is **Backward**
|
||||
- **Streams** - Number of log streams matching your label selectors
|
||||
- **Chunks** - Number of data chunks to be scanned
|
||||
- **Bytes** - Estimated data size
|
||||
- **Entries** - Estimated number of log entries
|
||||
|
||||
- **Step** - Sets the step parameter of Loki metrics queries. The default value equals the value of the `$__auto` variable, which is calculated using the time range and the width of the graph (the number of pixels).
|
||||
These statistics update automatically as you build your query and can help you optimize queries to reduce load on your Loki instance.
|
||||
|
||||
## Run a query
|
||||
|
||||
To execute your query, click **Run queries** at the top of the query editor. The results display in the visualization panel below the editor.
|
||||
|
||||
In Explore, you can also press `Shift+Enter` to run the query.
|
||||
|
||||
## Create a log query
|
||||
|
||||
Loki log queries return the contents of the log lines.
|
||||
You can query and display log data from Loki via [Explore](ref:explore), and with the [Logs panel](ref:logs) in dashboards.
|
||||
Log queries return the contents of log lines. These are the most common type of Loki query.
|
||||
|
||||
To display the results of a log query, select the Loki data source, then enter a LogQL query.
|
||||
To create a log query:
|
||||
|
||||
For more information about log queries and LogQL, refer to the [Loki log queries documentation](/docs/loki/latest/logql/log_queries/).
|
||||
1. Select labels to filter your log streams.
|
||||
1. Optionally add line filters to search for specific text patterns.
|
||||
1. Optionally add parsers (like `json` or `logfmt`) to extract fields from log lines.
|
||||
1. Click **Run queries** to execute the query.
|
||||
|
||||
For more information about log queries and LogQL, refer to the [Loki log queries documentation](https://grafana.com/docs/loki/latest/logql/log_queries/).
|
||||
|
||||
### Show log context
|
||||
|
||||
In Explore, you can retrieve the context surrounding your log results by clicking the `Show Context` button. You'll be able to investigate the logs from the same log stream that came before and after the log message you're interested in.
|
||||
In Explore, click **Show Context** on any log line to view the surrounding logs from the same log stream.
|
||||
|
||||
The initial log context query is created from all labels defining the stream for the selected log line. You can use the log context query editor to widen the search by removing one or more of the label filters from log stream. Additionally, if you used a parser in your original query, you can refine your search by using extracted labels filters.
|
||||
The initial context query uses all labels from the selected log line. You can widen the search by removing label filters in the log context query editor. If your original query used a parser, you can also refine the search using extracted label filters.
|
||||
|
||||
To reduce the repetition of selecting and removing the same labels when examining multiple log context windows, Grafana stores your selected labels and applies them to each open context window. This lets you seamlessly navigate through various log context windows without having to reapply your filters.
|
||||
Grafana stores your label selections and applies them to each context window you open, so you don't need to reapply filters when examining multiple log lines.
|
||||
|
||||
To reset filters and use the initial log context query, click the `Revert to initial query` button next to the query preview.
|
||||
|
||||
### Tail live logs
|
||||
|
||||
Loki supports live tailing of logs in real-time in [Explore](ref:explore).
|
||||
|
||||
Live tailing relies on two Websocket connections: one between the browser and Grafana server, and another between the Grafana server and Loki server.
|
||||
|
||||
To start tailing logs click the **Live** button in the top right corner of the Explore view.
|
||||
{{< figure src="/static/img/docs/v95/loki_tailing.png" class="docs-image--no-shadow" max-width="80px" >}}
|
||||
|
||||
#### Proxying examples
|
||||
|
||||
If you use reverse proxies, configure them as shown in the following examples to support live tailing:
|
||||
|
||||
**Using Apache2 for proxying between the browser and the Grafana server:**
|
||||
|
||||
```
|
||||
ProxyPassMatch "^/(api/datasources/proxy/\d+/loki/api/v1/tail)" "ws://127.0.0.1:3000/$1"
|
||||
```
|
||||
|
||||
**Using NGINX:**
|
||||
|
||||
This example provides a basic NGINX proxy configuration.
|
||||
It assumes that the Grafana server is available at `http://localhost:3000/`, the Loki server is running locally without proxy, and your external site uses HTTPS.
|
||||
If you also host Loki behind an NGINX proxy, repeat the following configuration for Loki.
|
||||
|
||||
In the `http` section of NGINX configuration, add the following map definition:
|
||||
|
||||
```
|
||||
map $http_upgrade $connection_upgrade {
|
||||
default upgrade;
|
||||
'' close;
|
||||
}
|
||||
```
|
||||
|
||||
In your `server` section, add the following configuration:
|
||||
|
||||
```
|
||||
location ~ /(api/datasources/proxy/\d+/loki/api/v1/tail) {
|
||||
proxy_pass http://localhost:3000$request_uri;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-for $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto "https";
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
}
|
||||
|
||||
location / {
|
||||
proxy_pass http://localhost:3000/;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-for $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto "https";
|
||||
}
|
||||
```
|
||||
To reset filters, click **Revert to initial query** next to the query preview.
|
||||
|
||||
## Create a metric query
|
||||
|
||||
You can use LogQL to wrap a log query with functions that create metrics from your logs.
|
||||
Metric queries use LogQL to extract numeric data from logs. You wrap a log query with aggregation functions to create time series data for visualization and alerting.
|
||||
|
||||
For more information about metric queries, refer to the [Loki metric queries documentation](/docs/loki/latest/logql/metric_queries/).
|
||||
### Common metric query patterns
|
||||
|
||||
## Apply annotations
|
||||
| Function | Description | Example |
|
||||
| ------------------- | ----------------------------------------------- | ---------------------------------------------------- |
|
||||
| `rate()` | Calculates the number of log entries per second | `rate({job="app"}[5m])` |
|
||||
| `count_over_time()` | Counts log entries over the specified interval | `count_over_time({job="app"}[1h])` |
|
||||
| `bytes_rate()` | Calculates bytes per second of log entries | `bytes_rate({job="app"}[5m])` |
|
||||
| `sum_over_time()` | Sums extracted numeric values | `sum_over_time({job="app"} \| unwrap duration [5m])` |
|
||||
|
||||
[Annotations](ref:annotate-visualizations) overlay rich event information on top of graphs.
|
||||
You can add annotation queries in the Dashboard menu's Annotations view.
|
||||
### Build a metric query
|
||||
|
||||
You can only use log queries as a source for annotations.
|
||||
Grafana automatically uses log content as annotation text and your log stream labels as tags.
|
||||
You don't need to create any additional mapping.
|
||||
To create a metric query in Builder mode:
|
||||
|
||||
1. Select labels to filter your log streams.
|
||||
1. Click **+ Operations** and select a range function (for example, **Rate**).
|
||||
1. The editor wraps your log selector with the function and adds a time interval.
|
||||
1. Optionally add aggregations like `sum`, `avg`, or `max` to combine results.
|
||||
|
||||
In Code mode, enter the full LogQL expression directly:
|
||||
|
||||
```logql
|
||||
sum(rate({job="app", level="error"}[5m])) by (instance)
|
||||
```
|
||||
|
||||
This query calculates the per-second rate of error logs, then sums the results grouped by instance.
|
||||
|
||||
For more information, refer to the [Loki metric queries documentation](https://grafana.com/docs/loki/latest/logql/metric_queries/).
|
||||
|
||||
## Tail live logs
|
||||
|
||||
Loki supports live tailing of logs in real-time in [Explore](ref:explore).
|
||||
|
||||
To start tailing logs, click the **Live** button in the top right corner of the Explore view.
|
||||
|
||||
{{< figure src="/static/img/docs/v95/loki_tailing.png" class="docs-image--no-shadow" max-width="80px" >}}
|
||||
|
||||
Live tailing relies on two WebSocket connections: one between the browser and Grafana server, and another between the Grafana server and Loki server.
|
||||
|
||||
If you use reverse proxies, you may need to configure them to support WebSocket connections. For proxy configuration examples, refer to the [Loki troubleshooting documentation](ref:loki-troubleshooting).
|
||||
|
||||
## Use template variables
|
||||
|
||||
You can use template variables in your queries to create dynamic, reusable dashboards. Template variables appear as dropdown menus at the top of dashboards, allowing users to change query parameters without editing the query directly.
|
||||
|
||||
For information on creating and using template variables with Loki, refer to [Loki template variables](ref:template-variables).
|
||||
|
||||
@@ -38,22 +38,32 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
query-editor-options:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/query-editor/#options
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/query-editor/#options
|
||||
configure-loki:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
---
|
||||
|
||||
# Loki template variables
|
||||
|
||||
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
|
||||
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
|
||||
Grafana refers to such variables as template variables.
|
||||
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables. Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard. Grafana refers to such variables as template variables.
|
||||
|
||||
For an introduction to templating and template variables, refer to the [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables) documentation.
|
||||
|
||||
## Before you begin
|
||||
|
||||
- Ensure you have [configured the Loki data source](ref:configure-loki).
|
||||
- Your Loki instance should have logs with labels that you want to use as variable values.
|
||||
|
||||
## Use query variables
|
||||
|
||||
Variables of the type _Query_ help you query Loki for lists of labels or label values.
|
||||
The Loki data source provides a form to select the type of values expected for a given variable.
|
||||
|
||||
The form has these options:
|
||||
Use _Query_ type variables to dynamically fetch label names or label values from Loki. When you create a query variable with the Loki data source, you can choose what type of data to retrieve:
|
||||
|
||||
| Query type | Example label | Example stream selector | List returned |
|
||||
| ------------ | ------------- | ----------------------- | ---------------------------------------------------------------- |
|
||||
@@ -61,6 +71,26 @@ The form has these options:
|
||||
| Label values | `label` | | Label values for `label`. |
|
||||
| Label values | `label` | `log stream selector` | Label values for `label` in the specified `log stream selector`. |
|
||||
|
||||
### Create a query variable
|
||||
|
||||
To create a query variable for Loki:
|
||||
|
||||
1. Open the dashboard where you want to add the variable.
|
||||
1. Click **Dashboard settings** (gear icon) in the top navigation.
|
||||
1. Select **Variables** in the left menu.
|
||||
1. Click **Add variable**.
|
||||
1. Enter a **Name** for your variable (for example, `job`, `instance`, `level`).
|
||||
1. In the **Type** dropdown, select **Query**.
|
||||
1. In the **Data source** dropdown, select your Loki data source.
|
||||
1. In the **Query type** dropdown, select **Label names** or **Label values**.
|
||||
1. If you selected **Label values**, enter the label name in the **Label** field (for example, `job`).
|
||||
1. Optionally, enter a **Stream selector** to filter the label values (for example, `{namespace="production"}`).
|
||||
1. Click **Run query** to preview the variable values.
|
||||
1. Configure display options such as **Multi-value** or **Include All option** as needed.
|
||||
1. Click **Apply** to save the variable.
|
||||
|
||||
You can now use the variable in your Loki queries with the syntax `${variable_name}`. For example, `{job="$job"}` filters logs by the selected job.
|
||||
|
||||
## Use ad hoc filters
|
||||
|
||||
Loki supports the special **Ad hoc filters** variable type.
|
||||
@@ -68,27 +98,29 @@ You can use this variable type to specify any number of key/value filters, and G
|
||||
|
||||
For more information, refer to [Add ad hoc filters](ref:add-template-variables-add-ad-hoc-filters).
|
||||
|
||||
## Use $\_\_auto variable for Loki metric queries
|
||||
## Use the $\_\_auto variable for Loki metric queries
|
||||
|
||||
Consider using the `$__auto` variable in your Loki metric queries, which will automatically be substituted with the [step value](https://grafana.com/docs/grafana/next/datasources/loki/query-editor/#options) for range queries, and with the selected time range's value (computed from the starting and ending times) for instant queries.
|
||||
Consider using the `$__auto` variable in your Loki metric queries. This variable is automatically substituted with the [step value](ref:query-editor-options) for range queries, and with the selected time range's value (computed from the starting and ending times) for instant queries.
|
||||
|
||||
For more information about variables, refer to [Global built-in variables](ref:add-template-variables-global-variables).
|
||||
|
||||
## Label extraction and indexing in Loki
|
||||
## Extract and index labels in Loki
|
||||
|
||||
Labels play a fundamental role in Loki's log aggregation and querying capabilities. When logs are ingested into Loki, they are often accompanied by metadata called `labels`, which provide contextual information about the log entries. These labels consist of `key-value` pairs and are essential for organizing, filtering, and searching log data efficiently.
|
||||
|
||||
### Label extraction
|
||||
### Extract labels
|
||||
|
||||
During the ingestion process, Loki performs label extraction from log lines. Loki's approach to label extraction is based on `regular expressions`, allowing users to specify custom patterns for parsing log lines and extracting relevant label key-value pairs. This flexibility enables Loki to adapt to various log formats and schemas.
|
||||
|
||||
For example, suppose you have log lines in the following format:
|
||||
|
||||
**2023-07-25 12:34:56 INFO: Request from IP A.B.C.D to endpoint /api/data**
|
||||
```
|
||||
2023-07-25 12:34:56 INFO: Request from IP A.B.C.D to endpoint /api/data
|
||||
```
|
||||
|
||||
To extract labels from this log format, you could define a regular expression to extract the log level ("INFO"), IP address ("A.B.C.D"), and endpoint ("/api/data") as labels. These labels can later be used to filter and aggregate log entries.
|
||||
To extract labels from this log format, you could define a regular expression to extract the log level (`INFO`), IP address (`A.B.C.D`), and endpoint (`/api/data`) as labels. These labels can later be used to filter and aggregate log entries.
|
||||
|
||||
### Indexing labels
|
||||
### Index labels
|
||||
|
||||
Once labels are extracted, Loki efficiently indexes them. The index serves as a lookup mechanism that maps labels to the corresponding log entries. This indexing process enables faster retrieval of logs based on specific label criteria, significantly enhancing query performance.
|
||||
|
||||
@@ -96,6 +128,4 @@ For instance, if you have a label "job" that represents different services in yo
|
||||
|
||||
By effectively extracting and indexing labels, Loki enables users to perform complex and targeted log queries without compromising on query speed and resource consumption.
|
||||
|
||||
Utilizing Loki's indexed labels in combination with Grafana's template variables provides a powerful way to interactively explore and visualize log data. Template variables allow users to create dynamic queries, selecting and filtering logs based on various labels, such as job names, instance IDs, severity levels, or any other contextual information attached to the log entries.
|
||||
|
||||
In conclusion, Loki's label extraction and indexing mechanisms are key components that contribute to its ability to handle vast amounts of log data efficiently. By making use of labels and template variables, users can easily gain valuable insights from their log data and troubleshoot issues effectively.
|
||||
Combining Loki's indexed labels with Grafana template variables provides a powerful way to interactively explore and visualize log data. Template variables let you create dynamic queries that filter logs based on labels such as job names, instance IDs, or severity levels.
|
||||
|
||||
387
docs/sources/datasources/loki/troubleshooting/index.md
Normal file
387
docs/sources/datasources/loki/troubleshooting/index.md
Normal file
@@ -0,0 +1,387 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/loki/troubleshooting/
|
||||
description: Troubleshoot issues with the Loki data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- loki
|
||||
- troubleshooting
|
||||
- errors
|
||||
- logs
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Troubleshooting
|
||||
title: Troubleshoot issues with the Loki data source
|
||||
weight: 600
|
||||
refs:
|
||||
configure-loki:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
---
|
||||
|
||||
# Troubleshoot issues with the Loki data source
|
||||
|
||||
This document provides troubleshooting information for common errors you may encounter when using the Loki data source in Grafana.
|
||||
|
||||
## Connection errors
|
||||
|
||||
The following errors occur when Grafana cannot establish or maintain a connection to Loki.
|
||||
|
||||
### Unable to connect with Loki
|
||||
|
||||
**Error message:** "Unable to connect with Loki. Please check the server logs for more details."
|
||||
|
||||
**Cause:** Grafana cannot establish a network connection to the Loki server.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the Loki URL is correct in the [data source configuration](ref:configure-loki).
|
||||
1. Check that Loki is running and accessible from the Grafana server.
|
||||
1. Ensure no firewall rules are blocking the connection.
|
||||
1. If using a proxy, verify the proxy settings are correct.
|
||||
1. For Grafana Cloud, ensure you have configured [Private data source connect](ref:private-data-source-connect) if your Loki instance is not publicly accessible.
|
||||
|
||||
### Request timed out
|
||||
|
||||
**Error message:** "context deadline exceeded" or "request timed out"
|
||||
|
||||
**Cause:** The connection to Loki timed out before receiving a response.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check the network latency between Grafana and Loki.
|
||||
1. Verify Loki is not overloaded or experiencing performance issues.
|
||||
1. Increase the **Timeout** setting in the data source configuration under **Additional settings** > **Advanced HTTP settings**.
|
||||
1. Check if any network devices (load balancers, proxies) are timing out the connection.
|
||||
1. Reduce the time range or complexity of your query.
|
||||
|
||||
### Failed to parse data source URL
|
||||
|
||||
**Error message:** "Failed to parse data source URL"
|
||||
|
||||
**Cause:** The URL entered in the data source configuration is not valid.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the URL format is correct (for example, `http://localhost:3100` or `https://loki.example.com:3100`).
|
||||
1. Ensure the URL includes the protocol (`http://` or `https://`).
|
||||
1. Remove any trailing slashes or invalid characters from the URL.
|
||||
|
||||
## Authentication errors
|
||||
|
||||
The following errors occur when there are issues with authentication credentials or permissions.
|
||||
|
||||
### Unauthorized (401)
|
||||
|
||||
**Error message:** "Status: 401 Unauthorized"
|
||||
|
||||
**Cause:** The authentication credentials are invalid or missing.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the username and password are correct in the data source configuration.
|
||||
1. Check the authentication method matches your Loki configuration.
|
||||
1. If using a bearer token or API key, ensure it is valid and has not expired.
|
||||
1. Verify the credentials have permission to access the Loki API.
|
||||
|
||||
### Forbidden (403)
|
||||
|
||||
**Error message:** "Status: 403 Forbidden"
|
||||
|
||||
**Cause:** The authenticated user does not have permission to access the requested resource.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the user has read access to the log streams you are querying.
|
||||
1. Check Loki's authentication and authorization configuration.
|
||||
1. If using multi-tenancy, ensure the correct tenant ID (X-Scope-OrgID header) is configured.
|
||||
1. Review any access control policies in your Loki deployment.
|
||||
|
||||
## Query errors
|
||||
|
||||
The following errors occur when there are issues with LogQL query syntax or execution.
|
||||
|
||||
### Parse error
|
||||
|
||||
**Error message:** "parse error" or "syntax error"
|
||||
|
||||
**Cause:** The LogQL query contains invalid syntax.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check the query for typos or missing characters.
|
||||
1. Verify all brackets, braces, and parentheses are properly balanced.
|
||||
1. Ensure label matchers use the correct operators (`=`, `!=`, `=~`, `!~`).
|
||||
1. Verify string values are enclosed in double quotes.
|
||||
1. Refer to the [LogQL documentation](https://grafana.com/docs/loki/latest/query/) for correct syntax.
|
||||
|
||||
**Common syntax issues:**
|
||||
|
||||
| Issue | Incorrect | Correct |
|
||||
| ----------------- | -------------- | -------------- |
|
||||
| Missing quotes | `{job=app}` | `{job="app"}` |
|
||||
| Wrong operator | `{job=="app"}` | `{job="app"}` |
|
||||
| Unbalanced braces | `{job="app"` | `{job="app"}` |
|
||||
| Invalid regex | `{job=~"["}` | `{job=~"\\["}` |
|
||||
|
||||
### Query limits exceeded
|
||||
|
||||
**Error message:** "query returned more than the max number of entries" or "max entries limit exceeded"
|
||||
|
||||
**Cause:** The query returned more log entries than the configured limit allows.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Add more specific label selectors to reduce the number of matching streams.
|
||||
1. Add line filters to narrow down the results (for example, `|= "error"`).
|
||||
1. Reduce the time range of your query.
|
||||
1. Increase the **Maximum lines** setting in the data source configuration.
|
||||
1. If you control the Loki instance, consider adjusting Loki's `max_entries_limit_per_query` setting.
|
||||
|
||||
### Query timeout
|
||||
|
||||
**Error message:** "query timed out"
|
||||
|
||||
**Cause:** The query took longer to execute than the configured timeout.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Simplify the query by adding more selective label matchers.
|
||||
1. Reduce the time range.
|
||||
1. Avoid expensive operations like complex regex patterns on high-cardinality data.
|
||||
1. If you control the Loki instance, check Loki's query timeout settings.
|
||||
|
||||
### Too many outstanding requests
|
||||
|
||||
**Error message:** "too many outstanding requests"
|
||||
|
||||
**Cause:** Loki has reached its limit for concurrent queries.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Wait a moment and retry the query.
|
||||
1. Reduce the number of panels or dashboards querying Loki simultaneously.
|
||||
1. If you control the Loki instance, consider increasing Loki's concurrency limits.
|
||||
|
||||
## Metric query errors
|
||||
|
||||
The following errors occur when using LogQL metric queries.
|
||||
|
||||
### Invalid unwrap expression
|
||||
|
||||
**Error message:** "invalid unwrap expression" or "unwrap: label does not exist"
|
||||
|
||||
**Cause:** The `unwrap` function references a label that doesn't exist or isn't numeric.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the label name in the `unwrap` expression exists in your log data.
|
||||
1. Ensure the label contains numeric values.
|
||||
1. Add a parser stage (`| logfmt`, `| json`, etc.) before `unwrap` to extract the label from log content.
|
||||
|
||||
**Example fix:**
|
||||
|
||||
```logql
|
||||
# Incorrect - label might not exist
|
||||
{job="app"} | unwrap latency
|
||||
|
||||
# Correct - parse the log first
|
||||
{job="app"} | logfmt | unwrap latency
|
||||
```
|
||||
|
||||
### Division by zero
|
||||
|
||||
**Error message:** "division by zero"
|
||||
|
||||
**Cause:** A metric query attempted to divide by zero.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Add conditions to handle cases where the denominator could be zero.
|
||||
1. Use the `or` operator to provide a default value.
|
||||
|
||||
## Common issues
|
||||
|
||||
The following issues don't always produce specific error messages but are commonly encountered.
|
||||
|
||||
### Empty query results
|
||||
|
||||
**Cause:** The query returns no data.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the time range includes data in your Loki instance.
|
||||
1. Check that the label selectors match existing log streams.
|
||||
1. Use the **Label browser** in the query editor to see available labels and values.
|
||||
1. Start with a simple query like `{job="your-job"}` and add filters incrementally.
|
||||
1. Verify logs are being ingested into Loki for the selected time range.
|
||||
|
||||
### Slow query performance
|
||||
|
||||
**Cause:** Queries take a long time to execute.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Add more specific label selectors. Labels are indexed, so filtering by labels is fast.
|
||||
1. Reduce the time range of your query.
|
||||
1. Avoid regex filters on high-volume streams when possible.
|
||||
1. Use line filters (`|=`, `!=`) before expensive regex operations.
|
||||
1. For metric queries, ensure you're using appropriate aggregation intervals.
|
||||
|
||||
**Query optimization tips:**
|
||||
|
||||
| Slow | Fast |
|
||||
| ----------------------------------------- | ------------------------------------------------- |
|
||||
| `{namespace="prod"} \|~ "error.*timeout"` | `{namespace="prod", level="error"} \|= "timeout"` |
|
||||
| `{job=~".+"}` (matches all) | `{job="specific-job"}` |
|
||||
| Wide time range, no filters | Narrow time range with label filters |
|
||||
|
||||
### Labels not appearing in dropdown
|
||||
|
||||
**Cause:** The label browser doesn't show expected labels.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check that logs with those labels exist in the selected time range.
|
||||
1. Verify the labels are indexed in Loki (not just parsed from log content).
|
||||
1. Refresh the label browser by clicking the refresh button.
|
||||
1. Clear your browser cache and reload the page.
|
||||
|
||||
### Log lines truncated
|
||||
|
||||
**Cause:** Long log lines are cut off in the display.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Click on a log line to expand and view the full content.
|
||||
1. Use the **Wrap lines** option in the logs visualization settings.
|
||||
1. The full log content is always available; only the display is truncated.
|
||||
|
||||
### Derived fields not working
|
||||
|
||||
**Cause:** Derived fields configured in the data source aren't appearing in log details.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the regex pattern in your derived field configuration matches your log format.
|
||||
1. Test the regex in the **Debug** section of the derived fields configuration.
|
||||
1. Ensure the derived field has a valid URL or internal data source configured.
|
||||
1. Check that the log lines contain text matching the regex pattern.
|
||||
|
||||
## Live tailing issues
|
||||
|
||||
The following issues occur when using the live log tailing feature.
|
||||
|
||||
### Live tailing not working
|
||||
|
||||
**Cause:** Live tailing relies on WebSocket connections that may be blocked by proxies or firewalls.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify WebSocket connections are allowed through your network infrastructure.
|
||||
1. Check that your reverse proxy is configured to support WebSocket connections.
|
||||
1. Ensure the Grafana server can establish a WebSocket connection to Loki.
|
||||
|
||||
### Configure reverse proxies for live tailing
|
||||
|
||||
If you use reverse proxies, configure them to support WebSocket connections for live tailing.
|
||||
|
||||
**Apache2 configuration:**
|
||||
|
||||
Add the following to proxy WebSocket connections:
|
||||
|
||||
```apache
|
||||
ProxyPassMatch "^/(api/datasources/proxy/\d+/loki/api/v1/tail)" "ws://127.0.0.1:3000/$1"
|
||||
```
|
||||
|
||||
**NGINX configuration:**
|
||||
|
||||
This example assumes the Grafana server is available at `http://localhost:3000/`, the Loki server is running locally without a proxy, and your external site uses HTTPS. If you also host Loki behind NGINX, repeat this configuration for Loki.
|
||||
|
||||
In the `http` section of your NGINX configuration, add the following map definition:
|
||||
|
||||
```nginx
|
||||
map $http_upgrade $connection_upgrade {
|
||||
default upgrade;
|
||||
'' close;
|
||||
}
|
||||
```
|
||||
|
||||
In your `server` section, add the following configuration:
|
||||
|
||||
```nginx
|
||||
location ~ /(api/datasources/proxy/\d+/loki/api/v1/tail) {
|
||||
proxy_pass http://localhost:3000$request_uri;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto "https";
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
}
|
||||
|
||||
location / {
|
||||
proxy_pass http://localhost:3000/;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto "https";
|
||||
}
|
||||
```
|
||||
|
||||
## Multi-tenancy issues
|
||||
|
||||
The following errors occur when using Loki in multi-tenant mode.
|
||||
|
||||
### No org id
|
||||
|
||||
**Error message:** "no org id" or "X-Scope-OrgID header required"
|
||||
|
||||
**Cause:** Loki is configured for multi-tenancy but no tenant ID was provided.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Add a custom HTTP header `X-Scope-OrgID` with your tenant ID in the data source configuration.
|
||||
1. Navigate to **Additional settings** > **HTTP headers** and add the header.
|
||||
|
||||
### Tenant not found
|
||||
|
||||
**Error message:** "tenant not found" or "invalid tenant"
|
||||
|
||||
**Cause:** The specified tenant ID doesn't exist or the user doesn't have access.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the tenant ID is correct.
|
||||
1. Check that the tenant exists in your Loki deployment.
|
||||
1. Verify the user has permission to access the specified tenant.
|
||||
|
||||
## Get additional help
|
||||
|
||||
If you continue to experience issues:
|
||||
|
||||
- Check the [Grafana community forums](https://community.grafana.com/) for similar issues and solutions.
|
||||
- Review the [Loki documentation](https://grafana.com/docs/loki/latest/) for detailed configuration and query guidance.
|
||||
- Contact Grafana Support if you're an Enterprise, Cloud Pro, or Cloud contracted customer.
|
||||
|
||||
When reporting issues, include the following information:
|
||||
|
||||
- Grafana version
|
||||
- Loki version
|
||||
- Deployment type (self-hosted Loki, Grafana Cloud Logs)
|
||||
- Error messages (redact sensitive information)
|
||||
- Steps to reproduce the issue
|
||||
- Relevant configuration such as data source settings, authentication method, and timeout values (redact credentials)
|
||||
- Sample LogQL query (if applicable, with sensitive data redacted)
|
||||
- Time range of the query
|
||||
- Approximate volume of logs being queried
|
||||
@@ -23,8 +23,6 @@ killercoda:
|
||||
|
||||
This tutorial is a continuation of the [Get started with Grafana Alerting - Route alerts using dynamic labels](http://www.grafana.com/tutorials/alerting-get-started-pt5/) tutorial.
|
||||
|
||||
{{< youtube id="mqj_hN24zLU" >}}
|
||||
|
||||
<!-- USE CASE -->
|
||||
|
||||
In this tutorial you will learn how to:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { test, expect } from '@grafana/plugin-e2e';
|
||||
|
||||
import { flows, saveDashboard, type Variable } from './utils';
|
||||
import { flows, type Variable } from './utils';
|
||||
|
||||
test.use({
|
||||
featureToggles: {
|
||||
@@ -64,7 +64,20 @@ test.describe(
|
||||
label: 'VariableUnderTest',
|
||||
};
|
||||
|
||||
await flows.addNewTextBoxVariable(dashboardPage, variable);
|
||||
// common steps to add a new variable
|
||||
await flows.newEditPaneVariableClick(dashboardPage, selectors);
|
||||
await flows.newEditPanelCommonVariableInputs(dashboardPage, selectors, variable);
|
||||
|
||||
// set the textbox variable value
|
||||
const type = 'variable-type Value';
|
||||
const fieldLabel = dashboardPage.getByGrafanaSelector(
|
||||
selectors.components.PanelEditor.OptionsPane.fieldLabel(type)
|
||||
);
|
||||
await expect(fieldLabel).toBeVisible();
|
||||
const inputField = fieldLabel.locator('input');
|
||||
await expect(inputField).toBeVisible();
|
||||
await inputField.fill(variable.value);
|
||||
await inputField.blur();
|
||||
|
||||
// select the variable in the dashboard and confirm the variable value is set
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.SubMenu.submenuItem).click();
|
||||
@@ -127,94 +140,5 @@ test.describe(
|
||||
await expect(panelContent).toBeVisible();
|
||||
await expect(markdownContent).toContainText('VariableUnderTest: 10m');
|
||||
});
|
||||
test('can hide a variable', async ({ dashboardPage, selectors, page }) => {
|
||||
const variable: Variable = {
|
||||
type: 'textbox',
|
||||
name: 'VariableUnderTest',
|
||||
value: 'foo',
|
||||
label: 'VariableUnderTest',
|
||||
};
|
||||
|
||||
await saveDashboard(dashboardPage, page, selectors, 'can hide a variable');
|
||||
await flows.addNewTextBoxVariable(dashboardPage, variable);
|
||||
|
||||
// check the variable is visible in the dashboard
|
||||
const variableLabel = dashboardPage.getByGrafanaSelector(
|
||||
selectors.pages.Dashboard.SubMenu.submenuItemLabels(variable.label)
|
||||
);
|
||||
await expect(variableLabel).toBeVisible();
|
||||
// hide the variable
|
||||
await dashboardPage
|
||||
.getByGrafanaSelector(selectors.pages.Dashboard.Settings.Variables.Edit.General.generalDisplaySelect)
|
||||
.click();
|
||||
await page.getByText('Hidden', { exact: true }).click();
|
||||
|
||||
// check that the variable is still visible
|
||||
await expect(
|
||||
dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.SubMenu.submenuItemLabels(variable.label!))
|
||||
).toBeVisible();
|
||||
|
||||
// save dashboard and exit edit mode and check variable is not visible
|
||||
await saveDashboard(dashboardPage, page, selectors);
|
||||
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
|
||||
await expect(
|
||||
dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.SubMenu.submenuItemLabels(variable.label!))
|
||||
).toBeHidden();
|
||||
// refresh and check that variable isn't visible
|
||||
await page.reload();
|
||||
await expect(
|
||||
dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.SubMenu.submenuItemLabels(variable.label!))
|
||||
).toBeHidden();
|
||||
// check that the variable is visible in edit mode
|
||||
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.editButton).click();
|
||||
await expect(
|
||||
dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.SubMenu.submenuItemLabels(variable.label!))
|
||||
).toBeVisible();
|
||||
});
|
||||
|
||||
test('can hide variable under the controls menu', async ({ dashboardPage, selectors, page }) => {
|
||||
const variable: Variable = {
|
||||
type: 'textbox',
|
||||
name: 'VariableUnderTest',
|
||||
value: 'foo',
|
||||
label: 'VariableUnderTest',
|
||||
};
|
||||
await saveDashboard(dashboardPage, page, selectors, 'can hide a variable in controls menu');
|
||||
|
||||
await flows.addNewTextBoxVariable(dashboardPage, variable);
|
||||
|
||||
// check the variable is visible in the dashboard
|
||||
const variableLabel = dashboardPage.getByGrafanaSelector(
|
||||
selectors.pages.Dashboard.SubMenu.submenuItemLabels(variable.label)
|
||||
);
|
||||
await expect(variableLabel).toBeVisible();
|
||||
// hide the variable
|
||||
await dashboardPage
|
||||
.getByGrafanaSelector(selectors.pages.Dashboard.Settings.Variables.Edit.General.generalDisplaySelect)
|
||||
.click();
|
||||
await page.getByText('Controls menu', { exact: true }).click();
|
||||
|
||||
// check that the variable is hidden under the controls menu
|
||||
await expect(
|
||||
dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.SubMenu.submenuItemLabels(variable.label!))
|
||||
).toBeHidden();
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.ControlsButton).click();
|
||||
await expect(
|
||||
dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.SubMenu.submenuItemLabels(variable.label!))
|
||||
).toBeVisible();
|
||||
|
||||
// save dashboard and refresh
|
||||
await saveDashboard(dashboardPage, page, selectors);
|
||||
await page.reload();
|
||||
|
||||
//check that the variable is hidden under the controls menu
|
||||
await expect(
|
||||
dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.SubMenu.submenuItemLabels(variable.label!))
|
||||
).toBeHidden();
|
||||
await dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.ControlsButton).click();
|
||||
await expect(
|
||||
dashboardPage.getByGrafanaSelector(selectors.pages.Dashboard.SubMenu.submenuItemLabels(variable.label!))
|
||||
).toBeVisible();
|
||||
});
|
||||
}
|
||||
);
|
||||
|
||||
@@ -79,20 +79,6 @@ export const flows = {
|
||||
await variableLabelInput.blur();
|
||||
}
|
||||
},
|
||||
async addNewTextBoxVariable(dashboardPage: DashboardPage, variable: Variable) {
|
||||
await flows.newEditPaneVariableClick(dashboardPage, selectors);
|
||||
await flows.newEditPanelCommonVariableInputs(dashboardPage, selectors, variable);
|
||||
// set the textbox variable value
|
||||
const type = 'variable-type Value';
|
||||
const fieldLabel = dashboardPage.getByGrafanaSelector(
|
||||
selectors.components.PanelEditor.OptionsPane.fieldLabel(type)
|
||||
);
|
||||
await expect(fieldLabel).toBeVisible();
|
||||
const inputField = fieldLabel.locator('input');
|
||||
await expect(inputField).toBeVisible();
|
||||
await inputField.fill(variable.value);
|
||||
await inputField.blur();
|
||||
},
|
||||
};
|
||||
|
||||
export type Variable = {
|
||||
@@ -103,16 +89,8 @@ export type Variable = {
|
||||
value: string;
|
||||
};
|
||||
|
||||
export async function saveDashboard(
|
||||
dashboardPage: DashboardPage,
|
||||
page: Page,
|
||||
selectors: E2ESelectorGroups,
|
||||
title?: string
|
||||
) {
|
||||
export async function saveDashboard(dashboardPage: DashboardPage, page: Page, selectors: E2ESelectorGroups) {
|
||||
await dashboardPage.getByGrafanaSelector(selectors.components.NavToolbar.editDashboard.saveButton).click();
|
||||
if (title) {
|
||||
await page.getByTestId(selectors.components.Drawer.DashboardSaveDrawer.saveAsTitleInput).fill(title);
|
||||
}
|
||||
await dashboardPage.getByGrafanaSelector(selectors.components.Drawer.DashboardSaveDrawer.saveButton).click();
|
||||
await expect(page.getByText('Dashboard saved')).toBeVisible();
|
||||
}
|
||||
|
||||
5
go.mod
5
go.mod
@@ -7,7 +7,7 @@ require (
|
||||
buf.build/gen/go/parca-dev/parca/protocolbuffers/go v1.36.2-20250703125925-3f0fcf4bff96.1 // @grafana/observability-traces-and-profiling
|
||||
cloud.google.com/go/kms v1.22.0 // @grafana/grafana-backend-group
|
||||
cloud.google.com/go/storage v1.55.0 // @grafana/grafana-backend-group
|
||||
connectrpc.com/connect v1.19.1 // @grafana/observability-traces-and-profiling
|
||||
connectrpc.com/connect v1.18.1 // @grafana/observability-traces-and-profiling
|
||||
cuelang.org/go v0.11.1 // @grafana/grafana-as-code
|
||||
dario.cat/mergo v1.0.2 // @grafana/grafana-app-platform-squad
|
||||
filippo.io/age v1.2.1 // @grafana/identity-access-team
|
||||
@@ -111,7 +111,7 @@ require (
|
||||
github.com/grafana/nanogit v0.3.0 // indirect; @grafana/grafana-git-ui-sync-team
|
||||
github.com/grafana/otel-profiling-go v0.5.1 // @grafana/grafana-backend-group
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // @grafana/observability-traces-and-profiling
|
||||
github.com/grafana/pyroscope/api v1.2.1-0.20251118081820-ace37f973a0f // @grafana/observability-traces-and-profiling
|
||||
github.com/grafana/pyroscope/api v1.2.1-0.20250415190842-3ff7247547ae // @grafana/observability-traces-and-profiling
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // @grafana/grafana-search-and-storage
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 // @grafana/plugins-platform-backend
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 // @grafana/grafana-backend-group
|
||||
@@ -681,7 +681,6 @@ require (
|
||||
github.com/go-openapi/swag/stringutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/typeutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
|
||||
github.com/google/gnostic v0.7.1 // indirect
|
||||
github.com/gophercloud/gophercloud/v2 v2.9.0 // indirect
|
||||
github.com/grafana/sqlds/v5 v5.0.3 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
|
||||
|
||||
10
go.sum
10
go.sum
@@ -627,8 +627,8 @@ cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoIS
|
||||
cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
|
||||
cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
|
||||
cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
|
||||
connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14=
|
||||
connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
|
||||
connectrpc.com/connect v1.18.1 h1:PAg7CjSAGvscaf6YZKUefjoih5Z/qYkyaTrBW8xvYPw=
|
||||
connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8=
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs=
|
||||
cuelabs.dev/go/oci/ociregistry v0.0.0-20251212221603-3adeb8663819 h1:Zh+Ur3OsoWpvALHPLT45nOekHkgOt+IOfutBbPqM17I=
|
||||
cuelabs.dev/go/oci/ociregistry v0.0.0-20251212221603-3adeb8663819/go.mod h1:WjmQxb+W6nVNCgj8nXrF24lIz95AHwnSl36tpjDZSU8=
|
||||
@@ -1503,8 +1503,6 @@ github.com/google/cel-go v0.26.1/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PU
|
||||
github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/gnostic v0.7.1 h1:t5Kc7j/8kYr8t2u11rykRrPPovlEMG4+xdc/SpekATs=
|
||||
github.com/google/gnostic v0.7.1/go.mod h1:KSw6sxnxEBFM8jLPfJd46xZP+yQcfE8XkiqfZx5zR28=
|
||||
github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c=
|
||||
github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
@@ -1687,8 +1685,8 @@ github.com/grafana/prometheus-alertmanager v0.25.1-0.20250911094103-5456b6e45604
|
||||
github.com/grafana/prometheus-alertmanager v0.25.1-0.20250911094103-5456b6e45604/go.mod h1:O/QP1BCm0HHIzbKvgMzqb5sSyH88rzkFk84F4TfJjBU=
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og=
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU=
|
||||
github.com/grafana/pyroscope/api v1.2.1-0.20251118081820-ace37f973a0f h1:fTlIj5n4x5dU63XHItug7GLjtnaeJdPqBlqg4zlABq0=
|
||||
github.com/grafana/pyroscope/api v1.2.1-0.20251118081820-ace37f973a0f/go.mod h1:VBNcIhunCZsJ3/mcYx+j7uFf0P/108eiWa+8+Z9ll3o=
|
||||
github.com/grafana/pyroscope/api v1.2.1-0.20250415190842-3ff7247547ae h1:35W3Wjp9KWnSoV/DuymmyIj5aHE0CYlDQ5m2KeXUPAc=
|
||||
github.com/grafana/pyroscope/api v1.2.1-0.20250415190842-3ff7247547ae/go.mod h1:6CJ1uXmLZ13ufpO9xE4pST+DyaBt0uszzrV0YnoaVLQ=
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
|
||||
github.com/grafana/saml v0.4.15-0.20240917091248-ae3bbdad8a56 h1:SDGrP81Vcd102L3UJEryRd1eestRw73wt+b8vnVEFe0=
|
||||
|
||||
@@ -755,8 +755,6 @@ github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZP
|
||||
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/flosch/pongo2/v4 v4.0.2 h1:gv+5Pe3vaSVmiJvh/BZa82b7/00YUGm0PIyVVLop0Hw=
|
||||
github.com/flosch/pongo2/v4 v4.0.2/go.mod h1:B5ObFANs/36VwxxlgKpdchIJHMvHB562PW+BWPhwZD8=
|
||||
github.com/flowstack/go-jsonschema v0.1.1 h1:dCrjGJRXIlbDsLAgTJZTjhwUJnnxVWl1OgNyYh5nyDc=
|
||||
github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
|
||||
github.com/fluent/fluent-bit-go v0.0.0-20230731091245-a7a013e2473c h1:yKN46XJHYC/gvgH2UsisJ31+n4K3S7QYZSfU2uAWjuI=
|
||||
github.com/fluent/fluent-bit-go v0.0.0-20230731091245-a7a013e2473c/go.mod h1:L92h+dgwElEyUuShEwjbiHjseW410WIcNz+Bjutc8YQ=
|
||||
github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8=
|
||||
|
||||
@@ -218,10 +218,8 @@ lineage: schemas: [{
|
||||
// Optional field, if you want to extract part of a series name or metric node segment.
|
||||
// Named capture groups can be used to separate the display text and value.
|
||||
regex?: string
|
||||
// Optional, indicates whether a custom type variable uses CSV or JSON to define its values
|
||||
valuesFormat?: "csv" | "json" | *"csv"
|
||||
// Determine whether regex applies to variable value or display text
|
||||
regexApplyTo?: #VariableRegexApplyTo
|
||||
// Determine whether regex applies to variable value or display text
|
||||
regexApplyTo?: #VariableRegexApplyTo
|
||||
// Additional static options for query variable
|
||||
staticOptions?: [...#VariableOption]
|
||||
// Ordering of static options in relation to options returned from data source for query variable
|
||||
|
||||
@@ -295,8 +295,8 @@
|
||||
"@grafana/plugin-ui": "^0.11.1",
|
||||
"@grafana/prometheus": "workspace:*",
|
||||
"@grafana/runtime": "workspace:*",
|
||||
"@grafana/scenes": "v6.52.1",
|
||||
"@grafana/scenes-react": "v6.52.1",
|
||||
"@grafana/scenes": "6.52.0",
|
||||
"@grafana/scenes-react": "6.52.0",
|
||||
"@grafana/schema": "workspace:*",
|
||||
"@grafana/sql": "workspace:*",
|
||||
"@grafana/ui": "workspace:*",
|
||||
|
||||
@@ -400,6 +400,10 @@ export interface FeatureToggles {
|
||||
*/
|
||||
tableSharedCrosshair?: boolean;
|
||||
/**
|
||||
* Use the kubernetes API for feature toggle management in the frontend
|
||||
*/
|
||||
kubernetesFeatureToggles?: boolean;
|
||||
/**
|
||||
* Enabled grafana cloud specific RBAC roles
|
||||
*/
|
||||
cloudRBACRoles?: boolean;
|
||||
@@ -1259,8 +1263,4 @@ export interface FeatureToggles {
|
||||
* Enables the creation of keepers that manage secrets stored on AWS secrets manager
|
||||
*/
|
||||
secretsManagementAppPlatformAwsKeeper?: boolean;
|
||||
/**
|
||||
* Enables profiles exemplars support in profiles drilldown
|
||||
*/
|
||||
profilesExemplars?: boolean;
|
||||
}
|
||||
|
||||
@@ -103,7 +103,6 @@ export interface IntervalVariableModel extends VariableWithOptions {
|
||||
|
||||
export interface CustomVariableModel extends VariableWithMultiSupport {
|
||||
type: 'custom';
|
||||
valuesFormat?: 'csv' | 'json';
|
||||
}
|
||||
|
||||
export interface DataSourceVariableModel extends VariableWithMultiSupport {
|
||||
|
||||
@@ -266,9 +266,6 @@ export const versionedPages = {
|
||||
Controls: {
|
||||
'11.1.0': 'data-testid dashboard controls',
|
||||
},
|
||||
ControlsButton: {
|
||||
'12.3.0': 'data-testid dashboard controls button',
|
||||
},
|
||||
SubMenu: {
|
||||
submenu: {
|
||||
[MIN_GRAFANA_VERSION]: 'Dashboard submenu',
|
||||
|
||||
@@ -25,10 +25,6 @@ export interface GrafanaPyroscopeDataQuery extends common.DataQuery {
|
||||
* Allows to group the results.
|
||||
*/
|
||||
groupBy: Array<string>;
|
||||
/**
|
||||
* If set to true, exemplars will be requested
|
||||
*/
|
||||
includeExemplars: boolean;
|
||||
/**
|
||||
* Specifies the query label selectors.
|
||||
*/
|
||||
@@ -53,7 +49,6 @@ export interface GrafanaPyroscopeDataQuery extends common.DataQuery {
|
||||
|
||||
export const defaultGrafanaPyroscopeDataQuery: Partial<GrafanaPyroscopeDataQuery> = {
|
||||
groupBy: [],
|
||||
includeExemplars: false,
|
||||
labelSelector: '{}',
|
||||
spanSelector: [],
|
||||
};
|
||||
|
||||
@@ -211,10 +211,6 @@ export interface VariableModel {
|
||||
* Type of variable
|
||||
*/
|
||||
type: VariableType;
|
||||
/**
|
||||
* Optional, indicates whether a custom type variable uses CSV or JSON to define its values
|
||||
*/
|
||||
valuesFormat?: ('csv' | 'json');
|
||||
}
|
||||
|
||||
export const defaultVariableModel: Partial<VariableModel> = {
|
||||
@@ -224,7 +220,6 @@ export const defaultVariableModel: Partial<VariableModel> = {
|
||||
options: [],
|
||||
skipUrlSync: false,
|
||||
staticOptions: [],
|
||||
valuesFormat: 'csv',
|
||||
};
|
||||
|
||||
/**
|
||||
|
||||
@@ -317,7 +317,6 @@ export const handyTestingSchema: Spec = {
|
||||
query: 'option1, option2',
|
||||
skipUrlSync: false,
|
||||
allowCustomValue: true,
|
||||
valuesFormat: 'csv',
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -300,7 +300,7 @@ export interface FieldConfig {
|
||||
description?: string;
|
||||
// An explicit path to the field in the datasource. When the frame meta includes a path,
|
||||
// This will default to `${frame.meta.path}/${field.name}
|
||||
//
|
||||
//
|
||||
// When defined, this value can be used as an identifier within the datasource scope, and
|
||||
// may be used to update the results
|
||||
path?: string;
|
||||
@@ -1353,7 +1353,6 @@ export interface CustomVariableSpec {
|
||||
skipUrlSync: boolean;
|
||||
description?: string;
|
||||
allowCustomValue: boolean;
|
||||
valuesFormat?: "csv" | "json";
|
||||
}
|
||||
|
||||
export const defaultCustomVariableSpec = (): CustomVariableSpec => ({
|
||||
@@ -1366,7 +1365,6 @@ export const defaultCustomVariableSpec = (): CustomVariableSpec => ({
|
||||
hide: "dontHide",
|
||||
skipUrlSync: false,
|
||||
allowCustomValue: true,
|
||||
valuesFormat: undefined,
|
||||
});
|
||||
|
||||
// Group variable kind
|
||||
@@ -1551,3 +1549,4 @@ export const defaultSpec = (): Spec => ({
|
||||
title: "",
|
||||
variables: [],
|
||||
});
|
||||
|
||||
|
||||
@@ -1359,7 +1359,6 @@ export interface CustomVariableSpec {
|
||||
skipUrlSync: boolean;
|
||||
description?: string;
|
||||
allowCustomValue: boolean;
|
||||
valuesFormat?: "csv" | "json";
|
||||
}
|
||||
|
||||
export const defaultCustomVariableSpec = (): CustomVariableSpec => ({
|
||||
|
||||
@@ -1,174 +0,0 @@
|
||||
import { useEffect, useState } from 'react';
|
||||
import { Observable } from 'rxjs';
|
||||
import { map } from 'rxjs/operators';
|
||||
|
||||
import {
|
||||
CustomVariableSupport,
|
||||
DataQueryRequest,
|
||||
DataQueryResponse,
|
||||
QueryEditorProps,
|
||||
Field,
|
||||
DataFrame,
|
||||
MetricFindValue,
|
||||
} from '@grafana/data';
|
||||
import { t } from '@grafana/i18n';
|
||||
import { EditorMode, EditorRows, EditorRow, EditorField } from '@grafana/plugin-ui';
|
||||
import { Combobox, ComboboxOption } from '@grafana/ui';
|
||||
|
||||
import { SqlQueryEditorLazy } from './components/QueryEditorLazy';
|
||||
import { SqlDatasource } from './datasource/SqlDatasource';
|
||||
import { applyQueryDefaults } from './defaults';
|
||||
import { QueryFormat, type SQLQuery, type SQLOptions, type SQLQueryMeta } from './types';
|
||||
|
||||
// A variable query is a regular SQL query plus the legacy string `query`
// field, kept so string-based variable queries from older dashboards keep working.
type SQLVariableQuery = { query: string } & SQLQuery;

// Fixed refId assigned to every variable query issued by this editor.
const refId = 'SQLVariableQueryEditor-VariableQuery';
|
||||
|
||||
/**
 * Wires SQL template-variable queries into Grafana's variable system.
 * Runs the variable query through the SQL datasource and converts the
 * resulting data frames into MetricFindValue options.
 */
export class SQLVariableSupport extends CustomVariableSupport<SqlDatasource, SQLQuery> {
  constructor(readonly datasource: SqlDatasource) {
    super();
  }

  // Custom editor rendered for variable queries (full SQL editor + field mapping).
  editor = SQLVariablesQueryEditor;

  query(request: DataQueryRequest<SQLQuery>): Observable<DataQueryResponse> {
    if (request.targets.length < 1) {
      throw new Error('no variable query found');
    }
    // Only the first target is used; legacy string queries are migrated to
    // the SQLVariableQuery shape before execution.
    const updatedQuery = migrateVariableQuery(request.targets[0]);
    return this.datasource.query({ ...request, targets: [updatedQuery] }).pipe(
      map((d: DataQueryResponse) => {
        const frames = d.data || [];
        // Convert frames to { text, value } options using the query's field mapping.
        const metricFindValues = convertDataFramesToMetricFindValues(frames, updatedQuery.meta);
        return { data: metricFindValues };
      })
    );
  }

  // New variable queries start in builder mode and return table-formatted results.
  getDefaultQuery(): Partial<SQLQuery> {
    return applyQueryDefaults({ refId, editorMode: EditorMode.Builder, format: QueryFormat.Table });
  }
}
|
||||
|
||||
type SQLVariableQueryEditorProps = QueryEditorProps<SqlDatasource, SQLQuery, SQLOptions>;

/**
 * Variable query editor: the lazily loaded SQL query editor followed by the
 * value/text field-mapping controls.
 */
const SQLVariablesQueryEditor = (props: SQLVariableQueryEditorProps) => {
  // Migrate legacy string queries up front so both child editors receive a SQLQuery.
  const query = migrateVariableQuery(props.query);
  return (
    <>
      <SqlQueryEditorLazy {...props} query={query} />
      <FieldMapping {...props} query={query} />
    </>
  );
};
|
||||
|
||||
const FieldMapping = (props: SQLVariableQueryEditorProps) => {
|
||||
const { query, datasource, onChange } = props;
|
||||
const [choices, setChoices] = useState<ComboboxOption[]>([]);
|
||||
useEffect(() => {
|
||||
let isActive = true;
|
||||
// eslint-disable-next-line
|
||||
const subscription = datasource.query({ targets: [query] } as DataQueryRequest<SQLQuery>).subscribe({
|
||||
next: (response) => {
|
||||
if (!isActive) {
|
||||
return;
|
||||
}
|
||||
const fieldNames = (response.data[0] || { fields: [] }).fields.map((f: Field) => f.name);
|
||||
setChoices(fieldNames.map((f: Field) => ({ value: f, label: f })));
|
||||
},
|
||||
error: () => {
|
||||
if (isActive) {
|
||||
setChoices([]);
|
||||
}
|
||||
},
|
||||
});
|
||||
return () => {
|
||||
isActive = false;
|
||||
subscription.unsubscribe();
|
||||
};
|
||||
}, [datasource, query]);
|
||||
const onMetaPropChange = <Key extends keyof SQLQueryMeta, Value extends SQLQueryMeta[Key]>(
|
||||
key: Key,
|
||||
value: Value,
|
||||
meta = query.meta || {}
|
||||
) => {
|
||||
onChange({ ...query, meta: { ...meta, [key]: value } });
|
||||
};
|
||||
return (
|
||||
<EditorRows>
|
||||
<EditorRow>
|
||||
<EditorField label={t('grafana-sql.components.query-meta.variables.valueField', 'Value Field')}>
|
||||
<Combobox
|
||||
isClearable
|
||||
value={query.meta?.valueField}
|
||||
onChange={(e) => onMetaPropChange('valueField', e?.value)}
|
||||
width={40}
|
||||
options={choices}
|
||||
/>
|
||||
</EditorField>
|
||||
<EditorField label={t('grafana-sql.components.query-meta.variables.textField', 'Text Field')}>
|
||||
<Combobox
|
||||
isClearable
|
||||
value={query.meta?.textField}
|
||||
onChange={(e) => onMetaPropChange('textField', e?.value)}
|
||||
width={40}
|
||||
options={choices}
|
||||
/>
|
||||
</EditorField>
|
||||
</EditorRow>
|
||||
</EditorRows>
|
||||
);
|
||||
};
|
||||
|
||||
const migrateVariableQuery = (rawQuery: string | SQLQuery): SQLVariableQuery => {
|
||||
if (typeof rawQuery !== 'string') {
|
||||
return {
|
||||
...rawQuery,
|
||||
refId: rawQuery.refId || refId,
|
||||
query: rawQuery.rawSql || '',
|
||||
};
|
||||
}
|
||||
return {
|
||||
...applyQueryDefaults({
|
||||
refId,
|
||||
rawSql: rawQuery,
|
||||
editorMode: rawQuery ? EditorMode.Code : EditorMode.Builder,
|
||||
}),
|
||||
query: rawQuery,
|
||||
};
|
||||
};
|
||||
|
||||
/**
 * Converts query result frames into variable options.
 *
 * Only the first frame is inspected. Text/value columns are resolved from an
 * explicit `meta` mapping, falling back to the conventional `__text` /
 * `__value` field names, then the other resolved column, then the first
 * field. Every row also carries a `properties` map with all column values
 * stringified.
 *
 * Throws when there are no frames or the first frame has no fields.
 */
const convertDataFramesToMetricFindValues = (frames: DataFrame[], meta?: SQLQueryMeta): MetricFindValue[] => {
  if (!frames.length) {
    throw new Error('no results found');
  }

  const frame = frames[0];

  const fields = frame.fields;

  if (fields.length < 1) {
    throw new Error('no fields found in the response');
  }

  // Conventional column names apply unless an explicit mapping overrides them.
  let textField = fields.find((f) => f.name === '__text');
  let valueField = fields.find((f) => f.name === '__value');
  // NOTE(review): if meta names a field that does not exist, this overwrites a
  // previously found __text/__value with undefined and the generic fallback
  // below takes over — confirm that is the intended behavior.
  if (meta?.textField) {
    textField = fields.find((f) => f.name === meta.textField);
  }
  if (meta?.valueField) {
    valueField = fields.find((f) => f.name === meta.valueField);
  }
  const resolvedTextField = textField || valueField || fields[0];
  const resolvedValueField = valueField || textField || fields[0];

  const results: MetricFindValue[] = [];
  const rowCount = frame.length;
  for (let i = 0; i < rowCount; i++) {
    // Missing cells become empty strings rather than "undefined"/"null".
    const text = String(resolvedTextField.values[i] ?? '');
    const value = String(resolvedValueField.values[i] ?? '');
    // Expose every column of the row so consumers can use properties
    // beyond text/value.
    const properties: Record<string, string> = {};
    for (const field of fields) {
      properties[field.name] = String(field.values[i] ?? '');
    }
    results.push({ text, value, properties });
  }
  return results;
};
|
||||
@@ -21,7 +21,6 @@ export { TLSSecretsConfig } from './components/configuration/TLSSecretsConfig';
|
||||
export { useMigrateDatabaseFields } from './components/configuration/useMigrateDatabaseFields';
|
||||
export { SqlQueryEditorLazy } from './components/QueryEditorLazy';
|
||||
export type { QueryHeaderProps } from './components/QueryHeader';
|
||||
export { SQLVariableSupport } from './SQLVariableSupport';
|
||||
export { createSelectClause, haveColumns } from './utils/sql.utils';
|
||||
export { applyQueryDefaults } from './defaults';
|
||||
export { makeVariable } from './utils/testHelpers';
|
||||
|
||||
@@ -69,12 +69,6 @@
|
||||
"placeholder-select-format": "Select format",
|
||||
"run-query": "Run query"
|
||||
},
|
||||
"query-meta": {
|
||||
"variables": {
|
||||
"textField": "Text Field",
|
||||
"valueField": "Value Field"
|
||||
}
|
||||
},
|
||||
"query-toolbox": {
|
||||
"content-hit-ctrlcmdreturn-to-run-query": "Hit CTRL/CMD+Return to run query",
|
||||
"tooltip-collapse": "Collapse editor",
|
||||
|
||||
@@ -50,8 +50,6 @@ export enum QueryFormat {
|
||||
Table = 'table',
|
||||
}
|
||||
|
||||
export type SQLQueryMeta = { valueField?: string; textField?: string };
|
||||
|
||||
export interface SQLQuery extends DataQuery {
|
||||
alias?: string;
|
||||
format?: QueryFormat;
|
||||
@@ -61,7 +59,6 @@ export interface SQLQuery extends DataQuery {
|
||||
sql?: SQLExpression;
|
||||
editorMode?: EditorMode;
|
||||
rawQuery?: boolean;
|
||||
meta?: SQLQueryMeta;
|
||||
}
|
||||
|
||||
export interface NameValue {
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"github.com/grafana/grafana/pkg/infra/tracing"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin/coreplugin"
|
||||
"github.com/grafana/grafana/pkg/plugins/manager/pluginfakes"
|
||||
"github.com/grafana/grafana/pkg/services/accesscontrol"
|
||||
"github.com/grafana/grafana/pkg/services/caching"
|
||||
@@ -27,7 +28,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
"github.com/grafana/grafana/pkg/services/oauthtoken/oauthtokentest"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/coreplugin"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginaccesscontrol"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginconfig"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/plugincontext"
|
||||
|
||||
10
pkg/kinds/dashboard/dashboard_spec_gen.go
generated
10
pkg/kinds/dashboard/dashboard_spec_gen.go
generated
@@ -837,8 +837,6 @@ type VariableModel struct {
|
||||
// Optional field, if you want to extract part of a series name or metric node segment.
|
||||
// Named capture groups can be used to separate the display text and value.
|
||||
Regex *string `json:"regex,omitempty"`
|
||||
// Optional, indicates whether a custom type variable uses CSV or JSON to define its values
|
||||
ValuesFormat *VariableModelValuesFormat `json:"valuesFormat,omitempty"`
|
||||
// Determine whether regex applies to variable value or display text
|
||||
RegexApplyTo *VariableRegexApplyTo `json:"regexApplyTo,omitempty"`
|
||||
// Additional static options for query variable
|
||||
@@ -854,7 +852,6 @@ func NewVariableModel() *VariableModel {
|
||||
Multi: (func(input bool) *bool { return &input })(false),
|
||||
AllowCustomValue: (func(input bool) *bool { return &input })(true),
|
||||
IncludeAll: (func(input bool) *bool { return &input })(false),
|
||||
ValuesFormat: (func(input VariableModelValuesFormat) *VariableModelValuesFormat { return &input })(VariableModelValuesFormatCsv),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1194,13 +1191,6 @@ const (
|
||||
DataTransformerConfigTopicAlertStates DataTransformerConfigTopic = "alertStates"
|
||||
)
|
||||
|
||||
type VariableModelValuesFormat string
|
||||
|
||||
const (
|
||||
VariableModelValuesFormatCsv VariableModelValuesFormat = "csv"
|
||||
VariableModelValuesFormatJson VariableModelValuesFormat = "json"
|
||||
)
|
||||
|
||||
type VariableModelStaticOptionsOrder string
|
||||
|
||||
const (
|
||||
|
||||
@@ -14,8 +14,6 @@ import (
|
||||
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin/coreplugin"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin/provider"
|
||||
"github.com/grafana/grafana/pkg/plugins/log"
|
||||
"github.com/grafana/grafana/pkg/tsdb/azuremonitor"
|
||||
cloudmonitoring "github.com/grafana/grafana/pkg/tsdb/cloud-monitoring"
|
||||
@@ -94,10 +92,6 @@ func NewRegistry(store map[string]backendplugin.PluginFactoryFunc) *Registry {
|
||||
}
|
||||
}
|
||||
|
||||
func ProvideCoreProvider(coreRegistry *Registry) plugins.BackendFactoryProvider {
|
||||
return provider.New(coreRegistry.BackendFactoryProvider(), provider.DefaultProvider)
|
||||
}
|
||||
|
||||
func ProvideCoreRegistry(tracer trace.Tracer, am *azuremonitor.Service, cw *cloudwatch.Service, cm *cloudmonitoring.Service,
|
||||
es *elasticsearch.Service, grap *graphite.Service, idb *influxdb.Service, lk *loki.Service, otsdb *opentsdb.Service,
|
||||
pr *prometheus.Service, t *tempo.Service, td *testdatasource.Service, pg *postgres.Service, my *mysql.Service,
|
||||
@@ -162,7 +156,7 @@ func asBackendPlugin(svc any) backendplugin.PluginFactoryFunc {
|
||||
|
||||
if opts.QueryDataHandler != nil || opts.CallResourceHandler != nil ||
|
||||
opts.CheckHealthHandler != nil || opts.StreamHandler != nil {
|
||||
return coreplugin.New(opts)
|
||||
return New(opts)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin/coreplugin"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin/grpcplugin"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin/pluginextensionv2"
|
||||
"github.com/grafana/grafana/pkg/plugins/log"
|
||||
@@ -26,6 +27,10 @@ func New(providers ...PluginBackendProvider) *Service {
|
||||
}
|
||||
}
|
||||
|
||||
func ProvideService(coreRegistry *coreplugin.Registry) *Service {
|
||||
return New(coreRegistry.BackendFactoryProvider(), DefaultProvider)
|
||||
}
|
||||
|
||||
func (s *Service) BackendFactory(ctx context.Context, p *plugins.Plugin) backendplugin.PluginFactoryFunc {
|
||||
for _, provider := range s.providerChain {
|
||||
if factory := provider(ctx, p); factory != nil {
|
||||
|
||||
@@ -276,7 +276,7 @@ func (b *APIBuilder) oneFlagHandler(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if b.providerType == setting.FeaturesServiceProviderType || b.providerType == setting.OFREPProviderType {
|
||||
if b.providerType == setting.GOFFProviderType || b.providerType == setting.OFREPProviderType {
|
||||
b.proxyFlagReq(ctx, flagKey, isAuthedReq, w, r)
|
||||
return
|
||||
}
|
||||
@@ -304,7 +304,7 @@ func (b *APIBuilder) allFlagsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
isAuthedReq := b.isAuthenticatedRequest(r)
|
||||
span.SetAttributes(attribute.Bool("authenticated", isAuthedReq))
|
||||
|
||||
if b.providerType == setting.FeaturesServiceProviderType || b.providerType == setting.OFREPProviderType {
|
||||
if b.providerType == setting.GOFFProviderType || b.providerType == setting.OFREPProviderType {
|
||||
b.proxyAllFlagReq(ctx, isAuthedReq, w, r)
|
||||
return
|
||||
}
|
||||
|
||||
11
pkg/server/wire_gen.go
generated
11
pkg/server/wire_gen.go
generated
@@ -37,6 +37,8 @@ import (
|
||||
"github.com/grafana/grafana/pkg/login/social/socialimpl"
|
||||
"github.com/grafana/grafana/pkg/middleware/csrf"
|
||||
"github.com/grafana/grafana/pkg/middleware/loggermw"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin/coreplugin"
|
||||
provider2 "github.com/grafana/grafana/pkg/plugins/backendplugin/provider"
|
||||
manager4 "github.com/grafana/grafana/pkg/plugins/manager"
|
||||
"github.com/grafana/grafana/pkg/plugins/manager/filestore"
|
||||
"github.com/grafana/grafana/pkg/plugins/manager/process"
|
||||
@@ -176,7 +178,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/angulardetectorsprovider"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/angularinspector"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/angularpatternsstore"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/coreplugin"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/dashboards"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/installsync"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/keyretriever"
|
||||
@@ -556,7 +557,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api
|
||||
zipkinService := zipkin.ProvideService(httpclientProvider)
|
||||
jaegerService := jaeger.ProvideService(httpclientProvider)
|
||||
corepluginRegistry := coreplugin.ProvideCoreRegistry(tracer, azuremonitorService, cloudwatchService, cloudmonitoringService, elasticsearchService, graphiteService, influxdbService, lokiService, opentsdbService, prometheusService, tempoService, testdatasourceService, postgresService, mysqlService, mssqlService, grafanadsService, pyroscopeService, parcaService, zipkinService, jaegerService)
|
||||
backendFactoryProvider := coreplugin.ProvideCoreProvider(corepluginRegistry)
|
||||
providerService := provider2.ProvideService(corepluginRegistry)
|
||||
processService := process.ProvideService()
|
||||
retrieverService := retriever.ProvideService(sqlStore, apikeyService, kvStore, userService, orgService)
|
||||
serviceAccountPermissionsService, err := ossaccesscontrol.ProvideServiceAccountPermissions(cfg, featureToggles, routeRegisterImpl, sqlStore, accessControl, ossLicensingService, retrieverService, acimplService, teamService, userService, actionSetService)
|
||||
@@ -572,7 +573,7 @@ func Initialize(ctx context.Context, cfg *setting.Cfg, opts Options, apiOpts api
|
||||
service13 := service6.ProvideService(sqlStore, secretsService)
|
||||
serviceregistrationService := serviceregistration.ProvideService(cfg, featureToggles, registryRegistry, service13)
|
||||
noop := provisionedplugins.NewNoop()
|
||||
initialize := pipeline.ProvideInitializationStage(pluginManagementCfg, inMemory, backendFactoryProvider, processService, serviceregistrationService, acimplService, actionSetService, envVarsProvider, tracingService, noop)
|
||||
initialize := pipeline.ProvideInitializationStage(pluginManagementCfg, inMemory, providerService, processService, serviceregistrationService, acimplService, actionSetService, envVarsProvider, tracingService, noop)
|
||||
terminate, err := pipeline.ProvideTerminationStage(pluginManagementCfg, inMemory, processService)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1216,7 +1217,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac
|
||||
zipkinService := zipkin.ProvideService(httpclientProvider)
|
||||
jaegerService := jaeger.ProvideService(httpclientProvider)
|
||||
corepluginRegistry := coreplugin.ProvideCoreRegistry(tracer, azuremonitorService, cloudwatchService, cloudmonitoringService, elasticsearchService, graphiteService, influxdbService, lokiService, opentsdbService, prometheusService, tempoService, testdatasourceService, postgresService, mysqlService, mssqlService, grafanadsService, pyroscopeService, parcaService, zipkinService, jaegerService)
|
||||
backendFactoryProvider := coreplugin.ProvideCoreProvider(corepluginRegistry)
|
||||
providerService := provider2.ProvideService(corepluginRegistry)
|
||||
processService := process.ProvideService()
|
||||
retrieverService := retriever.ProvideService(sqlStore, apikeyService, kvStore, userService, orgService)
|
||||
serviceAccountPermissionsService, err := ossaccesscontrol.ProvideServiceAccountPermissions(cfg, featureToggles, routeRegisterImpl, sqlStore, accessControl, ossLicensingService, retrieverService, acimplService, teamService, userService, actionSetService)
|
||||
@@ -1232,7 +1233,7 @@ func InitializeForTest(ctx context.Context, t sqlutil.ITestDB, testingT interfac
|
||||
service13 := service6.ProvideService(sqlStore, secretsService)
|
||||
serviceregistrationService := serviceregistration.ProvideService(cfg, featureToggles, registryRegistry, service13)
|
||||
noop := provisionedplugins.NewNoop()
|
||||
initialize := pipeline.ProvideInitializationStage(pluginManagementCfg, inMemory, backendFactoryProvider, processService, serviceregistrationService, acimplService, actionSetService, envVarsProvider, tracingService, noop)
|
||||
initialize := pipeline.ProvideInitializationStage(pluginManagementCfg, inMemory, providerService, processService, serviceregistrationService, acimplService, actionSetService, envVarsProvider, tracingService, noop)
|
||||
terminate, err := pipeline.ProvideTerminationStage(pluginManagementCfg, inMemory, processService)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -3,13 +3,11 @@ package dualwrite
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
openfgav1 "github.com/openfga/api/proto/openfga/v1"
|
||||
|
||||
claims "github.com/grafana/authlib/types"
|
||||
|
||||
dashboardV1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1"
|
||||
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana"
|
||||
)
|
||||
@@ -21,30 +19,14 @@ type legacyTupleCollector func(ctx context.Context, orgID int64) (map[string]map
|
||||
type zanzanaTupleCollector func(ctx context.Context, client zanzana.Client, object string, namespace string) (map[string]*openfgav1.TupleKey, error)
|
||||
|
||||
type resourceReconciler struct {
|
||||
name string
|
||||
legacy legacyTupleCollector
|
||||
zanzana zanzanaTupleCollector
|
||||
client zanzana.Client
|
||||
orphanObjectPrefix string
|
||||
orphanRelations []string
|
||||
name string
|
||||
legacy legacyTupleCollector
|
||||
zanzana zanzanaTupleCollector
|
||||
client zanzana.Client
|
||||
}
|
||||
|
||||
func newResourceReconciler(name string, legacy legacyTupleCollector, zanzanaCollector zanzanaTupleCollector, client zanzana.Client) resourceReconciler {
|
||||
r := resourceReconciler{name: name, legacy: legacy, zanzana: zanzanaCollector, client: client}
|
||||
|
||||
// we only need to worry about orphaned tuples for reconcilers that use the managed permissions collector (i.e. dashboards & folders)
|
||||
switch name {
|
||||
case "managed folder permissions":
|
||||
// prefix for folders is `folder:`
|
||||
r.orphanObjectPrefix = zanzana.NewObjectEntry(zanzana.TypeFolder, "", "", "", "")
|
||||
r.orphanRelations = append([]string{}, zanzana.RelationsFolder...)
|
||||
case "managed dashboard permissions":
|
||||
// prefix for dashboards will be `resource:dashboard.grafana.app/dashboards/`
|
||||
r.orphanObjectPrefix = fmt.Sprintf("%s/", zanzana.NewObjectEntry(zanzana.TypeResource, dashboardV1.APIGroup, dashboardV1.DASHBOARD_RESOURCE, "", ""))
|
||||
r.orphanRelations = append([]string{}, zanzana.RelationsResouce...)
|
||||
}
|
||||
|
||||
return r
|
||||
func newResourceReconciler(name string, legacy legacyTupleCollector, zanzana zanzanaTupleCollector, client zanzana.Client) resourceReconciler {
|
||||
return resourceReconciler{name, legacy, zanzana, client}
|
||||
}
|
||||
|
||||
func (r resourceReconciler) reconcile(ctx context.Context, namespace string) error {
|
||||
@@ -53,15 +35,6 @@ func (r resourceReconciler) reconcile(ctx context.Context, namespace string) err
|
||||
return err
|
||||
}
|
||||
|
||||
// 0. Fetch all tuples currently stored in Zanzana. This will be used later on
|
||||
// to cleanup orphaned tuples.
|
||||
// This order needs to be kept (fetching from Zanzana first) to avoid accidentally
|
||||
// cleaning up new tuples that were added after the legacy tuples were fetched.
|
||||
allTuplesInZanzana, err := r.readAllTuples(ctx, namespace)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read all tuples from zanzana for %s: %w", r.name, err)
|
||||
}
|
||||
|
||||
// 1. Fetch grafana resources stored in grafana db.
|
||||
res, err := r.legacy(ctx, info.OrgID)
|
||||
if err != nil {
|
||||
@@ -114,14 +87,6 @@ func (r resourceReconciler) reconcile(ctx context.Context, namespace string) err
|
||||
}
|
||||
}
|
||||
|
||||
// when the last managed permission for a resource is removed, the legacy results will no
|
||||
// longer contain any tuples for that resource. this process cleans it up when applicable.
|
||||
orphans, err := r.collectOrphanDeletes(ctx, namespace, allTuplesInZanzana, res)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to collect orphan deletes (%s): %w", r.name, err)
|
||||
}
|
||||
deletes = append(deletes, orphans...)
|
||||
|
||||
if len(writes) == 0 && len(deletes) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -154,79 +119,3 @@ func (r resourceReconciler) reconcile(ctx context.Context, namespace string) err
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// collectOrphanDeletes collects tuples that are no longer present in the legacy results
|
||||
// but still are present in zanzana. when that is the case, we need to delete the tuple from
|
||||
// zanzana. this will happen when the last managed permission for a resource is removed.
|
||||
// this is only used for dashboards and folders, as those are the only resources that use the managed permissions collector.
|
||||
func (r resourceReconciler) collectOrphanDeletes(
|
||||
ctx context.Context,
|
||||
namespace string,
|
||||
allTuplesInZanzana []*authzextv1.Tuple,
|
||||
legacyReturnedTuples map[string]map[string]*openfgav1.TupleKey,
|
||||
) ([]*openfgav1.TupleKeyWithoutCondition, error) {
|
||||
if r.orphanObjectPrefix == "" || len(r.orphanRelations) == 0 {
|
||||
return []*openfgav1.TupleKeyWithoutCondition{}, nil
|
||||
}
|
||||
|
||||
seen := map[string]struct{}{}
|
||||
out := []*openfgav1.TupleKeyWithoutCondition{}
|
||||
|
||||
// what relation types we are interested in cleaning up
|
||||
relationsToCleanup := map[string]struct{}{}
|
||||
for _, rel := range r.orphanRelations {
|
||||
relationsToCleanup[rel] = struct{}{}
|
||||
}
|
||||
|
||||
for _, tuple := range allTuplesInZanzana {
|
||||
if tuple == nil || tuple.Key == nil {
|
||||
continue
|
||||
}
|
||||
// only cleanup the particular relation types we are interested in
|
||||
if _, ok := relationsToCleanup[tuple.Key.Relation]; !ok {
|
||||
continue
|
||||
}
|
||||
// only cleanup the particular object types we are interested in (either dashboards or folders)
|
||||
if !strings.HasPrefix(tuple.Key.Object, r.orphanObjectPrefix) {
|
||||
continue
|
||||
}
|
||||
// if legacy returned this object, it's not orphaned
|
||||
if _, ok := legacyReturnedTuples[tuple.Key.Object]; ok {
|
||||
continue
|
||||
}
|
||||
// keep track of the tuples we have already seen and marked for deletion
|
||||
key := fmt.Sprintf("%s|%s|%s", tuple.Key.User, tuple.Key.Relation, tuple.Key.Object)
|
||||
if _, ok := seen[key]; ok {
|
||||
continue
|
||||
}
|
||||
seen[key] = struct{}{}
|
||||
out = append(out, &openfgav1.TupleKeyWithoutCondition{
|
||||
User: tuple.Key.User,
|
||||
Relation: tuple.Key.Relation,
|
||||
Object: tuple.Key.Object,
|
||||
})
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (r resourceReconciler) readAllTuples(ctx context.Context, namespace string) ([]*authzextv1.Tuple, error) {
|
||||
var (
|
||||
out []*authzextv1.Tuple
|
||||
continueToken string
|
||||
)
|
||||
for {
|
||||
res, err := r.client.Read(ctx, &authzextv1.ReadRequest{
|
||||
Namespace: namespace,
|
||||
ContinuationToken: continueToken,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out = append(out, res.Tuples...)
|
||||
continueToken = res.ContinuationToken
|
||||
if continueToken == "" {
|
||||
return out, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,110 +0,0 @@
|
||||
package dualwrite
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
authlib "github.com/grafana/authlib/types"
|
||||
openfgav1 "github.com/openfga/api/proto/openfga/v1"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
|
||||
"github.com/grafana/grafana/pkg/services/authz/zanzana"
|
||||
)
|
||||
|
||||
type fakeZanzanaClient struct {
|
||||
readTuples []*authzextv1.Tuple
|
||||
writeReqs []*authzextv1.WriteRequest
|
||||
}
|
||||
|
||||
func (f *fakeZanzanaClient) Read(ctx context.Context, req *authzextv1.ReadRequest) (*authzextv1.ReadResponse, error) {
|
||||
return &authzextv1.ReadResponse{
|
||||
Tuples: f.readTuples,
|
||||
ContinuationToken: "",
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *fakeZanzanaClient) Write(ctx context.Context, req *authzextv1.WriteRequest) error {
|
||||
f.writeReqs = append(f.writeReqs, req)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeZanzanaClient) BatchCheck(ctx context.Context, req *authzextv1.BatchCheckRequest) (*authzextv1.BatchCheckResponse, error) {
|
||||
return &authzextv1.BatchCheckResponse{}, nil
|
||||
}
|
||||
|
||||
func (f *fakeZanzanaClient) Mutate(ctx context.Context, req *authzextv1.MutateRequest) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeZanzanaClient) Query(ctx context.Context, req *authzextv1.QueryRequest) (*authzextv1.QueryResponse, error) {
|
||||
return &authzextv1.QueryResponse{}, nil
|
||||
}
|
||||
|
||||
func (f *fakeZanzanaClient) Check(ctx context.Context, info authlib.AuthInfo, req authlib.CheckRequest, folder string) (authlib.CheckResponse, error) {
|
||||
return authlib.CheckResponse{Allowed: true}, nil
|
||||
}
|
||||
|
||||
func (f *fakeZanzanaClient) Compile(ctx context.Context, info authlib.AuthInfo, req authlib.ListRequest) (authlib.ItemChecker, authlib.Zookie, error) {
|
||||
return func(name, folder string) bool { return true }, authlib.NoopZookie{}, nil
|
||||
}
|
||||
|
||||
func TestResourceReconciler_OrphanedManagedDashboardTuplesAreDeleted(t *testing.T) {
|
||||
legacy := func(ctx context.Context, orgID int64) (map[string]map[string]*openfgav1.TupleKey, error) {
|
||||
return map[string]map[string]*openfgav1.TupleKey{}, nil
|
||||
}
|
||||
zCollector := func(ctx context.Context, client zanzana.Client, object string, namespace string) (map[string]*openfgav1.TupleKey, error) {
|
||||
return map[string]*openfgav1.TupleKey{}, nil
|
||||
}
|
||||
|
||||
fake := &fakeZanzanaClient{}
|
||||
r := newResourceReconciler("managed dashboard permissions", legacy, zCollector, fake)
|
||||
|
||||
require.NotEmpty(t, r.orphanObjectPrefix)
|
||||
require.NotEmpty(t, r.orphanRelations)
|
||||
|
||||
relAllowed := r.orphanRelations[0]
|
||||
objAllowed := r.orphanObjectPrefix + "dash-uid-1"
|
||||
|
||||
fake.readTuples = []*authzextv1.Tuple{
|
||||
// should be removed
|
||||
{
|
||||
Key: &authzextv1.TupleKey{
|
||||
User: "user:1",
|
||||
Relation: relAllowed,
|
||||
Object: objAllowed,
|
||||
},
|
||||
},
|
||||
|
||||
// same relation but different object type/prefix - should stay
|
||||
{
|
||||
Key: &authzextv1.TupleKey{
|
||||
User: "user:1",
|
||||
Relation: relAllowed,
|
||||
Object: "folder:some-folder",
|
||||
},
|
||||
},
|
||||
// same prefix but different relation - should stay
|
||||
{
|
||||
Key: &authzextv1.TupleKey{
|
||||
User: "user:1",
|
||||
Relation: zanzana.RelationParent,
|
||||
Object: objAllowed,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := r.reconcile(context.Background(), authlib.OrgNamespaceFormatter(1))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, fake.writeReqs, 1)
|
||||
wr := fake.writeReqs[0]
|
||||
require.NotNil(t, wr.Deletes)
|
||||
require.Nil(t, wr.Writes)
|
||||
|
||||
require.Len(t, wr.Deletes.TupleKeys, 1)
|
||||
del := wr.Deletes.TupleKeys[0]
|
||||
require.Equal(t, "user:1", del.User)
|
||||
require.Equal(t, relAllowed, del.Relation)
|
||||
require.Equal(t, objAllowed, del.Object)
|
||||
}
|
||||
@@ -32,8 +32,6 @@ func NewOpenFGAServer(cfg setting.ZanzanaServerSettings, store storage.OpenFGADa
|
||||
opts := []server.OpenFGAServiceV1Option{
|
||||
server.WithDatastore(store),
|
||||
server.WithLogger(zlogger.New(logger)),
|
||||
|
||||
// Cache settings
|
||||
server.WithCheckCacheLimit(cfg.CacheSettings.CheckCacheLimit),
|
||||
server.WithCacheControllerEnabled(cfg.CacheSettings.CacheControllerEnabled),
|
||||
server.WithCacheControllerTTL(cfg.CacheSettings.CacheControllerTTL),
|
||||
@@ -42,25 +40,16 @@ func NewOpenFGAServer(cfg setting.ZanzanaServerSettings, store storage.OpenFGADa
|
||||
server.WithCheckIteratorCacheEnabled(cfg.CacheSettings.CheckIteratorCacheEnabled),
|
||||
server.WithCheckIteratorCacheMaxResults(cfg.CacheSettings.CheckIteratorCacheMaxResults),
|
||||
server.WithCheckIteratorCacheTTL(cfg.CacheSettings.CheckIteratorCacheTTL),
|
||||
|
||||
// ListObjects settings
|
||||
server.WithListObjectsMaxResults(cfg.ListObjectsMaxResults),
|
||||
server.WithListObjectsIteratorCacheEnabled(cfg.CacheSettings.ListObjectsIteratorCacheEnabled),
|
||||
server.WithListObjectsIteratorCacheMaxResults(cfg.CacheSettings.ListObjectsIteratorCacheMaxResults),
|
||||
server.WithListObjectsIteratorCacheTTL(cfg.CacheSettings.ListObjectsIteratorCacheTTL),
|
||||
server.WithListObjectsDeadline(cfg.ListObjectsDeadline),
|
||||
|
||||
// Shared iterator settings
|
||||
server.WithSharedIteratorEnabled(cfg.CacheSettings.SharedIteratorEnabled),
|
||||
server.WithSharedIteratorLimit(cfg.CacheSettings.SharedIteratorLimit),
|
||||
server.WithSharedIteratorTTL(cfg.CacheSettings.SharedIteratorTTL),
|
||||
|
||||
server.WithContextPropagationToDatastore(true),
|
||||
server.WithListObjectsDeadline(cfg.ListObjectsDeadline),
|
||||
}
|
||||
|
||||
openfgaOpts := withOpenFGAOptions(cfg)
|
||||
opts = append(opts, openfgaOpts...)
|
||||
|
||||
srv, err := server.NewServerWithOpts(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -69,129 +58,6 @@ func NewOpenFGAServer(cfg setting.ZanzanaServerSettings, store storage.OpenFGADa
|
||||
return srv, nil
|
||||
}
|
||||
|
||||
func withOpenFGAOptions(cfg setting.ZanzanaServerSettings) []server.OpenFGAServiceV1Option {
|
||||
opts := make([]server.OpenFGAServiceV1Option, 0)
|
||||
|
||||
listOpts := withListOptions(cfg)
|
||||
opts = append(opts, listOpts...)
|
||||
|
||||
// Check settings
|
||||
if cfg.OpenFgaServerSettings.MaxConcurrentReadsForCheck != 0 {
|
||||
opts = append(opts, server.WithMaxConcurrentReadsForCheck(cfg.OpenFgaServerSettings.MaxConcurrentReadsForCheck))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.CheckDatabaseThrottleThreshold != 0 || cfg.OpenFgaServerSettings.CheckDatabaseThrottleDuration != 0 {
|
||||
opts = append(opts, server.WithCheckDatabaseThrottle(cfg.OpenFgaServerSettings.CheckDatabaseThrottleThreshold, cfg.OpenFgaServerSettings.CheckDatabaseThrottleDuration))
|
||||
}
|
||||
|
||||
// Batch check settings
|
||||
if cfg.OpenFgaServerSettings.MaxConcurrentChecksPerBatchCheck != 0 {
|
||||
opts = append(opts, server.WithMaxConcurrentChecksPerBatchCheck(cfg.OpenFgaServerSettings.MaxConcurrentChecksPerBatchCheck))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.MaxChecksPerBatchCheck != 0 {
|
||||
opts = append(opts, server.WithMaxChecksPerBatchCheck(cfg.OpenFgaServerSettings.MaxChecksPerBatchCheck))
|
||||
}
|
||||
|
||||
// Resolve node settings
|
||||
if cfg.OpenFgaServerSettings.ResolveNodeLimit != 0 {
|
||||
opts = append(opts, server.WithResolveNodeLimit(cfg.OpenFgaServerSettings.ResolveNodeLimit))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.ResolveNodeBreadthLimit != 0 {
|
||||
opts = append(opts, server.WithResolveNodeBreadthLimit(cfg.OpenFgaServerSettings.ResolveNodeBreadthLimit))
|
||||
}
|
||||
|
||||
// Dispatch throttling settings
|
||||
if cfg.OpenFgaServerSettings.DispatchThrottlingCheckResolverEnabled {
|
||||
opts = append(opts, server.WithDispatchThrottlingCheckResolverEnabled(cfg.OpenFgaServerSettings.DispatchThrottlingCheckResolverEnabled))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.DispatchThrottlingCheckResolverFrequency != 0 {
|
||||
opts = append(opts, server.WithDispatchThrottlingCheckResolverFrequency(cfg.OpenFgaServerSettings.DispatchThrottlingCheckResolverFrequency))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.DispatchThrottlingCheckResolverThreshold != 0 {
|
||||
opts = append(opts, server.WithDispatchThrottlingCheckResolverThreshold(cfg.OpenFgaServerSettings.DispatchThrottlingCheckResolverThreshold))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.DispatchThrottlingCheckResolverMaxThreshold != 0 {
|
||||
opts = append(opts, server.WithDispatchThrottlingCheckResolverMaxThreshold(cfg.OpenFgaServerSettings.DispatchThrottlingCheckResolverMaxThreshold))
|
||||
}
|
||||
|
||||
// Shadow check/query settings
|
||||
if cfg.OpenFgaServerSettings.ShadowCheckResolverTimeout != 0 {
|
||||
opts = append(opts, server.WithShadowCheckResolverTimeout(cfg.OpenFgaServerSettings.ShadowCheckResolverTimeout))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.ShadowListObjectsQueryTimeout != 0 {
|
||||
opts = append(opts, server.WithShadowListObjectsQueryTimeout(cfg.OpenFgaServerSettings.ShadowListObjectsQueryTimeout))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.ShadowListObjectsQueryMaxDeltaItems != 0 {
|
||||
opts = append(opts, server.WithShadowListObjectsQueryMaxDeltaItems(cfg.OpenFgaServerSettings.ShadowListObjectsQueryMaxDeltaItems))
|
||||
}
|
||||
|
||||
if cfg.OpenFgaServerSettings.RequestTimeout != 0 {
|
||||
opts = append(opts, server.WithRequestTimeout(cfg.OpenFgaServerSettings.RequestTimeout))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.MaxAuthorizationModelSizeInBytes != 0 {
|
||||
opts = append(opts, server.WithMaxAuthorizationModelSizeInBytes(cfg.OpenFgaServerSettings.MaxAuthorizationModelSizeInBytes))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.AuthorizationModelCacheSize != 0 {
|
||||
opts = append(opts, server.WithAuthorizationModelCacheSize(cfg.OpenFgaServerSettings.AuthorizationModelCacheSize))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.ChangelogHorizonOffset != 0 {
|
||||
opts = append(opts, server.WithChangelogHorizonOffset(cfg.OpenFgaServerSettings.ChangelogHorizonOffset))
|
||||
}
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
func withListOptions(cfg setting.ZanzanaServerSettings) []server.OpenFGAServiceV1Option {
|
||||
opts := make([]server.OpenFGAServiceV1Option, 0)
|
||||
|
||||
// ListObjects settings
|
||||
if cfg.OpenFgaServerSettings.MaxConcurrentReadsForListObjects != 0 {
|
||||
opts = append(opts, server.WithMaxConcurrentReadsForListObjects(cfg.OpenFgaServerSettings.MaxConcurrentReadsForListObjects))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.ListObjectsDispatchThrottlingEnabled {
|
||||
opts = append(opts, server.WithListObjectsDispatchThrottlingEnabled(cfg.OpenFgaServerSettings.ListObjectsDispatchThrottlingEnabled))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.ListObjectsDispatchThrottlingFrequency != 0 {
|
||||
opts = append(opts, server.WithListObjectsDispatchThrottlingFrequency(cfg.OpenFgaServerSettings.ListObjectsDispatchThrottlingFrequency))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.ListObjectsDispatchThrottlingThreshold != 0 {
|
||||
opts = append(opts, server.WithListObjectsDispatchThrottlingThreshold(cfg.OpenFgaServerSettings.ListObjectsDispatchThrottlingThreshold))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.ListObjectsDispatchThrottlingMaxThreshold != 0 {
|
||||
opts = append(opts, server.WithListObjectsDispatchThrottlingMaxThreshold(cfg.OpenFgaServerSettings.ListObjectsDispatchThrottlingMaxThreshold))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.ListObjectsDatabaseThrottleThreshold != 0 || cfg.OpenFgaServerSettings.ListObjectsDatabaseThrottleDuration != 0 {
|
||||
opts = append(opts, server.WithListObjectsDatabaseThrottle(cfg.OpenFgaServerSettings.ListObjectsDatabaseThrottleThreshold, cfg.OpenFgaServerSettings.ListObjectsDatabaseThrottleDuration))
|
||||
}
|
||||
|
||||
// ListUsers settings
|
||||
if cfg.OpenFgaServerSettings.ListUsersDeadline != 0 {
|
||||
opts = append(opts, server.WithListUsersDeadline(cfg.OpenFgaServerSettings.ListUsersDeadline))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.ListUsersMaxResults != 0 {
|
||||
opts = append(opts, server.WithListUsersMaxResults(cfg.OpenFgaServerSettings.ListUsersMaxResults))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.MaxConcurrentReadsForListUsers != 0 {
|
||||
opts = append(opts, server.WithMaxConcurrentReadsForListUsers(cfg.OpenFgaServerSettings.MaxConcurrentReadsForListUsers))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.ListUsersDispatchThrottlingEnabled {
|
||||
opts = append(opts, server.WithListUsersDispatchThrottlingEnabled(cfg.OpenFgaServerSettings.ListUsersDispatchThrottlingEnabled))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.ListUsersDispatchThrottlingFrequency != 0 {
|
||||
opts = append(opts, server.WithListUsersDispatchThrottlingFrequency(cfg.OpenFgaServerSettings.ListUsersDispatchThrottlingFrequency))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.ListUsersDispatchThrottlingThreshold != 0 {
|
||||
opts = append(opts, server.WithListUsersDispatchThrottlingThreshold(cfg.OpenFgaServerSettings.ListUsersDispatchThrottlingThreshold))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.ListUsersDispatchThrottlingMaxThreshold != 0 {
|
||||
opts = append(opts, server.WithListUsersDispatchThrottlingMaxThreshold(cfg.OpenFgaServerSettings.ListUsersDispatchThrottlingMaxThreshold))
|
||||
}
|
||||
if cfg.OpenFgaServerSettings.ListUsersDatabaseThrottleThreshold != 0 || cfg.OpenFgaServerSettings.ListUsersDatabaseThrottleDuration != 0 {
|
||||
opts = append(opts, server.WithListUsersDatabaseThrottle(cfg.OpenFgaServerSettings.ListUsersDatabaseThrottleThreshold, cfg.OpenFgaServerSettings.ListUsersDatabaseThrottleDuration))
|
||||
}
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
func NewOpenFGAHttpServer(cfg setting.ZanzanaServerSettings, srv grpcserver.Provider) (*http.Server, error) {
|
||||
dialOpts := []grpc.DialOption{
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"github.com/open-feature/go-sdk/openfeature"
|
||||
)
|
||||
|
||||
func newFeaturesServiceProvider(url string, client *http.Client) (openfeature.FeatureProvider, error) {
|
||||
func newGOFFProvider(url string, client *http.Client) (openfeature.FeatureProvider, error) {
|
||||
options := gofeatureflag.ProviderOptions{
|
||||
Endpoint: url,
|
||||
// consider using github.com/grafana/grafana/pkg/infra/httpclient/provider.go
|
||||
@@ -19,11 +19,11 @@ const (
|
||||
|
||||
// OpenFeatureConfig holds configuration for initializing OpenFeature
|
||||
type OpenFeatureConfig struct {
|
||||
// ProviderType is either "static", "features-service", or "ofrep"
|
||||
// ProviderType is either "static", "goff", or "ofrep"
|
||||
ProviderType string
|
||||
// URL is the remote provider's URL (required for features-service + OFREP providers)
|
||||
// URL is the GOFF or OFREP service URL (required for GOFF + OFREP providers)
|
||||
URL *url.URL
|
||||
// HTTPClient is a pre-configured HTTP client (optional, used by features-service + OFREP providers)
|
||||
// HTTPClient is a pre-configured HTTP client (optional, used for GOFF + OFREP providers)
|
||||
HTTPClient *http.Client
|
||||
// StaticFlags are the feature flags to use with static provider
|
||||
StaticFlags map[string]bool
|
||||
@@ -35,9 +35,9 @@ type OpenFeatureConfig struct {
|
||||
|
||||
// InitOpenFeature initializes OpenFeature with the provided configuration
|
||||
func InitOpenFeature(config OpenFeatureConfig) error {
|
||||
// For remote providers, ensure we have a URL
|
||||
if (config.ProviderType == setting.FeaturesServiceProviderType || config.ProviderType == setting.OFREPProviderType) && (config.URL == nil || config.URL.String() == "") {
|
||||
return fmt.Errorf("URL is required for remote providers")
|
||||
// For GOFF + OFREP providers, ensure we have a URL
|
||||
if (config.ProviderType == setting.GOFFProviderType || config.ProviderType == setting.OFREPProviderType) && (config.URL == nil || config.URL.String() == "") {
|
||||
return fmt.Errorf("URL is required for GOFF + OFREP providers")
|
||||
}
|
||||
|
||||
p, err := createProvider(config.ProviderType, config.URL, config.StaticFlags, config.HTTPClient)
|
||||
@@ -66,10 +66,10 @@ func InitOpenFeatureWithCfg(cfg *setting.Cfg) error {
|
||||
}
|
||||
|
||||
var httpcli *http.Client
|
||||
if cfg.OpenFeature.ProviderType == setting.FeaturesServiceProviderType || cfg.OpenFeature.ProviderType == setting.OFREPProviderType {
|
||||
if cfg.OpenFeature.ProviderType == setting.GOFFProviderType || cfg.OpenFeature.ProviderType == setting.OFREPProviderType {
|
||||
var m *clientauthmiddleware.TokenExchangeMiddleware
|
||||
|
||||
if cfg.OpenFeature.ProviderType == setting.FeaturesServiceProviderType {
|
||||
if cfg.OpenFeature.ProviderType == setting.GOFFProviderType {
|
||||
m, err = clientauthmiddleware.NewTokenExchangeMiddleware(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create token exchange middleware: %w", err)
|
||||
@@ -103,13 +103,13 @@ func createProvider(
|
||||
staticFlags map[string]bool,
|
||||
httpClient *http.Client,
|
||||
) (openfeature.FeatureProvider, error) {
|
||||
if providerType == setting.FeaturesServiceProviderType || providerType == setting.OFREPProviderType {
|
||||
if providerType == setting.GOFFProviderType || providerType == setting.OFREPProviderType {
|
||||
if u == nil || u.String() == "" {
|
||||
return nil, fmt.Errorf("feature provider url is required for FeaturesServiceProviderType + OFREPProviderType")
|
||||
return nil, fmt.Errorf("feature provider url is required for GOFFProviderType + OFREPProviderType")
|
||||
}
|
||||
|
||||
if providerType == setting.FeaturesServiceProviderType {
|
||||
return newFeaturesServiceProvider(u.String(), httpClient)
|
||||
if providerType == setting.GOFFProviderType {
|
||||
return newGOFFProvider(u.String(), httpClient)
|
||||
}
|
||||
|
||||
if providerType == setting.OFREPProviderType {
|
||||
|
||||
@@ -35,9 +35,9 @@ func TestCreateProvider(t *testing.T) {
|
||||
expectedProvider: setting.StaticProviderType,
|
||||
},
|
||||
{
|
||||
name: "features-service provider",
|
||||
name: "goff provider",
|
||||
cfg: setting.OpenFeatureSettings{
|
||||
ProviderType: setting.FeaturesServiceProviderType,
|
||||
ProviderType: setting.GOFFProviderType,
|
||||
URL: u,
|
||||
TargetingKey: "grafana",
|
||||
},
|
||||
@@ -45,12 +45,12 @@ func TestCreateProvider(t *testing.T) {
|
||||
Namespace: "*",
|
||||
Audiences: []string{"features.grafana.app"},
|
||||
},
|
||||
expectedProvider: setting.FeaturesServiceProviderType,
|
||||
expectedProvider: setting.GOFFProviderType,
|
||||
},
|
||||
{
|
||||
name: "features-service provider with failing token exchange",
|
||||
name: "goff provider with failing token exchange",
|
||||
cfg: setting.OpenFeatureSettings{
|
||||
ProviderType: setting.FeaturesServiceProviderType,
|
||||
ProviderType: setting.GOFFProviderType,
|
||||
URL: u,
|
||||
TargetingKey: "grafana",
|
||||
},
|
||||
@@ -58,7 +58,7 @@ func TestCreateProvider(t *testing.T) {
|
||||
Namespace: "*",
|
||||
Audiences: []string{"features.grafana.app"},
|
||||
},
|
||||
expectedProvider: setting.FeaturesServiceProviderType,
|
||||
expectedProvider: setting.GOFFProviderType,
|
||||
failSigning: true,
|
||||
},
|
||||
{
|
||||
@@ -107,7 +107,7 @@ func TestCreateProvider(t *testing.T) {
|
||||
|
||||
tokenExchangeMiddleware := middleware.TestingTokenExchangeMiddleware(tokenExchangeClient)
|
||||
httpClient, err := createHTTPClient(tokenExchangeMiddleware)
|
||||
require.NoError(t, err, "failed to create features-service http client")
|
||||
require.NoError(t, err, "failed to create goff http client")
|
||||
provider, err := createProvider(tc.cfg.ProviderType, tc.cfg.URL, nil, httpClient)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -115,7 +115,7 @@ func TestCreateProvider(t *testing.T) {
|
||||
require.NoError(t, err, "failed to set provider")
|
||||
|
||||
switch tc.expectedProvider {
|
||||
case setting.FeaturesServiceProviderType:
|
||||
case setting.GOFFProviderType:
|
||||
_, ok := provider.(*gofeatureflag.Provider)
|
||||
assert.True(t, ok, "expected provider to be of type goff.Provider")
|
||||
|
||||
@@ -141,10 +141,10 @@ func testGoFFProvider(t *testing.T, failSigning bool) {
|
||||
_, err := openfeature.NewDefaultClient().BooleanValueDetails(ctx, "test", false, openfeature.NewEvaluationContext("test", map[string]interface{}{"test": "test"}))
|
||||
|
||||
// Error related to the token exchange should be returned if signing fails
|
||||
// otherwise, it should return a connection refused error since the features-service URL is not set
|
||||
// otherwise, it should return a connection refused error since the goff URL is not set
|
||||
if failSigning {
|
||||
assert.ErrorContains(t, err, "failed to exchange token: error signing token", "should return an error when signing fails")
|
||||
} else {
|
||||
assert.ErrorContains(t, err, "connect: connection refused", "should return an error when features-service url is not set")
|
||||
assert.ErrorContains(t, err, "connect: connection refused", "should return an error when goff url is not set")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -650,6 +650,13 @@ var (
|
||||
Stage: FeatureStageExperimental,
|
||||
Owner: grafanaDatavizSquad,
|
||||
},
|
||||
{
|
||||
Name: "kubernetesFeatureToggles",
|
||||
Description: "Use the kubernetes API for feature toggle management in the frontend",
|
||||
Stage: FeatureStageExperimental,
|
||||
FrontendOnly: true,
|
||||
Owner: grafanaOperatorExperienceSquad,
|
||||
},
|
||||
{
|
||||
Name: "cloudRBACRoles",
|
||||
Description: "Enabled grafana cloud specific RBAC roles",
|
||||
@@ -2083,13 +2090,6 @@ var (
|
||||
FrontendOnly: false,
|
||||
Owner: grafanaOperatorExperienceSquad,
|
||||
},
|
||||
{
|
||||
Name: "profilesExemplars",
|
||||
Description: "Enables profiles exemplars support in profiles drilldown",
|
||||
Stage: FeatureStageExperimental,
|
||||
Owner: grafanaObservabilityTracesAndProfilingSquad,
|
||||
FrontendOnly: false,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
2
pkg/services/featuremgmt/toggles_gen.csv
generated
2
pkg/services/featuremgmt/toggles_gen.csv
generated
@@ -90,6 +90,7 @@ pdfTables,preview,@grafana/grafana-operator-experience-squad,false,false,false
|
||||
canvasPanelPanZoom,preview,@grafana/dataviz-squad,false,false,true
|
||||
timeComparison,experimental,@grafana/dataviz-squad,false,false,true
|
||||
tableSharedCrosshair,experimental,@grafana/dataviz-squad,false,false,true
|
||||
kubernetesFeatureToggles,experimental,@grafana/grafana-operator-experience-squad,false,false,true
|
||||
cloudRBACRoles,preview,@grafana/identity-access-team,false,true,false
|
||||
alertingQueryOptimization,GA,@grafana/alerting-squad,false,false,false
|
||||
jitterAlertRulesWithinGroups,preview,@grafana/alerting-squad,false,true,false
|
||||
@@ -282,4 +283,3 @@ useMTPlugins,experimental,@grafana/plugins-platform-backend,false,false,true
|
||||
multiPropsVariables,experimental,@grafana/dashboards-squad,false,false,true
|
||||
smoothingTransformation,experimental,@grafana/datapro,false,false,true
|
||||
secretsManagementAppPlatformAwsKeeper,experimental,@grafana/grafana-operator-experience-squad,false,false,false
|
||||
profilesExemplars,experimental,@grafana/observability-traces-and-profiling,false,false,false
|
||||
|
||||
|
4
pkg/services/featuremgmt/toggles_gen.go
generated
4
pkg/services/featuremgmt/toggles_gen.go
generated
@@ -785,8 +785,4 @@ const (
|
||||
// FlagSecretsManagementAppPlatformAwsKeeper
|
||||
// Enables the creation of keepers that manage secrets stored on AWS secrets manager
|
||||
FlagSecretsManagementAppPlatformAwsKeeper = "secretsManagementAppPlatformAwsKeeper"
|
||||
|
||||
// FlagProfilesExemplars
|
||||
// Enables profiles exemplars support in profiles drilldown
|
||||
FlagProfilesExemplars = "profilesExemplars"
|
||||
)
|
||||
|
||||
15
pkg/services/featuremgmt/toggles_gen.json
generated
15
pkg/services/featuremgmt/toggles_gen.json
generated
@@ -2044,8 +2044,7 @@
|
||||
"metadata": {
|
||||
"name": "kubernetesFeatureToggles",
|
||||
"resourceVersion": "1764664939750",
|
||||
"creationTimestamp": "2024-01-18T05:32:44Z",
|
||||
"deletionTimestamp": "2026-01-07T12:02:51Z"
|
||||
"creationTimestamp": "2024-01-18T05:32:44Z"
|
||||
},
|
||||
"spec": {
|
||||
"description": "Use the kubernetes API for feature toggle management in the frontend",
|
||||
@@ -2867,18 +2866,6 @@
|
||||
"expression": "true"
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"name": "profilesExemplars",
|
||||
"resourceVersion": "1767777507980",
|
||||
"creationTimestamp": "2026-01-07T09:18:27Z"
|
||||
},
|
||||
"spec": {
|
||||
"description": "Enables profiles exemplars support in profiles drilldown",
|
||||
"stage": "experimental",
|
||||
"codeowner": "@grafana/observability-traces-and-profiling"
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"name": "prometheusAzureOverrideAudience",
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/grafana/grafana/pkg/infra/tracing"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/plugins/auth"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin/coreplugin"
|
||||
"github.com/grafana/grafana/pkg/plugins/config"
|
||||
"github.com/grafana/grafana/pkg/plugins/envvars"
|
||||
"github.com/grafana/grafana/pkg/plugins/manager/loader/angular/angularinspector"
|
||||
@@ -18,7 +19,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/plugins/manager/registry"
|
||||
"github.com/grafana/grafana/pkg/plugins/manager/signature"
|
||||
"github.com/grafana/grafana/pkg/plugins/pluginassets"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/coreplugin"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginaccesscontrol"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/provisionedplugins"
|
||||
)
|
||||
|
||||
@@ -10,6 +10,8 @@ import (
|
||||
"github.com/grafana/grafana/pkg/infra/tracing"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/plugins/auth"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin/coreplugin"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin/provider"
|
||||
"github.com/grafana/grafana/pkg/plugins/envvars"
|
||||
"github.com/grafana/grafana/pkg/plugins/log"
|
||||
"github.com/grafana/grafana/pkg/plugins/manager/client"
|
||||
@@ -37,7 +39,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/angularinspector"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/angularpatternsstore"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/clientmiddleware"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/coreplugin"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/installsync"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/keyretriever"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/keyretriever/dynamic"
|
||||
@@ -145,7 +146,8 @@ var WireSet = wire.NewSet(
|
||||
// WireExtensionSet provides a wire.ProviderSet of plugin providers that can be
|
||||
// extended.
|
||||
var WireExtensionSet = wire.NewSet(
|
||||
coreplugin.ProvideCoreProvider,
|
||||
provider.ProvideService,
|
||||
wire.Bind(new(plugins.BackendFactoryProvider), new(*provider.Service)),
|
||||
signature.ProvideOSSAuthorizer,
|
||||
wire.Bind(new(plugins.PluginLoaderAuthorizer), new(*signature.UnsignedPluginAuthorizer)),
|
||||
ProvideClientWithMiddlewares,
|
||||
|
||||
@@ -19,10 +19,10 @@ import (
|
||||
"github.com/grafana/grafana/pkg/infra/fs"
|
||||
"github.com/grafana/grafana/pkg/infra/tracing"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin/coreplugin"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
"github.com/grafana/grafana/pkg/services/org"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/coreplugin"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginstore"
|
||||
"github.com/grafana/grafana/pkg/services/searchV2"
|
||||
"github.com/grafana/grafana/pkg/services/sqlstore"
|
||||
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
"github.com/grafana/grafana/pkg/infra/tracing"
|
||||
"github.com/grafana/grafana/pkg/plugins"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin/coreplugin"
|
||||
"github.com/grafana/grafana/pkg/plugins/backendplugin/provider"
|
||||
pluginsCfg "github.com/grafana/grafana/pkg/plugins/config"
|
||||
"github.com/grafana/grafana/pkg/plugins/manager/client"
|
||||
"github.com/grafana/grafana/pkg/plugins/manager/loader"
|
||||
@@ -25,7 +27,6 @@ import (
|
||||
"github.com/grafana/grafana/pkg/plugins/pluginassets"
|
||||
"github.com/grafana/grafana/pkg/plugins/pluginerrs"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/coreplugin"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pipeline"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginconfig"
|
||||
"github.com/grafana/grafana/pkg/services/pluginsintegration/pluginsources"
|
||||
@@ -51,7 +52,7 @@ func CreateIntegrationTestCtx(t *testing.T, cfg *setting.Cfg, coreRegistry *core
|
||||
disc := pipeline.ProvideDiscoveryStage(pCfg, reg)
|
||||
boot := pipeline.ProvideBootstrapStage(pCfg, signature.ProvideService(pCfg, statickey.New()), pluginassets.NewLocalProvider())
|
||||
valid := pipeline.ProvideValidationStage(pCfg, signature.NewValidator(signature.NewUnsignedAuthorizer(pCfg)), angularInspector)
|
||||
init := pipeline.ProvideInitializationStage(pCfg, reg, coreplugin.ProvideCoreProvider(coreRegistry), proc, &pluginfakes.FakeAuthService{}, pluginfakes.NewFakeRoleRegistry(), pluginfakes.NewFakeActionSetRegistry(), nil, tracing.InitializeTracerForTest(), provisionedplugins.NewNoop())
|
||||
init := pipeline.ProvideInitializationStage(pCfg, reg, provider.ProvideService(coreRegistry), proc, &pluginfakes.FakeAuthService{}, pluginfakes.NewFakeRoleRegistry(), pluginfakes.NewFakeActionSetRegistry(), nil, tracing.InitializeTracerForTest(), provisionedplugins.NewNoop())
|
||||
term, err := pipeline.ProvideTerminationStage(pCfg, reg, proc)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -97,7 +98,7 @@ func CreateTestLoader(t *testing.T, cfg *pluginsCfg.PluginManagementCfg, opts Lo
|
||||
if opts.Initializer == nil {
|
||||
reg := registry.ProvideService()
|
||||
coreRegistry := coreplugin.NewRegistry(make(map[string]backendplugin.PluginFactoryFunc))
|
||||
opts.Initializer = pipeline.ProvideInitializationStage(cfg, reg, coreplugin.ProvideCoreProvider(coreRegistry), process.ProvideService(), &pluginfakes.FakeAuthService{}, pluginfakes.NewFakeRoleRegistry(), pluginfakes.NewFakeActionSetRegistry(), nil, tracing.InitializeTracerForTest(), provisionedplugins.NewNoop())
|
||||
opts.Initializer = pipeline.ProvideInitializationStage(cfg, reg, provider.ProvideService(coreRegistry), process.ProvideService(), &pluginfakes.FakeAuthService{}, pluginfakes.NewFakeRoleRegistry(), pluginfakes.NewFakeActionSetRegistry(), nil, tracing.InitializeTracerForTest(), provisionedplugins.NewNoop())
|
||||
}
|
||||
|
||||
if opts.Terminator == nil {
|
||||
|
||||
@@ -6,9 +6,9 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
StaticProviderType = "static"
|
||||
FeaturesServiceProviderType = "features-service"
|
||||
OFREPProviderType = "ofrep"
|
||||
StaticProviderType = "static"
|
||||
GOFFProviderType = "goff"
|
||||
OFREPProviderType = "ofrep"
|
||||
)
|
||||
|
||||
type OpenFeatureSettings struct {
|
||||
@@ -34,7 +34,7 @@ func (cfg *Cfg) readOpenFeatureSettings() error {
|
||||
|
||||
cfg.OpenFeature.TargetingKey = config.Key("targetingKey").MustString(defaultTargetingKey)
|
||||
|
||||
if strURL != "" && (cfg.OpenFeature.ProviderType == FeaturesServiceProviderType || cfg.OpenFeature.ProviderType == OFREPProviderType) {
|
||||
if strURL != "" && (cfg.OpenFeature.ProviderType == GOFFProviderType || cfg.OpenFeature.ProviderType == OFREPProviderType) {
|
||||
u, err := url.Parse(strURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid feature provider url: %w", err)
|
||||
|
||||
@@ -37,8 +37,6 @@ type ZanzanaServerSettings struct {
|
||||
OpenFGAHttpAddr string
|
||||
// Cache settings
|
||||
CacheSettings OpenFgaCacheSettings
|
||||
// OpenFGA server settings
|
||||
OpenFgaServerSettings OpenFgaServerSettings
|
||||
// Max number of results returned by ListObjects() query. Default is 1000.
|
||||
ListObjectsMaxResults uint32
|
||||
// Deadline for the ListObjects() query. Default is 3 seconds.
|
||||
@@ -52,92 +50,6 @@ type ZanzanaServerSettings struct {
|
||||
AllowInsecure bool
|
||||
}
|
||||
|
||||
type OpenFgaServerSettings struct {
|
||||
// ListObjects settings
|
||||
// Max number of concurrent datastore reads for ListObjects queries
|
||||
MaxConcurrentReadsForListObjects uint32
|
||||
// Enable dispatch throttling for ListObjects queries
|
||||
ListObjectsDispatchThrottlingEnabled bool
|
||||
// Frequency for dispatch throttling in ListObjects queries
|
||||
ListObjectsDispatchThrottlingFrequency time.Duration
|
||||
// Threshold for dispatch throttling in ListObjects queries
|
||||
ListObjectsDispatchThrottlingThreshold uint32
|
||||
// Max threshold for dispatch throttling in ListObjects queries
|
||||
ListObjectsDispatchThrottlingMaxThreshold uint32
|
||||
// Database throttle threshold for ListObjects queries
|
||||
ListObjectsDatabaseThrottleThreshold int
|
||||
// Database throttle duration for ListObjects queries
|
||||
ListObjectsDatabaseThrottleDuration time.Duration
|
||||
|
||||
// ListUsers settings
|
||||
// Deadline for ListUsers queries
|
||||
ListUsersDeadline time.Duration
|
||||
// Max number of results returned by ListUsers queries
|
||||
ListUsersMaxResults uint32
|
||||
// Max number of concurrent datastore reads for ListUsers queries
|
||||
MaxConcurrentReadsForListUsers uint32
|
||||
// Enable dispatch throttling for ListUsers queries
|
||||
ListUsersDispatchThrottlingEnabled bool
|
||||
// Frequency for dispatch throttling in ListUsers queries
|
||||
ListUsersDispatchThrottlingFrequency time.Duration
|
||||
// Threshold for dispatch throttling in ListUsers queries
|
||||
ListUsersDispatchThrottlingThreshold uint32
|
||||
// Max threshold for dispatch throttling in ListUsers queries
|
||||
ListUsersDispatchThrottlingMaxThreshold uint32
|
||||
// Database throttle threshold for ListUsers queries
|
||||
ListUsersDatabaseThrottleThreshold int
|
||||
// Database throttle duration for ListUsers queries
|
||||
ListUsersDatabaseThrottleDuration time.Duration
|
||||
|
||||
// Check settings
|
||||
// Max number of concurrent datastore reads for Check queries
|
||||
MaxConcurrentReadsForCheck uint32
|
||||
// Database throttle threshold for Check queries
|
||||
CheckDatabaseThrottleThreshold int
|
||||
// Database throttle duration for Check queries
|
||||
CheckDatabaseThrottleDuration time.Duration
|
||||
|
||||
// Batch check settings
|
||||
// Max number of concurrent checks per batch check request
|
||||
MaxConcurrentChecksPerBatchCheck uint32
|
||||
// Max number of checks per batch check request
|
||||
MaxChecksPerBatchCheck uint32
|
||||
|
||||
// Resolve node settings
|
||||
// Max number of nodes that can be resolved in a single query
|
||||
ResolveNodeLimit uint32
|
||||
// Max breadth of nodes that can be resolved in a single query
|
||||
ResolveNodeBreadthLimit uint32
|
||||
|
||||
// Dispatch throttling settings for Check resolver
|
||||
// Enable dispatch throttling for Check resolver
|
||||
DispatchThrottlingCheckResolverEnabled bool
|
||||
// Frequency for dispatch throttling in Check resolver
|
||||
DispatchThrottlingCheckResolverFrequency time.Duration
|
||||
// Threshold for dispatch throttling in Check resolver
|
||||
DispatchThrottlingCheckResolverThreshold uint32
|
||||
// Max threshold for dispatch throttling in Check resolver
|
||||
DispatchThrottlingCheckResolverMaxThreshold uint32
|
||||
|
||||
// Shadow check/query settings
|
||||
// Timeout for shadow check resolver
|
||||
ShadowCheckResolverTimeout time.Duration
|
||||
// Timeout for shadow ListObjects query
|
||||
ShadowListObjectsQueryTimeout time.Duration
|
||||
// Max delta items for shadow ListObjects query
|
||||
ShadowListObjectsQueryMaxDeltaItems int
|
||||
|
||||
// Request settings
|
||||
// Global request timeout
|
||||
RequestTimeout time.Duration
|
||||
// Max size in bytes for authorization model
|
||||
MaxAuthorizationModelSizeInBytes int
|
||||
// Size of the authorization model cache
|
||||
AuthorizationModelCacheSize int
|
||||
// Offset for changelog horizon
|
||||
ChangelogHorizonOffset int
|
||||
}
|
||||
|
||||
// Parameters to configure OpenFGA cache.
|
||||
type OpenFgaCacheSettings struct {
|
||||
// Number of items that will be kept in the in-memory cache used to resolve Check queries.
|
||||
@@ -244,56 +156,5 @@ func (cfg *Cfg) readZanzanaSettings() {
|
||||
zs.CacheSettings.SharedIteratorLimit = uint32(serverSec.Key("shared_iterator_limit").MustUint(1000))
|
||||
zs.CacheSettings.SharedIteratorTTL = serverSec.Key("shared_iterator_ttl").MustDuration(10 * time.Second)
|
||||
|
||||
openfgaSec := cfg.SectionWithEnvOverrides("openfga")
|
||||
|
||||
// ListObjects settings
|
||||
zs.OpenFgaServerSettings.MaxConcurrentReadsForListObjects = uint32(openfgaSec.Key("max_concurrent_reads_for_list_objects").MustUint(0))
|
||||
zs.OpenFgaServerSettings.ListObjectsDispatchThrottlingEnabled = openfgaSec.Key("list_objects_dispatch_throttling_enabled").MustBool(false)
|
||||
zs.OpenFgaServerSettings.ListObjectsDispatchThrottlingFrequency = openfgaSec.Key("list_objects_dispatch_throttling_frequency").MustDuration(0)
|
||||
zs.OpenFgaServerSettings.ListObjectsDispatchThrottlingThreshold = uint32(openfgaSec.Key("list_objects_dispatch_throttling_threshold").MustUint(0))
|
||||
zs.OpenFgaServerSettings.ListObjectsDispatchThrottlingMaxThreshold = uint32(openfgaSec.Key("list_objects_dispatch_throttling_max_threshold").MustUint(0))
|
||||
zs.OpenFgaServerSettings.ListObjectsDatabaseThrottleThreshold = openfgaSec.Key("list_objects_database_throttle_threshold").MustInt(0)
|
||||
zs.OpenFgaServerSettings.ListObjectsDatabaseThrottleDuration = openfgaSec.Key("list_objects_database_throttle_duration").MustDuration(0)
|
||||
|
||||
// ListUsers settings
|
||||
zs.OpenFgaServerSettings.ListUsersDeadline = openfgaSec.Key("list_users_deadline").MustDuration(0)
|
||||
zs.OpenFgaServerSettings.ListUsersMaxResults = uint32(openfgaSec.Key("list_users_max_results").MustUint(0))
|
||||
zs.OpenFgaServerSettings.MaxConcurrentReadsForListUsers = uint32(openfgaSec.Key("max_concurrent_reads_for_list_users").MustUint(0))
|
||||
zs.OpenFgaServerSettings.ListUsersDispatchThrottlingEnabled = openfgaSec.Key("list_users_dispatch_throttling_enabled").MustBool(false)
|
||||
zs.OpenFgaServerSettings.ListUsersDispatchThrottlingFrequency = openfgaSec.Key("list_users_dispatch_throttling_frequency").MustDuration(0)
|
||||
zs.OpenFgaServerSettings.ListUsersDispatchThrottlingThreshold = uint32(openfgaSec.Key("list_users_dispatch_throttling_threshold").MustUint(0))
|
||||
zs.OpenFgaServerSettings.ListUsersDispatchThrottlingMaxThreshold = uint32(openfgaSec.Key("list_users_dispatch_throttling_max_threshold").MustUint(0))
|
||||
zs.OpenFgaServerSettings.ListUsersDatabaseThrottleThreshold = openfgaSec.Key("list_users_database_throttle_threshold").MustInt(0)
|
||||
zs.OpenFgaServerSettings.ListUsersDatabaseThrottleDuration = openfgaSec.Key("list_users_database_throttle_duration").MustDuration(0)
|
||||
|
||||
// Check settings
|
||||
zs.OpenFgaServerSettings.MaxConcurrentReadsForCheck = uint32(openfgaSec.Key("max_concurrent_reads_for_check").MustUint(0))
|
||||
zs.OpenFgaServerSettings.CheckDatabaseThrottleThreshold = openfgaSec.Key("check_database_throttle_threshold").MustInt(0)
|
||||
zs.OpenFgaServerSettings.CheckDatabaseThrottleDuration = openfgaSec.Key("check_database_throttle_duration").MustDuration(0)
|
||||
|
||||
// Batch check settings
|
||||
zs.OpenFgaServerSettings.MaxConcurrentChecksPerBatchCheck = uint32(openfgaSec.Key("max_concurrent_checks_per_batch_check").MustUint(0))
|
||||
zs.OpenFgaServerSettings.MaxChecksPerBatchCheck = uint32(openfgaSec.Key("max_checks_per_batch_check").MustUint(0))
|
||||
|
||||
// Resolve node settings
|
||||
zs.OpenFgaServerSettings.ResolveNodeLimit = uint32(openfgaSec.Key("resolve_node_limit").MustUint(0))
|
||||
zs.OpenFgaServerSettings.ResolveNodeBreadthLimit = uint32(openfgaSec.Key("resolve_node_breadth_limit").MustUint(0))
|
||||
|
||||
// Dispatch throttling settings for Check resolver
|
||||
zs.OpenFgaServerSettings.DispatchThrottlingCheckResolverEnabled = openfgaSec.Key("dispatch_throttling_check_resolver_enabled").MustBool(false)
|
||||
zs.OpenFgaServerSettings.DispatchThrottlingCheckResolverFrequency = openfgaSec.Key("dispatch_throttling_check_resolver_frequency").MustDuration(0)
|
||||
zs.OpenFgaServerSettings.DispatchThrottlingCheckResolverThreshold = uint32(openfgaSec.Key("dispatch_throttling_check_resolver_threshold").MustUint(0))
|
||||
zs.OpenFgaServerSettings.DispatchThrottlingCheckResolverMaxThreshold = uint32(openfgaSec.Key("dispatch_throttling_check_resolver_max_threshold").MustUint(0))
|
||||
|
||||
// Shadow check/query settings
|
||||
zs.OpenFgaServerSettings.ShadowCheckResolverTimeout = openfgaSec.Key("shadow_check_resolver_timeout").MustDuration(0)
|
||||
zs.OpenFgaServerSettings.ShadowListObjectsQueryTimeout = openfgaSec.Key("shadow_list_objects_query_timeout").MustDuration(0)
|
||||
zs.OpenFgaServerSettings.ShadowListObjectsQueryMaxDeltaItems = openfgaSec.Key("shadow_list_objects_query_max_delta_items").MustInt(0)
|
||||
|
||||
zs.OpenFgaServerSettings.RequestTimeout = openfgaSec.Key("request_timeout").MustDuration(0)
|
||||
zs.OpenFgaServerSettings.MaxAuthorizationModelSizeInBytes = openfgaSec.Key("max_authorization_model_size_in_bytes").MustInt(0)
|
||||
zs.OpenFgaServerSettings.AuthorizationModelCacheSize = openfgaSec.Key("authorization_model_cache_size").MustInt(0)
|
||||
zs.OpenFgaServerSettings.ChangelogHorizonOffset = openfgaSec.Key("changelog_horizon_offset").MustInt(0)
|
||||
|
||||
cfg.ZanzanaServer = zs
|
||||
}
|
||||
|
||||
@@ -1786,13 +1786,6 @@
|
||||
"skipUrlSync": {
|
||||
"type": "boolean",
|
||||
"default": false
|
||||
},
|
||||
"valuesFormat": {
|
||||
"enum": [
|
||||
"csv",
|
||||
"json"
|
||||
],
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
|
||||
@@ -1801,13 +1801,6 @@
|
||||
"skipUrlSync": {
|
||||
"type": "boolean",
|
||||
"default": false
|
||||
},
|
||||
"valuesFormat": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"csv",
|
||||
"json"
|
||||
]
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
|
||||
@@ -321,7 +321,7 @@ func CreateGrafDir(t *testing.T, opts GrafanaOpts) (string, string) {
|
||||
_, err = openFeatureSect.NewKey("enable_api", strconv.FormatBool(opts.OpenFeatureAPIEnabled))
|
||||
require.NoError(t, err)
|
||||
if !opts.OpenFeatureAPIEnabled {
|
||||
_, err = openFeatureSect.NewKey("provider", "static") // in practice, APIEnabled being false goes with features-service type, but trying to make tests work
|
||||
_, err = openFeatureSect.NewKey("provider", "static") // in practice, APIEnabled being false goes with goff type, but trying to make tests work
|
||||
require.NoError(t, err)
|
||||
_, err = openFeatureSect.NewKey("targetingKey", "grafana")
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
package exemplar
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
)
|
||||
|
||||
type Exemplar struct {
|
||||
Id string
|
||||
Value float64
|
||||
Timestamp int64
|
||||
}
|
||||
|
||||
func CreateExemplarFrame(labels map[string]string, exemplars []*Exemplar) *data.Frame {
|
||||
frame := data.NewFrame("exemplar")
|
||||
frame.Meta = &data.FrameMeta{
|
||||
DataTopic: data.DataTopicAnnotations,
|
||||
}
|
||||
fields := []*data.Field{
|
||||
data.NewField("Time", nil, []time.Time{}),
|
||||
data.NewField("Value", labels, []float64{}), // add labels here?
|
||||
data.NewField("Id", nil, []string{}),
|
||||
}
|
||||
fields[2].Config = &data.FieldConfig{
|
||||
DisplayName: "Profile ID",
|
||||
}
|
||||
for name := range labels {
|
||||
fields = append(fields, data.NewField(name, nil, []string{}))
|
||||
}
|
||||
frame.Fields = fields
|
||||
|
||||
for _, e := range exemplars {
|
||||
frame.AppendRow(time.UnixMilli(e.Timestamp), e.Value, e.Id)
|
||||
for name, value := range labels {
|
||||
field, _ := frame.FieldByName(name)
|
||||
if field != nil {
|
||||
field.Append(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
return frame
|
||||
}
|
||||
@@ -1,34 +0,0 @@
|
||||
package exemplar
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestCreateExemplarFrame(t *testing.T) {
|
||||
exemplars := []*Exemplar{
|
||||
{Id: "1", Value: 1.0, Timestamp: 100},
|
||||
{Id: "2", Value: 2.0, Timestamp: 200},
|
||||
}
|
||||
labels := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
frame := CreateExemplarFrame(labels, exemplars)
|
||||
|
||||
require.Equal(t, "exemplar", frame.Name)
|
||||
require.Equal(t, 4, len(frame.Fields))
|
||||
require.Equal(t, "Time", frame.Fields[0].Name)
|
||||
require.Equal(t, "Value", frame.Fields[1].Name)
|
||||
require.Equal(t, "Id", frame.Fields[2].Name)
|
||||
require.Equal(t, "foo", frame.Fields[3].Name)
|
||||
|
||||
rows, err := frame.RowLen()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, rows)
|
||||
row := frame.RowCopy(0)
|
||||
require.Equal(t, 4, len(row))
|
||||
require.Equal(t, 1.0, row[1])
|
||||
require.Equal(t, "1", row[2])
|
||||
require.Equal(t, "bar", row[3])
|
||||
}
|
||||
@@ -18,8 +18,6 @@ import (
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -33,7 +31,7 @@ type ProfilingClient interface {
|
||||
ProfileTypes(ctx context.Context, start int64, end int64) ([]*ProfileType, error)
|
||||
LabelNames(ctx context.Context, labelSelector string, start int64, end int64) ([]string, error)
|
||||
LabelValues(ctx context.Context, label string, labelSelector string, start int64, end int64) ([]string, error)
|
||||
GetSeries(ctx context.Context, profileTypeID string, labelSelector string, start int64, end int64, groupBy []string, limit *int64, step float64, exemplarType typesv1.ExemplarType) (*SeriesResponse, error)
|
||||
GetSeries(ctx context.Context, profileTypeID string, labelSelector string, start int64, end int64, groupBy []string, limit *int64, step float64) (*SeriesResponse, error)
|
||||
GetProfile(ctx context.Context, profileTypeID string, labelSelector string, start int64, end int64, maxNodes *int64) (*ProfileResponse, error)
|
||||
GetSpanProfile(ctx context.Context, profileTypeID string, labelSelector string, spanSelector []string, start int64, end int64, maxNodes *int64) (*ProfileResponse, error)
|
||||
}
|
||||
|
||||
@@ -32,8 +32,6 @@ type GrafanaPyroscopeDataQuery struct {
|
||||
Limit *int64 `json:"limit,omitempty"`
|
||||
// Sets the maximum number of nodes in the flamegraph.
|
||||
MaxNodes *int64 `json:"maxNodes,omitempty"`
|
||||
// If set to true, the response will contain annotations
|
||||
Annotations *bool `json:"annotations,omitempty"`
|
||||
// A unique identifier for the query within the list of targets.
|
||||
// In server side expressions, the refId is used as a variable name to identify results.
|
||||
// By default, the UI will assign A->Z; however setting meaningful names may be useful.
|
||||
@@ -43,8 +41,8 @@ type GrafanaPyroscopeDataQuery struct {
|
||||
// Specify the query flavor
|
||||
// TODO make this required and give it a default
|
||||
QueryType *string `json:"queryType,omitempty"`
|
||||
// If set to true, exemplars will be requested
|
||||
IncludeExemplars bool `json:"includeExemplars"`
|
||||
// If set to true, the response will contain annotations
|
||||
Annotations *bool `json:"annotations,omitempty"`
|
||||
// For mixed data sources the selected datasource is on the query level.
|
||||
// For non mixed scenarios this is undefined.
|
||||
// TODO find a better way to do this ^ that's friendly to schema
|
||||
@@ -55,8 +53,7 @@ type GrafanaPyroscopeDataQuery struct {
|
||||
// NewGrafanaPyroscopeDataQuery creates a new GrafanaPyroscopeDataQuery object.
|
||||
func NewGrafanaPyroscopeDataQuery() *GrafanaPyroscopeDataQuery {
|
||||
return &GrafanaPyroscopeDataQuery{
|
||||
LabelSelector: "{}",
|
||||
GroupBy: []string{},
|
||||
IncludeExemplars: false,
|
||||
LabelSelector: "{}",
|
||||
GroupBy: []string{},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,16 +8,14 @@ import (
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend/tracing"
|
||||
|
||||
typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1"
|
||||
|
||||
"connectrpc.com/connect"
|
||||
querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1"
|
||||
"github.com/grafana/pyroscope/api/gen/proto/go/querier/v1/querierv1connect"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1"
|
||||
"github.com/grafana/pyroscope/api/gen/proto/go/querier/v1/querierv1connect"
|
||||
)
|
||||
|
||||
type ProfileType struct {
|
||||
@@ -51,13 +49,6 @@ type Point struct {
|
||||
// Milliseconds unix timestamp
|
||||
Timestamp int64
|
||||
Annotations []*typesv1.ProfileAnnotation
|
||||
Exemplars []*Exemplar
|
||||
}
|
||||
|
||||
type Exemplar struct {
|
||||
Id string
|
||||
Value uint64
|
||||
Timestamp int64
|
||||
}
|
||||
|
||||
type ProfileResponse struct {
|
||||
@@ -108,7 +99,7 @@ func (c *PyroscopeClient) ProfileTypes(ctx context.Context, start int64, end int
|
||||
}
|
||||
}
|
||||
|
||||
func (c *PyroscopeClient) GetSeries(ctx context.Context, profileTypeID string, labelSelector string, start int64, end int64, groupBy []string, limit *int64, step float64, exemplarType typesv1.ExemplarType) (*SeriesResponse, error) {
|
||||
func (c *PyroscopeClient) GetSeries(ctx context.Context, profileTypeID string, labelSelector string, start int64, end int64, groupBy []string, limit *int64, step float64) (*SeriesResponse, error) {
|
||||
ctx, span := tracing.DefaultTracer().Start(ctx, "datasource.pyroscope.GetSeries", trace.WithAttributes(attribute.String("profileTypeID", profileTypeID), attribute.String("labelSelector", labelSelector)))
|
||||
defer span.End()
|
||||
req := connect.NewRequest(&querierv1.SelectSeriesRequest{
|
||||
@@ -119,7 +110,6 @@ func (c *PyroscopeClient) GetSeries(ctx context.Context, profileTypeID string, l
|
||||
Step: step,
|
||||
GroupBy: groupBy,
|
||||
Limit: limit,
|
||||
ExemplarType: exemplarType,
|
||||
})
|
||||
|
||||
resp, err := c.connectClient.SelectSeries(ctx, req)
|
||||
@@ -147,16 +137,6 @@ func (c *PyroscopeClient) GetSeries(ctx context.Context, profileTypeID string, l
|
||||
Timestamp: p.Timestamp,
|
||||
Annotations: p.Annotations,
|
||||
}
|
||||
if len(p.Exemplars) > 0 {
|
||||
points[i].Exemplars = make([]*Exemplar, len(p.Exemplars))
|
||||
for j, e := range p.Exemplars {
|
||||
points[i].Exemplars[j] = &Exemplar{
|
||||
Id: e.ProfileId,
|
||||
Value: e.Value,
|
||||
Timestamp: e.Timestamp,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
series[i] = &Series{
|
||||
|
||||
@@ -5,11 +5,10 @@ import (
|
||||
"testing"
|
||||
|
||||
"connectrpc.com/connect"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
googlev1 "github.com/grafana/pyroscope/api/gen/proto/go/google/v1"
|
||||
querierv1 "github.com/grafana/pyroscope/api/gen/proto/go/querier/v1"
|
||||
typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func Test_PyroscopeClient(t *testing.T) {
|
||||
@@ -20,7 +19,7 @@ func Test_PyroscopeClient(t *testing.T) {
|
||||
|
||||
t.Run("GetSeries", func(t *testing.T) {
|
||||
limit := int64(42)
|
||||
resp, err := client.GetSeries(context.Background(), "memory:alloc_objects:count:space:bytes", "{}", 0, 100, []string{}, &limit, 15, typesv1.ExemplarType_EXEMPLAR_TYPE_NONE)
|
||||
resp, err := client.GetSeries(context.Background(), "memory:alloc_objects:count:space:bytes", "{}", 0, 100, []string{}, &limit, 15)
|
||||
require.Nil(t, err)
|
||||
|
||||
series := &SeriesResponse{
|
||||
@@ -33,21 +32,6 @@ func Test_PyroscopeClient(t *testing.T) {
|
||||
require.Equal(t, series, resp)
|
||||
})
|
||||
|
||||
t.Run("GetSeriesWithExemplars", func(t *testing.T) {
|
||||
limit := int64(42)
|
||||
resp, err := client.GetSeries(context.Background(), "memory:alloc_objects:count:space:bytes", "{}", 0, 100, []string{}, &limit, 15, typesv1.ExemplarType_EXEMPLAR_TYPE_INDIVIDUAL)
|
||||
require.Nil(t, err)
|
||||
|
||||
series := &SeriesResponse{
|
||||
Series: []*Series{
|
||||
{Labels: []*LabelPair{{Name: "foo", Value: "bar"}}, Points: []*Point{{Timestamp: int64(1000), Value: 30, Exemplars: []*Exemplar{{Id: "id1", Value: 3, Timestamp: 1000}}}, {Timestamp: int64(2000), Value: 10, Exemplars: []*Exemplar{{Id: "id2", Value: 1, Timestamp: 2000}}}}},
|
||||
},
|
||||
Units: "short",
|
||||
Label: "alloc_objects",
|
||||
}
|
||||
require.Equal(t, series, resp)
|
||||
})
|
||||
|
||||
t.Run("GetProfile", func(t *testing.T) {
|
||||
maxNodes := int64(-1)
|
||||
resp, err := client.GetProfile(context.Background(), "memory:alloc_objects:count:space:bytes", "{}", 0, 100, &maxNodes)
|
||||
@@ -131,21 +115,6 @@ func (f *FakePyroscopeConnectClient) SelectMergeStacktraces(ctx context.Context,
|
||||
|
||||
func (f *FakePyroscopeConnectClient) SelectSeries(ctx context.Context, req *connect.Request[querierv1.SelectSeriesRequest]) (*connect.Response[querierv1.SelectSeriesResponse], error) {
|
||||
f.Req = req
|
||||
if req.Msg.ExemplarType == typesv1.ExemplarType_EXEMPLAR_TYPE_INDIVIDUAL {
|
||||
return &connect.Response[querierv1.SelectSeriesResponse]{
|
||||
Msg: &querierv1.SelectSeriesResponse{
|
||||
Series: []*typesv1.Series{
|
||||
{
|
||||
Labels: []*typesv1.LabelPair{{Name: "foo", Value: "bar"}},
|
||||
Points: []*typesv1.Point{
|
||||
{Timestamp: int64(1000), Value: 30, Exemplars: []*typesv1.Exemplar{{Timestamp: int64(1000), Value: 3, ProfileId: "id1"}}},
|
||||
{Timestamp: int64(2000), Value: 10, Exemplars: []*typesv1.Exemplar{{Timestamp: int64(2000), Value: 1, ProfileId: "id2"}}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
return &connect.Response[querierv1.SelectSeriesResponse]{
|
||||
Msg: &querierv1.SelectSeriesResponse{
|
||||
Series: []*typesv1.Series{
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend/tracing"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/live"
|
||||
"github.com/grafana/grafana/pkg/tsdb/grafana-pyroscope-datasource/exemplar"
|
||||
"github.com/xlab/treeprint"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
@@ -22,8 +21,6 @@ import (
|
||||
|
||||
"github.com/grafana/grafana/pkg/tsdb/grafana-pyroscope-datasource/annotation"
|
||||
"github.com/grafana/grafana/pkg/tsdb/grafana-pyroscope-datasource/kinds/dataquery"
|
||||
|
||||
typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1"
|
||||
)
|
||||
|
||||
type queryModel struct {
|
||||
@@ -39,12 +36,8 @@ const (
|
||||
queryTypeProfile = string(dataquery.PyroscopeQueryTypeProfile)
|
||||
queryTypeMetrics = string(dataquery.PyroscopeQueryTypeMetrics)
|
||||
queryTypeBoth = string(dataquery.PyroscopeQueryTypeBoth)
|
||||
|
||||
exemplarsFeatureToggle = "profilesExemplars"
|
||||
)
|
||||
|
||||
var identityTransformation = func(value float64) float64 { return value }
|
||||
|
||||
// query processes single Pyroscope query transforming the response to data.Frame packaged in DataResponse
|
||||
func (d *PyroscopeDatasource) query(ctx context.Context, pCtx backend.PluginContext, query backend.DataQuery) backend.DataResponse {
|
||||
ctx, span := tracing.DefaultTracer().Start(ctx, "datasource.pyroscope.query", trace.WithAttributes(attribute.String("query_type", query.QueryType)))
|
||||
@@ -84,10 +77,6 @@ func (d *PyroscopeDatasource) query(ctx context.Context, pCtx backend.PluginCont
|
||||
logger.Error("Failed to parse the MinStep using default", "MinStep", dsJson.MinStep, "function", logEntrypoint())
|
||||
}
|
||||
}
|
||||
exemplarType := typesv1.ExemplarType_EXEMPLAR_TYPE_NONE
|
||||
if qm.IncludeExemplars && backend.GrafanaConfigFromContext(ctx).FeatureToggles().IsEnabled(exemplarsFeatureToggle) {
|
||||
exemplarType = typesv1.ExemplarType_EXEMPLAR_TYPE_INDIVIDUAL
|
||||
}
|
||||
seriesResp, err := d.client.GetSeries(
|
||||
gCtx,
|
||||
profileTypeId,
|
||||
@@ -97,7 +86,6 @@ func (d *PyroscopeDatasource) query(ctx context.Context, pCtx backend.PluginCont
|
||||
qm.GroupBy,
|
||||
qm.Limit,
|
||||
math.Max(query.Interval.Seconds(), parsedInterval.Seconds()),
|
||||
exemplarType,
|
||||
)
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
@@ -487,7 +475,6 @@ func seriesToDataFrames(resp *SeriesResponse, withAnnotations bool, stepDuration
|
||||
annotations := make([]*annotation.TimedAnnotation, 0)
|
||||
|
||||
for _, series := range resp.Series {
|
||||
exemplars := make([]*exemplar.Exemplar, 0)
|
||||
// We create separate data frames as the series may not have the same length
|
||||
frame := data.NewFrame("series")
|
||||
frameMeta := &data.FrameMeta{PreferredVisualization: "graph"}
|
||||
@@ -529,20 +516,14 @@ func seriesToDataFrames(resp *SeriesResponse, withAnnotations bool, stepDuration
|
||||
|
||||
// Apply rate calculation for cumulative profiles
|
||||
value := point.Value
|
||||
transformation := identityTransformation
|
||||
if isCumulativeProfile(profileTypeID) && stepDurationSec > 0 {
|
||||
transformation = func(value float64) float64 {
|
||||
return value / stepDurationSec
|
||||
}
|
||||
value = value / stepDurationSec
|
||||
|
||||
// Convert CPU nanoseconds to cores
|
||||
if isCPUTimeProfile(profileTypeID) {
|
||||
transformation = func(value float64) float64 {
|
||||
return value / stepDurationSec / 1e9
|
||||
}
|
||||
value = value / 1e9
|
||||
}
|
||||
}
|
||||
value = transformation(value)
|
||||
valueField.Append(value)
|
||||
if withAnnotations {
|
||||
for _, a := range point.Annotations {
|
||||
@@ -552,22 +533,10 @@ func seriesToDataFrames(resp *SeriesResponse, withAnnotations bool, stepDuration
|
||||
})
|
||||
}
|
||||
}
|
||||
for _, e := range point.Exemplars {
|
||||
exemplars = append(exemplars, &exemplar.Exemplar{
|
||||
Id: e.Id,
|
||||
Value: transformation(float64(e.Value)),
|
||||
Timestamp: e.Timestamp,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
frame.Fields = fields
|
||||
frames = append(frames, frame)
|
||||
|
||||
if len(exemplars) > 0 {
|
||||
frame := exemplar.CreateExemplarFrame(labels, exemplars)
|
||||
frames = append(frames, frame)
|
||||
}
|
||||
}
|
||||
|
||||
if len(annotations) > 0 {
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
|
||||
typesv1 "github.com/grafana/pyroscope/api/gen/proto/go/types/v1"
|
||||
|
||||
"github.com/grafana/grafana/pkg/tsdb/grafana-pyroscope-datasource/annotation"
|
||||
@@ -488,21 +487,10 @@ func Test_seriesToDataFrame(t *testing.T) {
|
||||
require.Nil(t, frames[0].Meta.Custom)
|
||||
})
|
||||
|
||||
t.Run("CPU time conversion to cores with exemplars", func(t *testing.T) {
|
||||
t.Run("CPU time conversion to cores", func(t *testing.T) {
|
||||
series := &SeriesResponse{
|
||||
Series: []*Series{
|
||||
{
|
||||
Labels: []*LabelPair{}, Points: []*Point{
|
||||
{
|
||||
Timestamp: int64(1000), Value: 3000000000, // 3s in nanoseconds
|
||||
Exemplars: []*Exemplar{{Value: 300000000, Timestamp: 1000}}, // 0.3s in nanoseconds
|
||||
},
|
||||
{
|
||||
Timestamp: int64(2000), Value: 1500000000, // 1.5s in nanoseconds
|
||||
Exemplars: []*Exemplar{{Value: 150000000, Timestamp: 1000}}, // 0.15s in nanoseconds
|
||||
},
|
||||
},
|
||||
},
|
||||
{Labels: []*LabelPair{}, Points: []*Point{{Timestamp: int64(1000), Value: 3000000000}, {Timestamp: int64(2000), Value: 1500000000}}}, // 3s and 1.5s in nanoseconds
|
||||
},
|
||||
Units: "ns",
|
||||
Label: "cpu",
|
||||
@@ -510,32 +498,19 @@ func Test_seriesToDataFrame(t *testing.T) {
|
||||
// should convert nanoseconds to cores and set unit to "cores"
|
||||
frames, err := seriesToDataFrames(series, false, 15.0, "process_cpu:cpu:nanoseconds:cpu:nanoseconds")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(frames))
|
||||
require.Equal(t, 1, len(frames))
|
||||
|
||||
require.Equal(t, "cores", frames[0].Fields[1].Config.Unit)
|
||||
|
||||
// Check values were converted: 3000000000/15/1e9 = 0.2 cores/sec, 1500000000/15/1e9 = 0.1 cores/sec
|
||||
values := fieldValues[float64](frames[0].Fields[1])
|
||||
require.Equal(t, []float64{0.2, 0.1}, values)
|
||||
// Check exemplar values were converted: 300000000/15/1e9 = 0.02 cores/sec, 150000000/15/1e9 = 0.01 cores/sec
|
||||
exemplarValues := fieldValues[float64](frames[1].Fields[1])
|
||||
require.Equal(t, []float64{0.02, 0.01}, exemplarValues)
|
||||
})
|
||||
|
||||
t.Run("Memory allocation unit conversion to bytes/sec", func(t *testing.T) {
|
||||
series := &SeriesResponse{
|
||||
Series: []*Series{
|
||||
{
|
||||
Labels: []*LabelPair{}, Points: []*Point{
|
||||
{
|
||||
Timestamp: int64(1000), Value: 150000000, // 150 MB
|
||||
Exemplars: []*Exemplar{{Value: 15000000, Timestamp: 1000}}, // 15 MB
|
||||
}, {
|
||||
Timestamp: int64(2000), Value: 300000000, // 300 MB
|
||||
Exemplars: []*Exemplar{{Value: 30000000, Timestamp: 1000}}, // 30 MB
|
||||
},
|
||||
},
|
||||
},
|
||||
{Labels: []*LabelPair{}, Points: []*Point{{Timestamp: int64(1000), Value: 150000000}, {Timestamp: int64(2000), Value: 300000000}}}, // 150 MB, 300 MB
|
||||
},
|
||||
Units: "bytes",
|
||||
Label: "memory_alloc",
|
||||
@@ -543,33 +518,19 @@ func Test_seriesToDataFrame(t *testing.T) {
|
||||
// should convert bytes to binBps and apply rate calculation
|
||||
frames, err := seriesToDataFrames(series, false, 15.0, "memory:alloc_space:bytes:space:bytes")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(frames))
|
||||
require.Equal(t, 1, len(frames))
|
||||
|
||||
require.Equal(t, "binBps", frames[0].Fields[1].Config.Unit)
|
||||
|
||||
// Check values were rate calculated: 150000000/15 = 10000000, 300000000/15 = 20000000
|
||||
values := fieldValues[float64](frames[0].Fields[1])
|
||||
require.Equal(t, []float64{10000000, 20000000}, values)
|
||||
// Check exemplar values were rate calculated: 15000000/15 = 1000000, 30000000/15 = 2000000
|
||||
exemplarValues := fieldValues[float64](frames[1].Fields[1])
|
||||
require.Equal(t, []float64{1000000, 2000000}, exemplarValues)
|
||||
})
|
||||
|
||||
t.Run("Count-based profile unit conversion to ops/sec", func(t *testing.T) {
|
||||
series := &SeriesResponse{
|
||||
Series: []*Series{
|
||||
{
|
||||
Labels: []*LabelPair{}, Points: []*Point{
|
||||
{
|
||||
Timestamp: int64(1000), Value: 1500, // 1500 contentions
|
||||
Exemplars: []*Exemplar{{Value: 150, Timestamp: 1000}}, // 150 contentions
|
||||
|
||||
}, {
|
||||
Timestamp: int64(2000), Value: 3000, // 3000 contentions
|
||||
Exemplars: []*Exemplar{{Value: 300, Timestamp: 1000}}, // 300 contentions
|
||||
},
|
||||
},
|
||||
},
|
||||
{Labels: []*LabelPair{}, Points: []*Point{{Timestamp: int64(1000), Value: 1500}, {Timestamp: int64(2000), Value: 3000}}}, // 1500, 3000 contentions
|
||||
},
|
||||
Units: "short",
|
||||
Label: "contentions",
|
||||
@@ -577,16 +538,13 @@ func Test_seriesToDataFrame(t *testing.T) {
|
||||
// should convert short to ops and apply rate calculation
|
||||
frames, err := seriesToDataFrames(series, false, 15.0, "mutex:contentions:count:contentions:count")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(frames))
|
||||
require.Equal(t, 1, len(frames))
|
||||
|
||||
require.Equal(t, "ops", frames[0].Fields[1].Config.Unit)
|
||||
|
||||
// Check values were rate calculated: 1500/15 = 100, 3000/15 = 200
|
||||
values := fieldValues[float64](frames[0].Fields[1])
|
||||
require.Equal(t, []float64{100, 200}, values)
|
||||
// Check exemplar values were rate calculated: 150/15 = 10, 300/15 = 20
|
||||
exemplarValues := fieldValues[float64](frames[1].Fields[1])
|
||||
require.Equal(t, []float64{10, 20}, exemplarValues)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -647,7 +605,7 @@ func (f *FakeClient) GetSpanProfile(ctx context.Context, profileTypeID, labelSel
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *FakeClient) GetSeries(ctx context.Context, profileTypeID, labelSelector string, start, end int64, groupBy []string, limit *int64, step float64, exemplarType typesv1.ExemplarType) (*SeriesResponse, error) {
|
||||
func (f *FakeClient) GetSeries(ctx context.Context, profileTypeID, labelSelector string, start, end int64, groupBy []string, limit *int64, step float64) (*SeriesResponse, error) {
|
||||
f.Args = []any{profileTypeID, labelSelector, start, end, groupBy, step}
|
||||
return &SeriesResponse{
|
||||
Series: []*Series{
|
||||
|
||||
@@ -3,8 +3,7 @@ import { render, screen, userEvent, waitFor } from 'test/test-utils';
|
||||
import { byLabelText, byRole, byText } from 'testing-library-selector';
|
||||
|
||||
import { setPluginLinksHook } from '@grafana/runtime';
|
||||
import server from '@grafana/test-utils/server';
|
||||
import { mockAlertRuleApi, setupMswServer } from 'app/features/alerting/unified/mockApi';
|
||||
import { setupMswServer } from 'app/features/alerting/unified/mockApi';
|
||||
import { AlertManagerDataSourceJsonData } from 'app/plugins/datasource/alertmanager/types';
|
||||
import { AccessControlAction } from 'app/types/accessControl';
|
||||
import { CombinedRule, RuleIdentifier } from 'app/types/unified-alerting';
|
||||
@@ -23,7 +22,6 @@ import {
|
||||
mockPluginLinkExtension,
|
||||
mockPromAlertingRule,
|
||||
mockRulerGrafanaRecordingRule,
|
||||
mockRulerGrafanaRule,
|
||||
} from '../../mocks';
|
||||
import { grafanaRulerRule } from '../../mocks/grafanaRulerApi';
|
||||
import { grantPermissionsHelper } from '../../test/test-utils';
|
||||
@@ -132,8 +130,6 @@ const dataSources = {
|
||||
};
|
||||
|
||||
describe('RuleViewer', () => {
|
||||
const api = mockAlertRuleApi(server);
|
||||
|
||||
beforeEach(() => {
|
||||
setupDataSources(...Object.values(dataSources));
|
||||
});
|
||||
@@ -253,22 +249,19 @@ describe('RuleViewer', () => {
|
||||
|
||||
expect(screen.getAllByRole('row')).toHaveLength(7);
|
||||
expect(screen.getAllByRole('row')[1]).toHaveTextContent(/6Provisioning2025-01-18 04:35:17/i);
|
||||
expect(screen.getAllByRole('row')[1]).toHaveTextContent('Updated by provisioning service');
|
||||
expect(screen.getAllByRole('row')[1]).toHaveTextContent('+4-3Latest');
|
||||
expect(screen.getAllByRole('row')[1]).toHaveTextContent('+3-3Latest');
|
||||
|
||||
expect(screen.getAllByRole('row')[2]).toHaveTextContent(/5Alerting2025-01-17 04:35:17/i);
|
||||
expect(screen.getAllByRole('row')[2]).toHaveTextContent('+5-6');
|
||||
expect(screen.getAllByRole('row')[2]).toHaveTextContent('+5-5');
|
||||
|
||||
expect(screen.getAllByRole('row')[3]).toHaveTextContent(/4different user2025-01-16 04:35:17/i);
|
||||
expect(screen.getAllByRole('row')[3]).toHaveTextContent('Changed alert title and thresholds');
|
||||
expect(screen.getAllByRole('row')[3]).toHaveTextContent('+6-5');
|
||||
expect(screen.getAllByRole('row')[3]).toHaveTextContent('+5-5');
|
||||
|
||||
expect(screen.getAllByRole('row')[4]).toHaveTextContent(/3user12025-01-15 04:35:17/i);
|
||||
expect(screen.getAllByRole('row')[4]).toHaveTextContent('+5-10');
|
||||
expect(screen.getAllByRole('row')[4]).toHaveTextContent('+5-9');
|
||||
|
||||
expect(screen.getAllByRole('row')[5]).toHaveTextContent(/2User ID foo2025-01-14 04:35:17/i);
|
||||
expect(screen.getAllByRole('row')[5]).toHaveTextContent('Updated evaluation interval and routing');
|
||||
expect(screen.getAllByRole('row')[5]).toHaveTextContent('+12-7');
|
||||
expect(screen.getAllByRole('row')[5]).toHaveTextContent('+11-7');
|
||||
|
||||
expect(screen.getAllByRole('row')[6]).toHaveTextContent(/1Unknown 2025-01-13 04:35:17/i);
|
||||
|
||||
@@ -282,10 +275,9 @@ describe('RuleViewer', () => {
|
||||
await renderRuleViewer(mockRule, mockRuleIdentifier, ActiveTab.VersionHistory);
|
||||
expect(await screen.findByRole('button', { name: /Compare versions/i })).toBeDisabled();
|
||||
|
||||
// Check for special updated_by values - use getAllByRole since some text appears in multiple columns
|
||||
expect(screen.getAllByRole('cell', { name: /provisioning/i }).length).toBeGreaterThan(0);
|
||||
expect(screen.getByRole('cell', { name: /^alerting$/i })).toBeInTheDocument();
|
||||
expect(screen.getByRole('cell', { name: /^Unknown$/i })).toBeInTheDocument();
|
||||
expect(screen.getByRole('cell', { name: /provisioning/i })).toBeInTheDocument();
|
||||
expect(screen.getByRole('cell', { name: /alerting/i })).toBeInTheDocument();
|
||||
expect(screen.getByRole('cell', { name: /Unknown/i })).toBeInTheDocument();
|
||||
expect(screen.getByRole('cell', { name: /user id foo/i })).toBeInTheDocument();
|
||||
});
|
||||
|
||||
@@ -329,47 +321,6 @@ describe('RuleViewer', () => {
|
||||
await renderRuleViewer(rule, ruleIdentifier);
|
||||
expect(screen.queryByText('Labels')).not.toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('shows Notes column when versions have messages', async () => {
|
||||
await renderRuleViewer(mockRule, mockRuleIdentifier, ActiveTab.VersionHistory);
|
||||
|
||||
expect(await screen.findByRole('columnheader', { name: /Notes/i })).toBeInTheDocument();
|
||||
expect(screen.getAllByRole('row')).toHaveLength(7); // 1 header + 6 data rows
|
||||
expect(screen.getByRole('cell', { name: /Updated by provisioning service/i })).toBeInTheDocument();
|
||||
expect(screen.getByRole('cell', { name: /Changed alert title and thresholds/i })).toBeInTheDocument();
|
||||
expect(screen.getByRole('cell', { name: /Updated evaluation interval and routing/i })).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it('does not show Notes column when no versions have messages', async () => {
|
||||
const versionsWithoutMessages = [
|
||||
mockRulerGrafanaRule(
|
||||
{},
|
||||
{
|
||||
uid: grafanaRulerRule.grafana_alert.uid,
|
||||
version: 2,
|
||||
updated: '2025-01-14T09:35:17.000Z',
|
||||
updated_by: { uid: 'foo', name: '' },
|
||||
}
|
||||
),
|
||||
mockRulerGrafanaRule(
|
||||
{},
|
||||
{
|
||||
uid: grafanaRulerRule.grafana_alert.uid,
|
||||
version: 1,
|
||||
updated: '2025-01-13T09:35:17.000Z',
|
||||
updated_by: null,
|
||||
}
|
||||
),
|
||||
];
|
||||
api.getAlertRuleVersionHistory(grafanaRulerRule.grafana_alert.uid, versionsWithoutMessages);
|
||||
|
||||
await renderRuleViewer(mockRule, mockRuleIdentifier, ActiveTab.VersionHistory);
|
||||
|
||||
await screen.findByRole('button', { name: /Compare versions/i });
|
||||
|
||||
expect(screen.getAllByRole('row')).toHaveLength(3); // 1 header + 2 data rows
|
||||
expect(screen.queryByRole('columnheader', { name: /Notes/i })).not.toBeInTheDocument();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user