Compare commits
1 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| b82a89c34e |
@@ -1,6 +1,6 @@
|
||||
module air
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
tool github.com/air-verse/air
|
||||
|
||||
@@ -21,8 +21,8 @@ require (
|
||||
github.com/spf13/afero v1.14.0 // indirect
|
||||
github.com/spf13/cast v1.8.0 // indirect
|
||||
github.com/tdewolff/parse/v2 v2.8.1 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
)
|
||||
|
||||
+12
-12
@@ -167,19 +167,19 @@ golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b h1:QoALfVG9rhQ/M7vYDScfPdWjG
|
||||
golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ=
|
||||
golang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w=
|
||||
golang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g=
|
||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module bra
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
tool github.com/unknwon/bra
|
||||
|
||||
@@ -17,6 +17,6 @@ require (
|
||||
github.com/unknwon/com v1.0.1 // indirect
|
||||
github.com/unknwon/log v0.0.0-20200308114134-929b1006e34a // indirect
|
||||
github.com/urfave/cli v1.22.16 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect
|
||||
)
|
||||
|
||||
@@ -56,8 +56,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20191020152052-9984515f0562/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module cog
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
tool github.com/grafana/cog/cmd/cli
|
||||
|
||||
@@ -40,11 +40,11 @@ require (
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
github.com/yalue/merged_fs v1.3.0 // indirect
|
||||
golang.org/x/mod v0.29.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/mod v0.27.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/tools v0.36.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
+12
-12
@@ -85,20 +85,20 @@ github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4d
|
||||
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
github.com/yalue/merged_fs v1.3.0 h1:qCeh9tMPNy/i8cwDsQTJ5bLr6IRxbs6meakNE5O+wyY=
|
||||
github.com/yalue/merged_fs v1.3.0/go.mod h1:WqqchfVYQyclV2tnR7wtRhBddzBvLVR83Cjw9BKQw0M=
|
||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module cue
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
tool cuelang.org/go/cmd/cue
|
||||
|
||||
@@ -25,13 +25,13 @@ require (
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/stretchr/testify v1.10.0 // indirect
|
||||
github.com/tetratelabs/wazero v1.6.0 // indirect
|
||||
golang.org/x/mod v0.29.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/mod v0.27.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/tools v0.36.0 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
+12
-12
@@ -53,20 +53,20 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tetratelabs/wazero v1.6.0 h1:z0H1iikCdP8t+q341xqepY4EWvHEw8Es7tlqiVzlP3g=
|
||||
github.com/tetratelabs/wazero v1.6.0/go.mod h1:0U0G41+ochRKoPKCJlh0jMg1CHkyfK8kDqiirMmKY8A=
|
||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module golangci-lint
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
tool github.com/golangci/golangci-lint/v2/cmd/golangci-lint
|
||||
|
||||
@@ -198,12 +198,12 @@ require (
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
|
||||
golang.org/x/exp/typeparams v0.0.0-20250911091902-df9299821621 // indirect
|
||||
golang.org/x/mod v0.29.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
golang.org/x/mod v0.28.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/tools v0.37.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
honnef.co/go/tools v0.6.1 // indirect
|
||||
|
||||
@@ -481,8 +481,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
||||
golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
|
||||
golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
@@ -496,8 +496,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -507,8 +507,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -529,8 +529,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
@@ -545,8 +545,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
@@ -559,8 +559,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
|
||||
golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
|
||||
golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM=
|
||||
golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
|
||||
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module jb
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
tool github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
|
||||
|
||||
@@ -15,6 +15,6 @@ require (
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/stretchr/testify v1.10.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect
|
||||
)
|
||||
|
||||
@@ -54,8 +54,8 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module lefthook
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
tool github.com/evilmartians/lefthook
|
||||
|
||||
@@ -43,9 +43,9 @@ require (
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/term v0.35.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
gopkg.in/alessio/shellescape.v1 v1.0.0-20170105083845-52074bc9df61 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
|
||||
@@ -91,14 +91,14 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8
|
||||
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
|
||||
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
gopkg.in/alessio/shellescape.v1 v1.0.0-20170105083845-52074bc9df61 h1:8ajkpB4hXVftY5ko905id+dOnmorcS2CHNxxHLLDcFM=
|
||||
gopkg.in/alessio/shellescape.v1 v1.0.0-20170105083845-52074bc9df61/go.mod h1:IfMagxm39Ys4ybJrDb7W3Ob8RwxftP0Yy+or/NVz1O8=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module swagger
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
tool github.com/go-swagger/go-swagger/cmd/swagger
|
||||
|
||||
@@ -51,12 +51,12 @@ require (
|
||||
github.com/toqueteos/webbrowser v1.2.0 // indirect
|
||||
go.mongodb.org/mongo-driver v1.16.1 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.45.0 // indirect
|
||||
golang.org/x/mod v0.29.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
golang.org/x/crypto v0.42.0 // indirect
|
||||
golang.org/x/mod v0.27.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/tools v0.36.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
+12
-12
@@ -101,19 +101,19 @@ go.mongodb.org/mongo-driver v1.16.1 h1:rIVLL3q0IHM39dvE+z2ulZLp9ENZKThVfuvN/IiN4
|
||||
go.mongodb.org/mongo-driver v1.16.1/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
|
||||
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
||||
+1
-6
@@ -39,18 +39,13 @@
|
||||
/docs/sources/ @irenerl24
|
||||
|
||||
/docs/sources/alerting/ @JohnnyK-Grafana
|
||||
|
||||
/docs/sources/dashboards/ @imatwawana
|
||||
/docs/sources/as-code/ @urbiz-grafana
|
||||
/docs/sources/developer-resources/ @urbiz-grafana
|
||||
/docs/sources/datasources/ @lwandz13
|
||||
/docs/sources/panels-visualizations/ @imatwawana
|
||||
/docs/sources/upgrade-guide/ @jtvdez
|
||||
/docs/sources/whatsnew/ @jtvdez
|
||||
|
||||
/docs/sources/developer-resources/plugins/ @grafana/plugins-platform-frontend @grafana/plugins-platform-backend
|
||||
/docs/sources/visualizations/dashboards/ @imatwawana
|
||||
/docs/sources/visualizations/panels-visualizations/ @imatwawana
|
||||
/docs/sources/developers/plugins/ @grafana/plugins-platform-frontend @grafana/plugins-platform-backend
|
||||
|
||||
/docs/sources/dashboards/share-dashboards-panels/_index.md @imatwawana @jtvdez
|
||||
/docs/sources/dashboards/share-dashboards-panels/shared-dashboards/index.md @jtvdez
|
||||
|
||||
@@ -12,7 +12,6 @@ on:
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
statuses: write
|
||||
|
||||
# Since this is run on a pull request, we want to apply the patches intended for the
|
||||
# target branch onto the source branch, to verify compatibility before merging.
|
||||
|
||||
@@ -29,10 +29,6 @@ permissions:
|
||||
# target branch onto the source branch, to verify compatibility before merging.
|
||||
jobs:
|
||||
dispatch-job:
|
||||
# If the source is not from a fork then dispatch the job to the workflow.
|
||||
# This will fail on forks when trying to broker a token, so instead, forks will create the required status and mark
|
||||
# it as a success
|
||||
if: ${{ ! github.event.pull_request.head.repo.fork }}
|
||||
env:
|
||||
HEAD_REF: ${{ inputs.head_ref }}
|
||||
BASE_REF: ${{ github.base_ref }}
|
||||
@@ -80,20 +76,3 @@ jobs:
|
||||
triggering_github_handle: SENDER
|
||||
}
|
||||
})
|
||||
dispatch-job-fork:
|
||||
# If the source is from a fork then use the built-in workflow token to create the same status and unconditionally
|
||||
# mark it as a success.
|
||||
if: ${{ github.event.pull_request.head.repo.fork }}
|
||||
permissions:
|
||||
statuses: write
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Create status
|
||||
uses: myrotvorets/set-commit-status-action@6d6905c99cd24a4a2cbccc720b62dc6ca5587141
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
sha: ${{ inputs.pr_commit_sha }}
|
||||
repo: ${{ inputs.repo }}
|
||||
status: success
|
||||
context: "Test Patches (event)"
|
||||
description: "Test Patches (event) on a fork"
|
||||
|
||||
@@ -13,7 +13,7 @@ jobs:
|
||||
outputs:
|
||||
changed: ${{ steps.detect-changes.outputs.backend || steps.detect-changes.outputs.frontend || steps.detect-changes.outputs.dockerfile }}
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: true # required to get more history in the changed-files action
|
||||
fetch-depth: 2
|
||||
@@ -21,7 +21,7 @@ jobs:
|
||||
id: detect-changes
|
||||
uses: ./.github/actions/change-detection
|
||||
with:
|
||||
self: .github/workflows/pr-test-docker.yml
|
||||
self: .github/workflows/pr-test-integration.yml
|
||||
|
||||
build-dockerfile:
|
||||
needs: detect-changes
|
||||
@@ -31,9 +31,9 @@ jobs:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
- uses: docker/setup-docker-action@3fb92d6d9c634363128c8cce4bc3b2826526370a # v4
|
||||
- uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19 # v4
|
||||
- name: Build Dockerfile
|
||||
run: make build-docker-full
|
||||
|
||||
@@ -212,7 +212,6 @@ jobs:
|
||||
run-id: ${{ github.run_id }}
|
||||
bucket-path: ${{ needs.setup.outputs.version }}_${{ github.run_id }}
|
||||
environment: prod
|
||||
runs-on: ubuntu-x64-small
|
||||
|
||||
publish-dockerhub:
|
||||
if: github.ref_name == 'main'
|
||||
@@ -312,29 +311,20 @@ jobs:
|
||||
repositories: '["grafana"]'
|
||||
permissions: '{"issues": "write", "pull_requests": "write", "contents": "read"}'
|
||||
- name: Find PR
|
||||
continue-on-error: true
|
||||
id: find-pr
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }}
|
||||
GRAFANA_COMMIT: ${{ needs.setup.outputs.grafana-commit }}
|
||||
REPO: ${{ github.repository }}
|
||||
run: |
|
||||
set -eo pipefail
|
||||
gh api "/repos/${REPO}/commits/${GRAFANA_COMMIT}/pulls" | jq -r '.[0].number' | tee issue_number.txt
|
||||
echo "ISSUE_NUMBER=$(cat issue_number.txt)" >> "$GITHUB_ENV"
|
||||
run: echo "ISSUE_NUMBER=$(gh api "/repos/grafana/grafana/commits/${GRAFANA_COMMIT}/pulls" | jq -r '.[0].number')" >> "$GITHUB_ENV"
|
||||
- name: Find Comment
|
||||
uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3
|
||||
if: ${{ steps.find-pr.outcome == 'success' }}
|
||||
id: fc
|
||||
continue-on-error: true
|
||||
with:
|
||||
issue-number: ${{ env.ISSUE_NUMBER }}
|
||||
comment-author: 'grafana-delivery-bot[bot]'
|
||||
body-includes: GitHub Actions Build
|
||||
token: ${{ steps.generate_token.outputs.token }}
|
||||
- name: Create or update comment
|
||||
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v4
|
||||
if: ${{ steps.find-pr.outcome == 'success' }} # Run even if comment wasn't found
|
||||
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4
|
||||
with:
|
||||
token: ${{ steps.generate_token.outputs.token }}
|
||||
comment-id: ${{ steps.fc.outputs.comment-id }}
|
||||
|
||||
@@ -1,19 +1,3 @@
|
||||
<!-- 12.2.2 START -->
|
||||
|
||||
# 12.2.2 (2025-11-19)
|
||||
|
||||
### Features and enhancements
|
||||
|
||||
- **Access control:** Reduce memory usage when fetching user's permissions [#113414](https://github.com/grafana/grafana/pull/113414), [@hairyhenderson](https://github.com/hairyhenderson)
|
||||
- **Table:** Pill and JSON Cells should allow formatting [#113130](https://github.com/grafana/grafana/pull/113130), [@fastfrwrd](https://github.com/fastfrwrd)
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- **AnalyticsSummaries:** Fix dashboard rollup not resetting "last X days" metrics to zero (Enterprise)
|
||||
- **AnalyticsSummaries:** Fix dashboard rollup totals resetting incorrectly (Enterprise)
|
||||
- **Security:** fix for CVE-2025-41115 in SCIM (System for Cross-domain Identity Management) (Enterprise)
|
||||
|
||||
<!-- 12.2.2 END -->
|
||||
<!-- 12.2.1 START -->
|
||||
|
||||
# 12.2.1 (2025-10-21)
|
||||
|
||||
+2
-2
@@ -14,9 +14,9 @@ ARG JS_SRC=js-builder
|
||||
|
||||
# Dependabot cannot update dependencies listed in ARGs
|
||||
# By using FROM instructions we can delegate dependency updates to dependabot
|
||||
FROM alpine:3.23.0 AS alpine-base
|
||||
FROM alpine:3.21.3 AS alpine-base
|
||||
FROM ubuntu:22.04 AS ubuntu-base
|
||||
FROM golang:1.25.5-alpine AS go-builder-base
|
||||
FROM golang:1.25.3-alpine AS go-builder-base
|
||||
FROM --platform=${JS_PLATFORM} node:22-alpine AS js-builder-base
|
||||
# Javascript build stage
|
||||
FROM --platform=${JS_PLATFORM} ${JS_IMAGE} AS js-builder
|
||||
|
||||
@@ -8,7 +8,7 @@ WIRE_TAGS = "oss"
|
||||
include .citools/Variables.mk
|
||||
|
||||
GO = go
|
||||
GO_VERSION = 1.25.5
|
||||
GO_VERSION = 1.25.3
|
||||
GO_LINT_FILES ?= $(shell ./scripts/go-workspace/golangci-lint-includes.sh)
|
||||
GO_TEST_FILES ?= $(shell ./scripts/go-workspace/test-includes.sh)
|
||||
SH_FILES ?= $(shell find ./scripts -name *.sh)
|
||||
|
||||
+1
-1
@@ -1,6 +1,6 @@
|
||||
module github.com/grafana/grafana/apps/advisor
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
github.com/Masterminds/semver/v3 v3.4.0
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module github.com/grafana/grafana/apps/alerting/alertenrichment
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
github.com/grafana/grafana-app-sdk v0.40.3
|
||||
@@ -25,8 +25,8 @@ require (
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
|
||||
@@ -69,8 +69,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -79,8 +79,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module github.com/grafana/grafana/apps/alerting/notifications
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
github.com/grafana/grafana-app-sdk v0.40.3
|
||||
@@ -82,13 +82,12 @@ require (
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/crypto v0.45.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/term v0.35.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect
|
||||
|
||||
@@ -252,8 +252,8 @@ go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
|
||||
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
@@ -270,8 +270,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
@@ -280,22 +280,22 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -308,8 +308,8 @@ golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtn
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
+9
-10
@@ -1,6 +1,6 @@
|
||||
module github.com/grafana/grafana/apps/dashboard
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
cuelang.org/go v0.11.1
|
||||
@@ -11,6 +11,7 @@ require (
|
||||
github.com/grafana/grafana/pkg/apimachinery v0.0.0-20250514132646-acbc7b54ed9e
|
||||
github.com/prometheus/client_golang v1.23.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
golang.org/x/net v0.45.0
|
||||
k8s.io/apimachinery v0.33.3
|
||||
k8s.io/apiserver v0.33.3
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff
|
||||
@@ -118,18 +119,16 @@ require (
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/crypto v0.45.0 // indirect
|
||||
golang.org/x/crypto v0.42.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 // indirect
|
||||
golang.org/x/mod v0.29.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/mod v0.27.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/term v0.35.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
golang.org/x/tools v0.36.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
|
||||
|
||||
+16
-18
@@ -326,16 +326,16 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
|
||||
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
|
||||
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 h1:y5zboxd6LQAqYIhHnB48p0ByQ/GnQx2BE33L8BOHQkI=
|
||||
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
@@ -345,8 +345,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -354,8 +354,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191020152052-9984515f0562/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -374,25 +374,23 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU=
|
||||
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -402,8 +400,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
package schemaversion
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
+5
-5
@@ -1,6 +1,6 @@
|
||||
module github.com/grafana/grafana/apps/folder
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
github.com/grafana/grafana-app-sdk v0.40.3
|
||||
@@ -47,11 +47,11 @@ require (
|
||||
go.opentelemetry.io/otel v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/term v0.35.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
|
||||
+8
-8
@@ -122,8 +122,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -132,14 +132,14 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
||||
+10
-11
@@ -1,6 +1,6 @@
|
||||
module github.com/grafana/grafana/apps/iam
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
replace github.com/grafana/grafana => ../../
|
||||
|
||||
@@ -201,7 +201,7 @@ require (
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.14.2 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/grafana/alerting v0.0.0-20251120160935-5264e10a83a2 // indirect
|
||||
github.com/grafana/alerting v0.0.0-20250903205312-24567882c5d1 // indirect
|
||||
github.com/grafana/authlib/types v0.0.0-20250710201142-9542f2f28d43 // indirect
|
||||
github.com/grafana/dataplane/sdata v0.0.9 // indirect
|
||||
github.com/grafana/dskit v0.0.0-20250611075409-46f51e1ce914 // indirect
|
||||
@@ -403,18 +403,17 @@ require (
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
gocloud.dev v0.42.0 // indirect
|
||||
golang.org/x/crypto v0.45.0 // indirect
|
||||
golang.org/x/crypto v0.42.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 // indirect
|
||||
golang.org/x/mod v0.29.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/mod v0.27.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/term v0.35.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
golang.org/x/tools v0.38.0 // indirect
|
||||
golang.org/x/tools v0.36.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
|
||||
gonum.org/v1/gonum v0.16.0 // indirect
|
||||
|
||||
+18
-22
@@ -724,8 +724,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/grafana/alerting v0.0.0-20251120160935-5264e10a83a2 h1:obwROzBGK12TBI1dnjLRm5prG3klZl7rFCDew9omeCk=
|
||||
github.com/grafana/alerting v0.0.0-20251120160935-5264e10a83a2/go.mod h1:EfKE30jNw2b4whbJjtgea3JXUfUSVadmVSL/IAGgJeQ=
|
||||
github.com/grafana/alerting v0.0.0-20250903205312-24567882c5d1 h1:1Xjk9zr9P4jeRsdHlWkQPiByd16YEEeTVqkwn8i6iMQ=
|
||||
github.com/grafana/alerting v0.0.0-20250903205312-24567882c5d1/go.mod h1:EfKE30jNw2b4whbJjtgea3JXUfUSVadmVSL/IAGgJeQ=
|
||||
github.com/grafana/authlib v0.0.0-20250710201142-9542f2f28d43 h1:vVPT0i5Y1vI6qzecYStV2yk7cHKrC3Pc7AgvwT5KydQ=
|
||||
github.com/grafana/authlib v0.0.0-20250710201142-9542f2f28d43/go.mod h1:1fWkOiL+m32NBgRHZtlZGz2ji868tPZACYbqP3nBRJI=
|
||||
github.com/grafana/authlib/types v0.0.0-20250710201142-9542f2f28d43 h1:NlkGMnVi/oUn6Cr90QbJYpQJ4FnjyAIG9Ex5GtTZIzw=
|
||||
@@ -1473,8 +1473,8 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
|
||||
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1520,8 +1520,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
|
||||
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -1581,8 +1581,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1623,8 +1623,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -1724,10 +1724,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU=
|
||||
golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
@@ -1735,8 +1733,8 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -1750,8 +1748,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -1822,10 +1820,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/tools/godoc v0.1.0-deprecated h1:o+aZ1BOj6Hsx/GBdJO/s815sqftjSnrZZwyYTHODvtk=
|
||||
golang.org/x/tools/godoc v0.1.0-deprecated/go.mod h1:qM63CriJ961IHWmnWa9CjZnBndniPt4a3CK0PVB9bIg=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module github.com/grafana/grafana/apps/investigations
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
github.com/grafana/grafana-app-sdk v0.40.3
|
||||
@@ -65,12 +65,12 @@ require (
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/term v0.35.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
|
||||
|
||||
+12
-12
@@ -159,34 +159,34 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module github.com/grafana/grafana/apps/playlist
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
github.com/grafana/grafana-app-sdk v0.40.3
|
||||
@@ -65,12 +65,12 @@ require (
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/term v0.35.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
|
||||
|
||||
+12
-12
@@ -159,34 +159,34 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
+7
-7
@@ -1,6 +1,6 @@
|
||||
module github.com/grafana/grafana/apps/plugins
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
github.com/grafana/authlib/types v0.0.0-20250710201142-9542f2f28d43
|
||||
@@ -71,13 +71,13 @@ require (
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/crypto v0.45.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/crypto v0.42.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/term v0.35.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
|
||||
|
||||
+14
-14
@@ -171,8 +171,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
|
||||
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
@@ -185,8 +185,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -194,8 +194,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -206,23 +206,23 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -231,8 +231,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module github.com/grafana/grafana/apps/preferences
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
github.com/grafana/grafana-app-sdk v0.40.3
|
||||
@@ -47,11 +47,11 @@ require (
|
||||
go.opentelemetry.io/otel v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/term v0.35.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
|
||||
@@ -122,8 +122,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -132,14 +132,14 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module github.com/grafana/grafana/apps/provisioning
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
github.com/google/go-github/v70 v70.0.0
|
||||
@@ -64,12 +64,12 @@ require (
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/crypto v0.45.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/crypto v0.42.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/term v0.35.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/grpc v1.74.2 // indirect
|
||||
|
||||
+14
-14
@@ -149,8 +149,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
|
||||
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
@@ -163,8 +163,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -172,8 +172,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -184,23 +184,23 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -209,8 +209,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
+5
-5
@@ -1,6 +1,6 @@
|
||||
module github.com/grafana/grafana/apps/secret
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
github.com/grafana/grafana-app-sdk v0.40.3
|
||||
@@ -52,11 +52,11 @@ require (
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/term v0.35.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
|
||||
+8
-8
@@ -134,8 +134,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -144,14 +144,14 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module github.com/grafana/grafana/apps/shorturl
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
github.com/grafana/grafana-app-sdk v0.40.3
|
||||
@@ -64,12 +64,12 @@ require (
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/net v0.45.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.18.0 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/sync v0.17.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
golang.org/x/term v0.35.0 // indirect
|
||||
golang.org/x/text v0.29.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
|
||||
|
||||
+12
-12
@@ -159,34 +159,34 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
|
||||
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
|
||||
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
||||
@@ -204,10 +204,6 @@ instrument_queries = false
|
||||
# This is useful when databases have auto-generated primary keys enabled.
|
||||
delete_auto_gen_ids = false
|
||||
|
||||
# Set to true to skip dashboard UID migrations on startup.
|
||||
# Improves startup performance for instances with large numbers of annotations who do not plan to downgrade Grafana.
|
||||
skip_dashboard_uid_migration_on_startup = false
|
||||
|
||||
#################################### Cache server #############################
|
||||
[remote_cache]
|
||||
# Either "redis", "memcached" or "database" default is "database"
|
||||
|
||||
@@ -201,10 +201,6 @@
|
||||
# This is useful when databases have auto-generated primary keys enabled.
|
||||
;delete_auto_gen_ids = false
|
||||
|
||||
# Set to true to skip dashboard UID migrations on startup.
|
||||
# Improves startup performance for instances with large numbers of annotations who do not plan to downgrade Grafana.
|
||||
;skip_dashboard_uid_migration_on_startup = false
|
||||
|
||||
#################################### Cache server #############################
|
||||
[remote_cache]
|
||||
# Either "redis", "memcached" or "database" default is "database"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module high-card
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
require github.com/prometheus/client_golang v1.22.0
|
||||
|
||||
@@ -13,6 +13,6 @@ require (
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.62.0 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
)
|
||||
|
||||
@@ -24,8 +24,8 @@ github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzM
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module utf8-support
|
||||
|
||||
go 1.25.5
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
@@ -15,6 +15,6 @@ require (
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/sys v0.36.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
)
|
||||
|
||||
@@ -24,8 +24,8 @@ github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzM
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.25.5
|
||||
FROM golang:1.25.3
|
||||
|
||||
ADD main.go /go/src/webhook/main.go
|
||||
|
||||
|
||||
-4
@@ -140,10 +140,6 @@ The following list contains role-based access control actions.
|
||||
| `roles:read` | <ul><li>`roles:*`</li><li>`roles:uid:*`</li></ul> | List roles and read a specific role with its permissions. |
|
||||
| `roles:write` | <ul><li>`permissions:type:delegate`</li><ul> | Create or update a custom role. |
|
||||
| `roles:write` | <ul><li>`permissions:type:escalate`</li><ul> | Reset basic roles to their default permissions. |
|
||||
| `secret.securevalues:create` | <ul><li>`secret.securevalues:*`</li><li> | Create secure values. |
|
||||
| `secret.securevalues:read` | <ul><li>`secret.securevalues:*`</li><li> | Read and list secure values. |
|
||||
| `secret.securevalues:write` | <ul><li>`secret.securevalues:*`</li><li> | Update secure values. |
|
||||
| `secret.securevalues:delete` | <ul><li>`secret.securevalues:*`</li><li> | Delete secure values. |
|
||||
| `server.stats:read` | None | Read Grafana instance statistics. |
|
||||
| `server.usagestats.report:read` | None | View usage statistics report. |
|
||||
| `serviceaccounts:write` | <ul><li>`serviceaccounts:*`</li><ul> | Create Grafana service accounts. |
|
||||
|
||||
@@ -141,20 +141,6 @@ Alternatively, you can use the `index()` function to retrieve the query value:
|
||||
{{ index $values "B" }} CPU usage for {{ index $labels "instance" }} over the last 5 minutes.
|
||||
```
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
|
||||
Variable names that start with a number (for example, `1B`) are not [valid identifiers in Go templates](https://go.dev/ref/spec#Identifiers).
|
||||
|
||||
To access a value or label whose key starts with a number, use the `index` function:
|
||||
|
||||
```
|
||||
{{ index $values "1B" }} CPU usage for {{ index $labels "1instance" }} over the last 5 minutes.
|
||||
```
|
||||
|
||||
Using `{{ $values.1B.Value }}` is invalid and causes the template code to render as plain text.
|
||||
|
||||
{{< /admonition >}}
|
||||
|
||||
#### $value
|
||||
|
||||
The `$value` variable is a string containing the labels and values of all instant queries; threshold, reduce and math expressions, and classic conditions in the alert rule.
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
---
|
||||
canonical: https://grafana.com/docs/grafana/latest/alerting/monitor-status/alerts-page/
|
||||
title: Alerts overview page
|
||||
description: Use the Alert page to assess, prioritize, and take action on alerts quickly in Grafana Cloud.
|
||||
weight: 405
|
||||
keywords:
|
||||
- Grafana Cloud
|
||||
- alerting
|
||||
- triage
|
||||
- incidents
|
||||
- monitoring
|
||||
- Grafana-managed alerts
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
---
|
||||
|
||||
{{< docs/public-preview product="Alerts page" >}}
|
||||
|
||||
# Alert view
|
||||
|
||||
Grafana Alerting provides a consolidated snapshot of your firing and pending Grafana-managed alerts in a simplified view. For users with complex deployments, it can be difficult to monitor and prioritize critical incidents among a large volume of firing or pending alerts. With the Alert page, you have a view where you can quickly explore and sort your recent alert history and see what alerts require review or action.
|
||||
|
||||
To see your firing and pending alerts in the alert page, go to **Alerts & IRM > Alerting > Alerts**.
|
||||
|
||||
{{< figure src="/media/docs/alerting/alerts-page.png" max-width="750px" alt="Filter your firing and pending alerts in the Alert view." >}}
|
||||
|
||||
## How it works
|
||||
|
||||
The Alerts page only shows alerts from Grafana-managed alert rules. Grafana uses a metric called `GRAFANA_ALERTS`, which is recorded in the default Mimir data source that is provisioned for cloud users.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
OSS users need to manually configure this. To configure alert state history for OSS, refer to the <a href="https://grafana.com/docs/grafana/latest/alerting/set-up/configure-alert-state-history/#configure-loki-and-prometheus-for-alert-state">configure Loki and Prometheus for alert state</a> documentation.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Filter alerts in the Alerts page
|
||||
|
||||
The page displays the count of alert rules and instances that are both firing and pending. Use the filters to group your alerts and find specific alerts by label.
|
||||
|
||||
To group alerts by label, click the **Group by** field and select the labels from the dropdown menu. You can select multiple labels for alert grouping.
|
||||
|
||||
To filter by specific label values, click the **Filters** field and select a label to filter by, followed by an expression of what label value you want to use. You can enter multiple label values in your search.
|
||||
|
||||
You can also select a time range from the time picker to further adjust your results. Click the **time range** field and enter an absolute time range or select a period from the quick ranges list to apply a new time window to the display results.
|
||||
@@ -1,43 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
description: Deploy, configure and provision Grafana with as-code workflows.
|
||||
menuTitle: As code
|
||||
title: Deploy, configure and provision Grafana with as-code workflows
|
||||
hero:
|
||||
title: Configure and provision Grafana with as-code workflows
|
||||
level: 1
|
||||
width: 100
|
||||
height: 100
|
||||
description: Manage resources, including folders and dashboards, and configurations with as-code workflows.
|
||||
cards:
|
||||
items:
|
||||
- description: Using Observability as code, you can version, automate, and scale Grafana configurations, including dashboards and observability workflows.
|
||||
height: 24
|
||||
href: ./observability-as-code/
|
||||
title: Observability as code
|
||||
- description: Using Infrastructure as code, you can declaratively manage what Grafana resources to use.
|
||||
height: 24
|
||||
href: ./infrastructure-as-code/
|
||||
title: Infrastructure as code
|
||||
weight: 850
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/
|
||||
---
|
||||
|
||||
{{< docs/hero-simple key="hero" >}}
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
**Observability as code** lets you apply code management best practices to your observability resources. By representing Grafana resources as code, you can integrate them into existing infrastructure-as-code workflows and apply standard development practices. Instead of manually configuring dashboards or settings through the Grafana UI, you can:
|
||||
|
||||
- Write configurations in code: Define dashboards in JSON or other supported formats.
|
||||
- Sync your Grafana setup to GitHub: Track changes, collaborate, and roll back updates using Git and GitHub, or other remote sources.
|
||||
- Automate with CI/CD: Integrate Grafana directly into your development and deployment pipelines.
|
||||
- Standardize workflows: Ensure consistency across your teams by using repeatable, codified processes for managing Grafana resources.
|
||||
|
||||
In Grafana Cloud, you can use **Infrastructure as code** to declaratively create and manage dashboards via configuration files in source code, and incorporate them efficiently into your own use cases. This enables you to review code, reuse it, and create better workflows. Infrastructure as code tools include Terraform, Ansible, the Grafana Operator, and Grizzly.
|
||||
|
||||
## Explore
|
||||
|
||||
{{< card-grid key="cards" type="simple" >}}
|
||||
@@ -1,200 +0,0 @@
|
||||
---
|
||||
keywords:
|
||||
- Infrastructure as code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
menuTitle: Infrastructure as code
|
||||
title: Provision Grafana Cloud with infrastructure as code
|
||||
weight: 800
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/
|
||||
---
|
||||
|
||||
# Provision Grafana Cloud with infrastructure as code
|
||||
|
||||
With Grafana Cloud, you can create dashboards via configuration files in source code. This enables you to review code, reuse it, and create better workflows.
|
||||
|
||||
Via code, you can _declaratively_ manage _what_ Grafana resources to use.
|
||||
The as-code tools and tutorials that follow show you what to do to declaratively manage Grafana resources, and incorporate them efficiently into your own use cases.
|
||||
|
||||
## Grafana Terraform provider
|
||||
|
||||
Grafana administrators can manage dashboards, alerts and collectors, add synthetic monitoring probes and checks, manage identity and access, and more using the [Terraform provider for Grafana](https://registry.terraform.io/providers/grafana/grafana/latest).
|
||||
|
||||
The following example shows a Terraform configuration for creating a dashboard:
|
||||
|
||||
```terraform
|
||||
resource "grafana_dashboard" "metrics" {
|
||||
config_json = jsonencode({
|
||||
title = "as-code dashboard"
|
||||
uid = "ascode"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
This example dashboard only creates the dashboard and does not add any panels or rows.
|
||||
To get started, see the [Grafana Terraform provider guides](/docs/grafana-cloud/as-code/infrastructure-as-code/terraform/) or refer to the [Terraform Grafana Provider documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs).
|
||||
|
||||
### Who is this recommended for?
|
||||
|
||||
Grafana Terraform provider is best suited for users who are already using Terraform for non-Grafana use cases.
|
||||
|
||||
To manage the entire Grafana ecosystem of resources on either Grafana Cloud or OSS deployments of Grafana, it’s best to use the Terraform Grafana provider because it supports the most Grafana resources compared to Grafana’s other as-code solutions.
|
||||
|
||||
For Grafana Fleet Management users, the Grafana Terraform provider is best used to preregister new collectors before they are operational or add remote attributes to collectors already registered with the service.
|
||||
|
||||
### Known limitations
|
||||
|
||||
Managing dashboards isn’t the simplest process—you have to work with long JSON files, which can become difficult to review and update. Grafonnet can help with generating dashboard JSONs that can be used in Terraform, but Grafonnet requires knowing Jsonnet.
|
||||
|
||||
## Grafana Ansible collection
|
||||
|
||||
Resources for configuration management are available for Grafana through the [Ansible collection for Grafana](https://docs.ansible.com/ansible/latest/collections/grafana/grafana/index.html#plugins-in-grafana-grafana). The Grafana Ansible collection can be used to manage a variety of resources, including folders, cloud stacks, and dashboards. You can programmatically manage resources on Grafana that aren’t currently part of the Grafana Ansible collection by writing Ansible playbooks that use the HTTP APIs to manage resources for Grafana.
|
||||
|
||||
The following example shows an Ansible configuration for creating a dashboard:
|
||||
|
||||
```yaml
|
||||
- name: dashboard as code
|
||||
grafana.grafana.dashboard:
|
||||
dashboard: { 'title': 'as-code dashboard', 'uid': 'ascode' }
|
||||
stack_slug: '{{ stack_slug }}'
|
||||
grafana_api_key: '{{ grafana_api_key }}'
|
||||
state: present
|
||||
```
|
||||
|
||||
This example dashboard creates only the dashboard and does not add any panels or rows.
|
||||
|
||||
To get started, see the [quickstart guides for the Grafana Ansible Collection](/docs/grafana-cloud/as-code/infrastructure-as-code/ansible/) or check out the [collection's documentation](https://docs.ansible.com/ansible/latest/collections/grafana/grafana/index.html#plugins-in-grafana-grafana).
|
||||
|
||||
### Who is this recommended for?
|
||||
|
||||
Like Terraform, the Grafana Ansible collection is best suited for people already using Ansible for non-Grafana use cases. The collection only works for Grafana Cloud right now, so it makes the most sense for Grafana Cloud customers who want to manage resources declaratively using Ansible.
|
||||
|
||||
### Known limitations
|
||||
|
||||
The Grafana Ansible collection only works for Grafana Cloud and only supports eight resources: API keys, cloud stacks, plugins, dashboards, folders, data sources, alert contact points, and notification policies. This can be a drawback if you want to manage the entire Grafana ecosystem as code with Ansible. As with Terraform, building dashboards is a challenging process.
|
||||
|
||||
## Grafana Operator
|
||||
|
||||
The Grafana Operator is a Kubernetes operator that can provision, manage, and operate Grafana instances and their associated resources within Kubernetes through Custom Resources. This Kubernetes-native tool eases the administration of Grafana, offering a declarative approach to managing dashboards, data sources, and folders. It also automatically syncs the Kubernetes Custom resources and the actual resources in the Grafana Instance. It supports leveraging Grafonnet for generating Grafana dashboard definitions for seamless dashboard configuration as code.
|
||||
|
||||
To get started, see the [quickstart guides for the Grafana Operator](/docs/grafana-cloud/as-code/infrastructure-as-code/grafana-operator/) or check out the [Grafana Operator's documentation](https://grafana.github.io/grafana-operator/).
|
||||
|
||||
A sample Kubernetes configuration for creating a dashboard using the Grafana operator looks like this:
|
||||
|
||||
```yaml
|
||||
apiVersion: integreatly.org/v1alpha1
|
||||
kind: GrafanaDashboard
|
||||
metadata:
|
||||
name: simple-dashboard
|
||||
labels:
|
||||
app: grafana
|
||||
spec:
|
||||
instanceSelector:
|
||||
matchLabels:
|
||||
dashboards: <Grafana-custom-resource-name>
|
||||
json: >
|
||||
{
|
||||
"title": "as-code dashboard",
|
||||
"uid": "ascode"
|
||||
}
|
||||
```
|
||||
|
||||
### Who is this recommended for?
|
||||
|
||||
The Grafana Operator is particularly fitting for:
|
||||
|
||||
- Teams seeking integrated solutions to manage Grafana resources within the Kubernetes cluster ecosystem.
|
||||
- Teams employing a GitOps approach, allowing them to treat Grafana configurations as code, stored alongside application manifests for versioned and automated deployments.
|
||||
|
||||
### Known limitations
|
||||
|
||||
While the Grafana Operator simplifies many aspects of operating Grafana and its resources on Kubernetes, its current support is mainly focused on managing dashboards, folders, and data sources. Advanced features like alerting and plugins (only works for OSS) are not supported yet.
|
||||
|
||||
## Grizzly
|
||||
|
||||
[Grizzly](https://grafana.github.io/grizzly/) is a command line tool that allows you to manage your observability resources with code. Grizzly supports Kubernetes-inspired YAML representation for the Grafana resource, which makes it easier to learn. With Grizzly, you can move dashboards within Grafana instances and also retrieve information about already provisioned Grafana resources. Grizzly currently supports:
|
||||
|
||||
- Grafana dashboards and dashboard folders
|
||||
- Grafana data sources
|
||||
- Prometheus recording rules and alerts in Grafana Cloud
|
||||
- Grafana Cloud Synthetic Monitoring checks
|
||||
|
||||
Grizzly can also deploy dashboards built in Jsonnet using Grafonnet. (Learn more in the [Grafonnet documentation](https://grafana.github.io/grafonnet-lib/api-docs/).)
|
||||
|
||||
The following example shows a Kubernetes-style Grizzly configuration for creating a dashboard:
|
||||
|
||||
```yaml
|
||||
apiVersion: grizzly.grafana.com/v1alpha1
|
||||
kind: Dashboard
|
||||
metadata:
|
||||
name: as-code-dashboard
|
||||
spec:
|
||||
title: as-code dashboard
|
||||
uid: ascode
|
||||
```
|
||||
|
||||
To get started, see the [Grizzly guides](grizzly/dashboards-folders-datasources/) or refer to the [Grizzly documentation](https://grafana.github.io/grizzly/).
|
||||
|
||||
### Who is this recommended for?
|
||||
|
||||
Grizzly is best suited for users who are either using Jsonnet to manage Grafana resources or those who prefer a Kubernetes-style YAML definition of their Grafana resources.
|
||||
|
||||
### Known limitations
|
||||
|
||||
Grizzly currently doesn’t support Grafana OnCall and Grafana Alerting resources.
|
||||
|
||||
## Grafana Crossplane provider
|
||||
|
||||
[Grafana Crossplane provider](https://github.com/grafana/crossplane-provider-grafana) is built using Terrajet and provides support for all resources supported by the Grafana Terraform provider. It enables users to define Grafana resources as Kubernetes manifests, and it also helps users who build their GitOps pipelines around Kubernetes manifests using tools like ArgoCD.
|
||||
|
||||
To get started with the Grafana Crossplane provider, install Crossplane in the Kubernetes cluster and use this command to install the provider:
|
||||
|
||||
```shell
|
||||
kubectl crossplane install provider grafana/crossplane-provider-grafana:v0.1.0
|
||||
```
|
||||
|
||||
During installation of the provider, CRDs for all the resources supported by the Terraform provider are added to the cluster so users can begin defining their Grafana resources as Kubernetes custom resources. The Crossplane provider ensures that whatever is defined in the custom resource definitions is what is visible in Grafana UI. If any changes are made directly in the UI, the changes will be discarded when the provider resyncs. This helps ensure that whatever is defined declaratively in the cluster will be the source of truth for Grafana resources.
|
||||
|
||||
To get started, refer to the examples folder in the Grafana Crossplane repository.
|
||||
|
||||
The following example shows a Kubernetes custom resource definition for creating a dashboard:
|
||||
|
||||
```yaml
|
||||
apiVersion: grafana.jet.crossplane.io/v1alpha1
|
||||
kind: Dashboard
|
||||
metadata:
|
||||
name: as-code-dashboard
|
||||
spec:
|
||||
forProvider:
|
||||
configJson: |
|
||||
{
|
||||
"title": "as-code dashboard",
|
||||
"uid": "ascode"
|
||||
}
|
||||
providerConfigRef:
|
||||
name: grafana-crossplane-provider
|
||||
```
|
||||
|
||||
### Who is this recommended for?
|
||||
|
||||
The Grafana Crossplane provider is intended for existing Crossplane users looking to manage Grafana resources from within Kubernetes and as Kubernetes manifests for the GitOps pipelines.
|
||||
|
||||
### Known limitations
|
||||
|
||||
To use the Crossplane provider, you must have the Crossplane CLI and Crossplane installed in the Kubernetes cluster. Note that the Crossplane provider is in an alpha stage, so it has not reached a stable state yet.
|
||||
|
||||
## Grafana as code comparison
|
||||
|
||||
Most of the tools defined here can be used with one another.
|
||||
The following chart compares the properties and tools mentioned above.
|
||||
|
||||
| Property/Tool | Grafana Terraform Provider | Grafana Ansible Collection | Grafana Operator | Grizzly | Grafana Crossplane Provider |
|
||||
| -------------------------------------- | --------------------------- | ----------------------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------- |
|
||||
| Grafana resources supported | All major Grafana resources | Grafana Cloud stack, plugins, API keys, dashboards, data sources, and folders | Dashboards, Datasources, Folders | Synthetic Monitoring checks, dashboards, data sources, folders, and Prometheus rules | All major Grafana resources |
|
||||
| Tool format | HCL/JSON | YAML | YAML | Jsonnet/YAML/JSON | YAML/JSON |
|
||||
| Follows Kubernetes-style manifests | | | ✓ | ✓ | ✓ |
|
||||
| Easy dashboard building process | | | ✓ | ✓ | |
|
||||
| Manage resources using Kubernetes | | | ✓ | | ✓ |
|
||||
| Retrieves Grafana resource information | ✓ | | | | |
|
||||
| Built-in resource sync process | | | ✓ | ✓ | ✓ |
|
||||
| Recommended for | Existing Terraform users | Existing Ansible users | Users looking to manage Grafana resources from within Kubernetes | Users looking to define Grafana resources in a Kubernetes-style YAML and users looking to get built-in workflow support and sync process | Users looking to manage Grafana resources from within Kubernetes |
|
||||
@@ -1,29 +0,0 @@
|
||||
---
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Ansible
|
||||
menuTitle: Ansible
|
||||
title: Grafana Ansible collection
|
||||
weight: 110
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/ansible/
|
||||
---
|
||||
|
||||
# Grafana Ansible collection
|
||||
|
||||
The [Grafana Ansible collection](https://docs.ansible.com/ansible/latest/collections/grafana/grafana/) provides configuration management resources for Grafana. You can use it to manage resources such as dashboards, Cloud stacks, folders, and more.
|
||||
|
||||
The collection also houses the [Grafana Agent role](https://github.com/grafana/grafana-ansible-collection/tree/main/roles/grafana_agent) which can be used to deploy and manage Grafana Agent across various Linux machines.
|
||||
|
||||
{{< docs/shared lookup="agent-deprecation.md" source="alloy" version="next" >}}
|
||||
|
||||
For resources currently not available in the Grafana Ansible collection, you can manage those resources on Grafana Cloud programmatically by writing Ansible playbooks that use the [Ansible's builtin uri module](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/uri_module.html) to call the [HTTP APIs](/docs/grafana/latest/developers/http_api/) to manage resources for the Grafana Cloud portal, as well as those within a stack.
|
||||
|
||||
Use the following guides to get started using Ansible to manage your Grafana Cloud stack:
|
||||
|
||||
| Topic | Description |
|
||||
| ----------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
|
||||
| [Create and manage a Grafana Cloud stack using Ansible](ansible-cloud-stack/) | Describes how to create a Grafana Cloud stack and add a data source and dashboard using [Ansible](https://www.ansible.com/). |
|
||||
| [Install Grafana Agent on a Linux host using Ansible](ansible-grafana-agent-linux/) | Describes how to install the Grafana Agent on a Linux node using Ansible and use it to push logs to Grafana Cloud. |
|
||||
| [Monitor multiple Linux hosts with Grafana Agent Role](ansible-multiple-agents/) | Describes how to use the Grafana Ansible collection to manage agents across multiple Linux hosts. |
|
||||
@@ -1,259 +0,0 @@
|
||||
---
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Ansible
|
||||
title: Create and manage a Grafana Cloud stack using Ansible
|
||||
weight: 100
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/ansible/ansible-cloud-stack/
|
||||
---
|
||||
|
||||
# Create and manage a Grafana Cloud stack using Ansible
|
||||
|
||||
Learn how to add a data source, a dashboard, and a folder to a Grafana Cloud stack using Ansible collection for Grafana.
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before you begin, you should have the following available:
|
||||
|
||||
- A Grafana Cloud account.
|
||||
- [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/index.html) installed on your machine
|
||||
|
||||
## Create a Cloud stack
|
||||
|
||||
1. Create a Grafana Cloud Access Policy and get a token.
|
||||
You'll need this for the Ansible playbook to be able to create a Grafana Cloud stack.
|
||||
Refer to [Create a Grafana Cloud Access Policy](/docs/grafana-cloud/security-and-account-management/authentication-and-permissions/access-policies/create-access-policies/).
|
||||
|
||||
1. Create an Ansible playbook file.
|
||||
|
||||
This Ansible playbook will create a Grafana Cloud stack by using the [Cloud stack module](https://docs.ansible.com/ansible/latest/collections/grafana/grafana/cloud_stack_module.html#ansible-collections-grafana-grafana-cloud-stack-module).
|
||||
|
||||
Create a file named `cloud-stack.yml` and add the following:
|
||||
|
||||
```yaml
|
||||
- name: Create Grafana Cloud stack
|
||||
connection: local
|
||||
hosts: localhost
|
||||
|
||||
vars:
|
||||
grafana_cloud_api_key: '<Your Cloud Access Policy token>'
|
||||
stack_name: '<stack-name>'
|
||||
org_name: '<org-name>'
|
||||
|
||||
tasks:
|
||||
- name: Create a Grafana Cloud stack
|
||||
grafana.grafana.cloud_stack:
|
||||
name: '{{ stack_name }}'
|
||||
stack_slug: '{{ stack_name }}'
|
||||
cloud_api_key: '{{ grafana_cloud_api_key }}'
|
||||
org_slug: '{{ org_name }}'
|
||||
delete_protection: true
|
||||
state: present
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<Your Cloud Access Policy token>` with a token from the Cloud Access Policy you created in the Grafana Cloud portal.
|
||||
- `<stack-name>` with the name of your stack.
|
||||
- `<org-name>` with the name of the organization in Grafana Cloud.
|
||||
|
||||
## Create an API key in the Grafana stack
|
||||
|
||||
Create an API key in the Grafana stack.
|
||||
You'll need this key to configure Ansible to be able to create data source, folders, and dashboards.
|
||||
|
||||
1. Log into your Grafana Cloud instance.
|
||||
2. Click **Administration** and select **API keys**.
|
||||
3. Click **Add API key**.
|
||||
4. In **Key name**, enter a name for your API key.
|
||||
5. In **Role**, select **Admin** or **Editor** to associate the role with this API key.
|
||||
6. Click **Copy** to save it for later use.
|
||||
|
||||
## Add a data source
|
||||
|
||||
This guide uses the InfluxDB data source.
|
||||
The required arguments vary depending on the type of data source you select.
|
||||
|
||||
1. Create a file named `data-source.yml` and add the following:
|
||||
|
||||
```yaml
|
||||
- name: Add/Update data source
|
||||
connection: local
|
||||
hosts: localhost
|
||||
|
||||
vars:
|
||||
data_sources:
|
||||
[
|
||||
{
|
||||
name: '<data-source-name>',
|
||||
type: 'influxdb',
|
||||
url: '<data-source-url>',
|
||||
user: '<username>',
|
||||
secureJsonData: { password: '<password>' },
|
||||
database: '<db-name>',
|
||||
id: <id>,
|
||||
uid: '<uid>',
|
||||
access: 'proxy',
|
||||
},
|
||||
]
|
||||
|
||||
grafana_api_key: '<API-Key>'
|
||||
stack_name: '<stack-name>'
|
||||
|
||||
tasks:
|
||||
- name: Create/Update Data sources
|
||||
grafana.grafana.datasource:
|
||||
datasource: '{{ item }}'
|
||||
stack_slug: '{{ stack_name }}'
|
||||
grafana_api_key: '{{ grafana_api_key }}'
|
||||
state: present
|
||||
loop: '{{ data_sources }}'
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<data-source-name>` with the name of the data source to be added in Grafana.
|
||||
- `<data-source-url>` with URL of your data source.
|
||||
- `<username>` with the username for authenticating with your data source.
|
||||
- `<password>` with the password for authenticating with your data source.
|
||||
- `<db-name>` with name of your database.
|
||||
- `<id>` with the ID for your data source in Grafana.
|
||||
- `<uid>` with the UID for your data source in Grafana.
|
||||
- `<stack-name>` with the name of your stack.
|
||||
- `<API-key>` with the [API key created in the Grafana instance](#create-an-api-key-in-the-grafana-stack).
|
||||
|
||||
## Add a folder
|
||||
|
||||
This Ansible playbook creates a folder in your Grafana instance by using the [Folder module](https://docs.ansible.com/ansible/latest/collections/grafana/grafana/folder_module.html#ansible-collections-grafana-grafana-folder-module).
|
||||
|
||||
1. Create a file named `folder.yml` and add the following:
|
||||
|
||||
```yaml
|
||||
- name: Add/Update Folders
|
||||
connection: local
|
||||
hosts: localhost
|
||||
|
||||
vars:
|
||||
folders: [{ title: '<folder-name>', uid: '<uid>' }]
|
||||
|
||||
stack_name: '<stack-name>'
|
||||
grafana_api_key: <API-key>
|
||||
|
||||
tasks:
|
||||
- name: Create/Update a Folder in Grafana
|
||||
grafana.grafana.folder:
|
||||
title: '{{ item.title }}'
|
||||
uid: '{{ item.uid }}'
|
||||
stack_slug: '{{ stack_name }}'
|
||||
grafana_api_key: '{{ grafana_api_key }}'
|
||||
state: present
|
||||
loop: '{{ folders }}'
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<folder-name>` with the name of the folder to be added in Grafana.
|
||||
- `<uid>` with the UID for your folder in Grafana.
|
||||
- `<stack-name>` with the name of your stack.
|
||||
- `<API-key>` with the [API key created in the Grafana instance](#create-an-api-key-in-the-grafana-stack).
|
||||
|
||||
## Add a dashboard to the folder
|
||||
|
||||
This Ansible playbook iterates through the dashboard JSON source code files in the folder referenced in `dashboards_path` and adds them in the Grafana instance by using the [Dashboard module](https://docs.ansible.com/ansible/latest/collections/grafana/grafana/dashboard_module.html#ansible-collections-grafana-grafana-dashboard-module).
|
||||
|
||||
1. Create a file named `dashboard.yml` and add the following:
|
||||
|
||||
```yaml
|
||||
- name: Add/Update Dashboards
|
||||
connection: local
|
||||
hosts: localhost
|
||||
|
||||
vars:
|
||||
dashboards_path: <path-to-dashboard-files> # Example "./dashboards"
|
||||
stack_name: "<stack-name>"
|
||||
grafana_api_key: <API-key>
|
||||
|
||||
tasks:
|
||||
- name: Find dashboard files
|
||||
find:
|
||||
paths: "{{ dashboards_path }}"
|
||||
file_type: file
|
||||
recurse: Yes
|
||||
patterns: "*.json"
|
||||
register: files_matched
|
||||
no_log: True
|
||||
|
||||
- name: Create list of dashboard file names
|
||||
set_fact:
|
||||
dashboard_file_names: "{{ dashboard_file_names | default ([]) + [item.path] }}"
|
||||
loop: "{{ files_matched.files }}"
|
||||
no_log: True
|
||||
|
||||
- name: Create/Update a dashboard
|
||||
grafana.grafana.dashboard:
|
||||
dashboard: "{{ lookup('ansible.builtin.file','{{ item }}' ) }}"
|
||||
stack_slug: "{{ stack_name }}"
|
||||
grafana_api_key: "{{ grafana_api_key }}"
|
||||
state: present
|
||||
loop: "{{ dashboard_file_names }}"
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<path-to-dashboard-files>` with the path to the folder containing dashboard JSON source code files.
|
||||
- `<stack-name>` with the name of your stack.
|
||||
- `<API-key>` with the [API key created in the Grafana instance](#create-an-api-key-in-the-grafana-stack).
|
||||
|
||||
## Run the Ansible playbooks
|
||||
|
||||
In a terminal, run the following commands from the directory where all of the Ansible playbooks are located.
|
||||
|
||||
1. To create the Grafana Cloud stack.
|
||||
|
||||
```shell
|
||||
ansible-playbook cloud-stack.yml
|
||||
```
|
||||
|
||||
1. To add a data source to the Grafana stack.
|
||||
|
||||
```shell
|
||||
ansible-playbook data-source.yml
|
||||
```
|
||||
|
||||
1. To add a folder to the Grafana stack
|
||||
|
||||
```shell
|
||||
ansible-playbook folder.yml
|
||||
```
|
||||
|
||||
1. To add a dashboard to the folder in your Grafana stack.
|
||||
|
||||
```shell
|
||||
ansible-playbook dashboard.yml
|
||||
```
|
||||
|
||||
## Validation
|
||||
|
||||
Once you run the Ansible playbooks, you should be able to verify the following:
|
||||
|
||||
- The new Grafana stack is created and visible in the Cloud Portal.
|
||||
|
||||

|
||||
|
||||
- A new data source (InfluxDB in this example) is visible in the Grafana stack.
|
||||
|
||||

|
||||
|
||||
- A new folder in Grafana.
|
||||
In the following image, a folder named `Demos` was added.
|
||||
|
||||

|
||||
|
||||
- A new dashboard in the Grafana stack.
|
||||
In the following image a dashboard named `InfluxDB Cloud Demos` was created inside the "Demos" folder.
|
||||
|
||||

|
||||
|
||||
## Summary
|
||||
|
||||
In this guide, you created a Grafana Cloud stack along with a data source, folder, and dashboard imported from a JSON file using Ansible.
|
||||
|
||||
To learn more about managing Grafana using Ansible, refer to the [Grafana Ansible collection](https://docs.ansible.com/ansible/latest/collections/grafana/grafana/).
|
||||
-156
@@ -1,156 +0,0 @@
|
||||
---
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Ansible
|
||||
title: Install Grafana Agent on a Linux host using Ansible
|
||||
weight: 200
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/ansible/ansible-grafana-agent-linux/
|
||||
---
|
||||
|
||||
# Install Grafana Agent on a Linux host using Ansible
|
||||
|
||||
{{< docs/shared lookup="agent-deprecation.md" source="alloy" version="next" >}}
|
||||
|
||||
This guide shows how to install Grafana Agent on a Linux host using [Ansible](https://www.ansible.com/) and to use it to push logs to Grafana Cloud.
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before you begin, you should have the following available:
|
||||
|
||||
- A Grafana Cloud account.
|
||||
- A Linux machine
|
||||
- Command line (terminal) access to that Linux machine with `unzip` binary installed
|
||||
- Account permissions sufficient to install and use Grafana Agent on the Linux machine
|
||||
- [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/index.html) installed on the Linux machine
|
||||
|
||||
## Choose your Grafana Agent installation method
|
||||
|
||||
This guide covers two methods for installing and configuring Grafana Agent using Ansible:
|
||||
|
||||
- Installing Grafana Agent in Flow mode
|
||||
- Installing Grafana Agent in static mode
|
||||
|
||||
Depending on your specific needs and the configuration of your environment, you may choose one method over the other for better compatibility or ease of setup.
|
||||
|
||||
<!-- vale Grafana.Spelling = NO -->
|
||||
|
||||
### Install Grafana Agent in flow mode using Ansible
|
||||
|
||||
This Ansible playbook installs Grafana Agent in Flow mode and also creates a systemd service to manage it.
|
||||
|
||||
It creates a user named `grafana-agent` on the Linux machine for running Grafana Agent.
|
||||
|
||||
1. Create a file named `grafana-agent.yml` and add the following:
|
||||
|
||||
```yaml
|
||||
- name: Install Grafana Agent Flow
|
||||
hosts: all
|
||||
become: true
|
||||
tasks:
|
||||
- name: Install Grafana Agent Flow
|
||||
ansible.builtin.include_role:
|
||||
name: grafana.grafana.grafana_agent
|
||||
vars:
|
||||
grafana_agent_mode: flow
|
||||
# Change config file on the host to .river
|
||||
grafana_agent_config_filename: config.river
|
||||
# Change config file to be copied
|
||||
grafana_agent_provisioned_config_file: '<path-to-config-file>'
|
||||
# Remove default flags
|
||||
grafana_agent_flags_extra:
|
||||
server.http.listen-addr: '0.0.0.0:12345'
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<path-to-config-file>` with the path to the River configuration file on the Ansible Controller (localhost).
|
||||
|
||||
### Install Grafana Agent in static mode using Ansible
|
||||
|
||||
This Ansible playbook installs Grafana Agent in static mode and also creates a systemd service to manage it.
|
||||
It creates a user named `grafana-agent` on the Linux machine for running Grafana Agent.
|
||||
|
||||
1. Create a file named `grafana-agent.yml` and add the following:
|
||||
|
||||
```yaml
|
||||
- name: Install Grafana Agent in static mode
|
||||
hosts: all
|
||||
become: true
|
||||
|
||||
vars:
|
||||
grafana_cloud_api_key: <Your Cloud Access Policy token>
|
||||
logs_username: <loki-username> # Example - 411478
|
||||
loki_url: <loki-push-url> # Example - https://logs-prod-017.grafana.net/loki/api/v1/push
|
||||
tasks:
|
||||
- name: Install Grafana Agent in static mode
|
||||
ansible.builtin.include_role:
|
||||
name: grafana_agent
|
||||
vars:
|
||||
grafana_agent_logs_config:
|
||||
configs:
|
||||
- clients:
|
||||
- basic_auth:
|
||||
password: '{{ grafana_cloud_api_key }}'
|
||||
username: '{{ logs_username }}'
|
||||
url: '{{ loki_url }}'
|
||||
name: default
|
||||
positions:
|
||||
filename: /tmp/positions.yaml
|
||||
scrape_configs:
|
||||
- job_name: integrations/node_exporter_direct_scrape
|
||||
static_configs:
|
||||
- targets:
|
||||
- localhost
|
||||
labels:
|
||||
instance: hostname
|
||||
__path__: /var/log/*.log
|
||||
job: integrations/node_exporter
|
||||
target_config:
|
||||
sync_period: 10s
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<Your Cloud Access Policy token>` with a token from the Cloud Access Policy you created in the Grafana Cloud portal.
|
||||
- `<loki-username>` with the Loki Username
|
||||
- `<loki-push-url>` with the push endpoint URL of Loki Instance
|
||||
|
||||
## Run the Ansible playbook on the Linux machine
|
||||
|
||||
In the Linux machine's terminal, run the following command from the directory where the Ansible playbook is located.
|
||||
|
||||
```shell
|
||||
ansible-playbook grafana-agent.yml
|
||||
```
|
||||
|
||||
## Validate
|
||||
|
||||
<!-- vale Grafana.ReferTo = NO -->
|
||||
|
||||
1. Grafana Agent service on the Linux machine should be `active` and `running`. You should see a similar output:
|
||||
<!-- vale Grafana.ReferTo = NO -->
|
||||
|
||||
```shell
|
||||
$ sudo systemctl status grafana-agent.service
|
||||
grafana-agent.service - Grafana Agent
|
||||
Loaded: loaded (/etc/systemd/system/grafana-agent.service; enabled; vendor preset: enabled)
|
||||
Active: active (running) since Wed 2022-07-20 09:56:15 UTC; 36s ago
|
||||
Main PID: 3176 (agent-linux-amd)
|
||||
Tasks: 8 (limit: 515)
|
||||
Memory: 92.5M
|
||||
CPU: 380ms
|
||||
CGroup: /system.slice/grafana-agent.service
|
||||
└─3176 /usr/local/bin/agent-linux-amd64 --config.file=/etc/grafana-cloud/agent-config.yaml
|
||||
```
|
||||
|
||||
1. In a Grafana Cloud stack, click **Explore** in the left-side menu.
|
||||
|
||||
1. At the top of the page, use the dropdown menu to select your Loki logs data source. In the Log Browser, run the query `{job="integrations/node_exporter"}`
|
||||
|
||||

|
||||
|
||||
## Summary
|
||||
|
||||
In this guide, you installed Grafana Agent on a Linux node using Ansible and used it to push logs to Grafana Cloud.
|
||||
|
||||
To learn more about the Grafana Ansible collection, refer to the [GitHub repository](https://github.com/grafana/grafana-ansible-collection) or its [documentation](https://docs.ansible.com/ansible/latest/collections/grafana/grafana/index.html).
|
||||
@@ -1,207 +0,0 @@
|
||||
---
|
||||
menuTitle: Monitor multiple Linux hosts with the grafana_agent role
|
||||
title: Monitor multiple Linux hosts with grafana_agent role
|
||||
weight: 300
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/ansible/ansible-multiple-agents/
|
||||
---
|
||||
|
||||
# Monitor multiple Linux hosts with the `grafana_agent` role
|
||||
|
||||
{{< docs/shared lookup="agent-deprecation.md" source="alloy" version="next" >}}
|
||||
|
||||
Monitoring multiple Linux hosts can be difficult.
|
||||
To make it easier, you can use the `grafana_agent` role with the [Grafana Ansible collection](../).
|
||||
This guide shows how to use the `grafana_agent` Ansible role to deploy and manage Grafana Agent across multiple Linux hosts so you can monitor them using Grafana Cloud.
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before you begin, you should have:
|
||||
|
||||
- Linux hosts
|
||||
- SSH access to the Linux hosts
|
||||
- Account permissions sufficient to install and use Grafana Agent on the Linux hosts
|
||||
|
||||
## Install the Grafana Ansible collection
|
||||
|
||||
The [`grafana_agent` role](https://github.com/grafana/grafana-ansible-collection/tree/main/roles/grafana_agent) is available in the Grafana Ansible collection as of the 1.1.0 release.
|
||||
|
||||
To install the Grafana Ansible collection, run this command:
|
||||
|
||||
```
|
||||
ansible-galaxy collection install grafana.grafana
|
||||
```
|
||||
|
||||
## Create an Ansible inventory file
|
||||
|
||||
Next, you will set up your hosts and create an inventory file.
|
||||
|
||||
1. Create your hosts and add public SSH keys to them.
|
||||
|
||||
This example uses eight Linux hosts: two Ubuntu hosts, two CentOS hosts, two Fedora hosts, and two Debian hosts.
|
||||
|
||||
1. Create an Ansible inventory file.
|
||||
|
||||
The Ansible inventory, which resides in a file named `inventory`, looks similar to this:
|
||||
|
||||
```
|
||||
146.190.208.216 # hostname = ubuntu-01
|
||||
146.190.208.190 # hostname = ubuntu-02
|
||||
137.184.155.128 # hostname = centos-01
|
||||
146.190.216.129 # hostname = centos-02
|
||||
198.199.82.174 # hostname = debian-01
|
||||
198.199.77.93 # hostname = debian-02
|
||||
143.198.182.156 # hostname = fedora-01
|
||||
143.244.174.246 # hostname = fedora-02
|
||||
```
|
||||
|
||||
1. Create an `ansible.cfg` file within the same directory as `inventory`, with the following values:
|
||||
```
|
||||
[defaults]
|
||||
inventory = inventory # Path to the inventory file
|
||||
private_key_file = ~/.ssh/id_rsa # Path to my private SSH Key
|
||||
remote_user=root # username
|
||||
```
|
||||
{{< admonition type="note" >}}
|
||||
If you are copying the previously listed files, remove the comments (#).
|
||||
{{< /admonition >}}
|
||||
|
||||
## Use the `grafana_agent` Ansible role
|
||||
|
||||
Next you will create an Ansible playbook that calls the `grafana_agent` role from the `grafana.grafana` Ansible collection.
|
||||
|
||||
To use the `grafana_agent` Ansible role:
|
||||
|
||||
1. Create a file named `deploy-agent.yml` in the same directory as `ansible.cfg` and `inventory` and add the configuration below.
|
||||
|
||||
```yaml
|
||||
- name: Install Grafana Agent
|
||||
hosts: all
|
||||
become: true
|
||||
|
||||
vars:
|
||||
grafana_cloud_api_key: <Your Cloud Access Policy token>
|
||||
metrics_username: <prometheus-username> # Example - 825019
|
||||
logs_username: <loki-username> # Example - 411478
|
||||
prometheus_url: <prometheus-push-url> # Example - https://prometheus-us-central1.grafana.net/api/prom/push
|
||||
loki_url: <loki-push-url> # Example - https://logs-prod-017.grafana.net/loki/api/v1/push
|
||||
tasks:
|
||||
- name: Install Grafana Agent
|
||||
ansible.builtin.include_role:
|
||||
name: grafana.grafana.grafana_agent
|
||||
vars:
|
||||
grafana_agent_metrics_config:
|
||||
configs:
|
||||
- name: integrations
|
||||
remote_write:
|
||||
- basic_auth:
|
||||
password: '{{ grafana_cloud_api_key }}'
|
||||
username: '{{ metrics_username }}'
|
||||
url: '{{ prometheus_url }}'
|
||||
|
||||
global:
|
||||
scrape_interval: 60s
|
||||
wal_directory: /tmp/grafana-agent-wal
|
||||
grafana_agent_logs_config:
|
||||
configs:
|
||||
- name: default
|
||||
clients:
|
||||
- basic_auth:
|
||||
password: '{{ grafana_cloud_api_key }}'
|
||||
username: '{{ logs_username }}'
|
||||
url: '{{ loki_url }}'
|
||||
positions:
|
||||
filename: /tmp/positions.yaml
|
||||
target_config:
|
||||
sync_period: 10s
|
||||
scrape_configs:
|
||||
- job_name: varlogs
|
||||
static_configs:
|
||||
- targets: [localhost]
|
||||
labels:
|
||||
instance: ${HOSTNAME:-default}
|
||||
job: varlogs
|
||||
__path__: /var/log/*log
|
||||
grafana_agent_integrations_config:
|
||||
node_exporter:
|
||||
enabled: true
|
||||
instance: ${HOSTNAME:-default}
|
||||
prometheus_remote_write:
|
||||
- basic_auth:
|
||||
password: '{{ grafana_cloud_api_key }}'
|
||||
username: '{{ metrics_username }}'
|
||||
url: '{{ prometheus_url }}'
|
||||
grafana_agent_env_vars:
|
||||
HOSTNAME: '%H'
|
||||
```
|
||||
|
||||
The playbook calls the `grafana_agent` role from the `grafana.grafana` Ansible collection.
|
||||
|
||||
The Agent configuration in this playbook sends metrics and logs from the Linux hosts to Grafana Cloud along with the hostname of each instance.
|
||||
|
||||
Refer to the [Grafana Ansible documentation](https://github.com/grafana/grafana-ansible-collection/tree/main/roles/grafana_agent#role-variables) to understand the other variables you can pass to the `grafana_agent` role.
|
||||
|
||||
When deploying the Agent across multiple instances to monitor them, it is essential that the Agent is able to auto-detect the hostname for ease of monitoring.
|
||||
Notice that the label `instance` has been set to the value `${HOSTNAME:-default}`, which is substituted by the value of the HOSTNAME environment variable in the Linux host.
|
||||
|
||||
To read more about the variable substitution, refer to the Grafana Agent [node_exporter_config](/docs/grafana-cloud/send-data/agent/static/configuration/integrations/node-exporter-config/) documentation.
|
||||
|
||||
1. To run the playbook, run this command:
|
||||
|
||||
```
|
||||
ansible-playbook deploy-agent.yml
|
||||
```
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
You can place the `deploy-agent.yml`, `ansible.cfg` and `inventory` files in different directories based on your needs.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Check that logs and metrics are being ingested into Grafana Cloud
|
||||
|
||||
Logs and metrics will soon be available in Grafana Cloud.
|
||||
To test this, use the Explore feature.
|
||||
Click the **Explore** icon (compass icon) in the vertical navigation bar.
|
||||
|
||||
### Check logs
|
||||
|
||||
To check logs:
|
||||
|
||||
1. Use the drop-down menu at the top of the page to select your Loki logs data source.
|
||||
|
||||
1. In the log browser, run the query `{instance="centos-01"}` where `centos-01` is the hostname of one of the Linux hosts.
|
||||
|
||||
If you see log lines (shown in the example below), logs are being received.
|
||||
|
||||
{{< figure alt="Grafana Explore showing a graph and log output from the preceding query" src="/static/assets/img/blog/ansible-to-manage-agent1.png" >}}
|
||||
|
||||
If no log lines appear, logs aren't being collected.
|
||||
|
||||
### Check metrics
|
||||
|
||||
To check metrics:
|
||||
|
||||
1. Use the drop-down menu at the top of the page to select your Prometheus data source.
|
||||
|
||||
1. Run the query `{instance="centos-01"}` where `centos-01` is the hostname of one of the Linux hosts.
|
||||
|
||||
If you see a metrics graph and table (shown in the example below), metrics are being received.
|
||||
|
||||
{{< figure alt="Grafana Explore showing a graph and metrics table output from the preceding query" src="/static/assets/img/blog/ansible-to-manage-agent2.png" >}}
|
||||
|
||||
If no metrics appear, metrics aren't being collected.
|
||||
|
||||
### View dashboards
|
||||
|
||||
Now that you have logs and metrics in Grafana, you can use dashboards to view them.
|
||||
Here's an example of one of the prebuilt dashboards included with the Linux integration in Grafana Cloud:
|
||||
|
||||
{{< figure alt="The Grafana Node Exporter integration dashboard showing panels of visualizations" src="/static/assets/img/blog/ansible-to-manage-agent3.png" >}}
|
||||
|
||||
Using the **Instance** drop-down in the dashboard, you can select from the hostnames where you deployed Grafana Agent and start monitoring them.
|
||||
|
||||
## Summary
|
||||
|
||||
The `grafana_agent` Ansible role makes it easy to deploy and manage Grafana Agent across multiple machines.
|
||||
This example showed Grafana Agent deployments across eight Linux hosts, but it's possible to monitor more hosts using the `grafana_agent` role.
|
||||
To monitor more Linux hosts, update the `inventory` file and re-run the Ansible playbook.
|
||||
|
||||
To learn more about the Grafana Ansible collection, see its [GitHub repository](https://github.com/grafana/grafana-ansible-collection) or its [documentation](https://docs.ansible.com/ansible/latest/collections/grafana/grafana/index.html).
|
||||
@@ -1,33 +0,0 @@
|
||||
---
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Grafana Operator
|
||||
menuTitle: Grafana Operator
|
||||
title: Grafana Operator
|
||||
weight: 120
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/grafana-operator/
|
||||
---
|
||||
|
||||
# Grafana Operator
|
||||
|
||||
[Grafana Operator](https://grafana.github.io/grafana-operator/) is a Kubernetes operator built to help you manage your Grafana instances and their resources from within Kubernetes. The Operator can install and manage local Grafana instances, dashboards, and data sources through Kubernetes/OpenShift Custom Resources. The Grafana Operator automatically syncs the Kubernetes Custom Resources and the actual resources in the Grafana instance.
|
||||
|
||||
## Installing the Grafana Operator
|
||||
|
||||
To install the Grafana Operator in your Kubernetes cluster, run the following command in your terminal:
|
||||
|
||||
```
|
||||
helm repo add grafana https://grafana.github.io/helm-charts
|
||||
helm upgrade -i grafana-operator grafana/grafana-operator
|
||||
```
|
||||
|
||||
For other installation methods, refer to the [Grafana Operator Installation Documentation](https://grafana.github.io/grafana-operator/docs/installation/).
|
||||
|
||||
## Getting Started
|
||||
|
||||
Use the following guide to get started with using Grafana Operator to manage your Grafana instance:
|
||||
|
||||
- [Manage data sources, and dashboards with folders using the Grafana Operator](operator-dashboards-folders-datasources/) describes how to add a folders, data sources, and dashboards, using the [Grafana Operator](https://grafana.github.io/grafana-operator/).
|
||||
- [Manage Dashboards with GitOps Using ArgoCD](manage-dashboards-argocd/) describes how to create and manage dashboards using ArgoCD and [Grafana Operator](https://grafana.github.io/grafana-operator/).
|
||||
-299
@@ -1,299 +0,0 @@
|
||||
---
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Grafana Operator
|
||||
- ArgoCD
|
||||
title: Manage Dashboards with GitOps Using ArgoCD
|
||||
weight: 110
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/grafana-operator/manage-dashboards-argocd/
|
||||
---
|
||||
|
||||
# Managing Grafana Dashboards with GitOps Using ArgoCD
|
||||
|
||||
This guide will walk you through setting up a continuous deployment pipeline using ArgoCD to synchronize your Grafana dashboards with a Git repository. We'll use the Grafana Dashboard Custom Resource provided by the Grafana Operator to manage dashboard configurations declaratively.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- An existing Grafana Cloud stack
|
||||
- A Kubernetes cluster with Grafana Operator installed, as shown in [Grafana Operator Installation](/docs/grafana-cloud/as-code/infrastructure-as-code/grafana-operator/#installing-the-grafana-operator).
|
||||
- ArgoCD installed on your Kubernetes cluster. Refer the [Installation Guide](https://argo-cd.readthedocs.io/en/stable/getting_started/).
|
||||
- Git repository to store your dashboard configurations.
|
||||
|
||||
## Set Up Your Git Repository
|
||||
|
||||
Within the repository, create a directory structure to organize your Grafana and dashboard configurations. For this tutorial, let's create a folder named `grafana`.
|
||||
|
||||
## Grafana Operator Setup
|
||||
|
||||
The Grafana Operator allows us to authenticate with the Grafana instance using the Grafana Custom Resource (CR).
|
||||
|
||||
1. **Create the Grafana API Token Secret:**
|
||||
|
||||
Store the Grafana API Token in a secret with the following content in a file named `grafana-token.yml` in the `grafana` folder in your Git repo:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: grafana-cloud-credentials
|
||||
namespace: <grafana-operator-namespace>
|
||||
stringData:
|
||||
GRAFANA_CLOUD_INSTANCE_TOKEN: <Grafana-API-Key>
|
||||
type: Opaque
|
||||
```
|
||||
|
||||
Replace the following field values:
|
||||
|
||||
- `<Grafana-API-Key>` with an API key from the Grafana instance. To create an API key, refer to the [Grafana API Key Documentation](/docs/grafana/latest/administration/api-keys/).
|
||||
- `<grafana-operator-namespace>` with the namespace where the grafana-operator is deployed in Kubernetes Cluster.
|
||||
|
||||
2. **Configure the Grafana Custom Resource:**
|
||||
|
||||
Set up connection to your Grafana Cloud instance by creating a file named `grafana-cloud.yml` in the `grafana` folder in your Git repo with the following contents:
|
||||
|
||||
```yaml
|
||||
apiVersion: grafana.integreatly.org/v1beta1
|
||||
kind: Grafana
|
||||
metadata:
|
||||
name: <Grafana-cloud-stack-name>
|
||||
namespace: <grafana-operator-namespace>
|
||||
labels:
|
||||
dashboards: <Grafana-cloud-stack-name>
|
||||
spec:
|
||||
external:
|
||||
url: https://<Grafana-cloud-stack-name>.grafana.net/
|
||||
apiKey:
|
||||
name: grafana-cloud-credentials
|
||||
key: GRAFANA_CLOUD_INSTANCE_TOKEN
|
||||
```
|
||||
|
||||
Replace the following field values:
|
||||
|
||||
- `<Grafana-API-Key>` with API key from the Grafana instance.
|
||||
- `<Grafana-cloud-stack-name>` with the name of your Grafana Cloud Stack.
|
||||
- `<grafana-operator-namespace>` with the namespace where the grafana-operator is deployed in Kubernetes Cluster.
|
||||
|
||||
## Add Dashboards to a Git repository
|
||||
|
||||
In your `grafana` directory, create a sub-folder called `dashboards`. For this tutorial, we will create 3 separate dashboards.
|
||||
|
||||
1. Under `dashboards` folder, Create a file named `simple-dashboard.yaml` with the following content for the first dashboard:
|
||||
|
||||
```yaml
|
||||
apiVersion: grafana.integreatly.org/v1beta1
|
||||
kind: GrafanaDashboard
|
||||
metadata:
|
||||
name: grafanadashboard-sample
|
||||
namespace: <grafana-operator-namespace>
|
||||
spec:
|
||||
resyncPeriod: 30s
|
||||
instanceSelector:
|
||||
matchLabels:
|
||||
dashboards: <Grafana-cloud-stack-name>
|
||||
json: >
|
||||
{
|
||||
"id": null,
|
||||
"title": "Simple Dashboard",
|
||||
"tags": [],
|
||||
"style": "dark",
|
||||
"timezone": "browser",
|
||||
"editable": true,
|
||||
"hideControls": false,
|
||||
"graphTooltip": 1,
|
||||
"panels": [],
|
||||
"time": {
|
||||
"from": "now-6h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"time_options": [],
|
||||
"refresh_intervals": []
|
||||
},
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"annotations": {
|
||||
"list": []
|
||||
},
|
||||
"refresh": "5s",
|
||||
"schemaVersion": 17,
|
||||
"version": 0,
|
||||
"links": []
|
||||
}
|
||||
```
|
||||
|
||||
Replace the following field values:
|
||||
- `<Grafana-cloud-stack-name>` with the name of your Grafana Cloud Stack.
|
||||
- `<grafana-operator-namespace>` with the namespace where the grafana-operator is deployed in Kubernetes Cluster.
|
||||
|
||||
1. Under `dashboards` folder, Create a file named `dashboard-from-cm.yaml` with the following content for the second dashboard:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: dashboard-definition
|
||||
namespace: <grafana-operator-namespace>
|
||||
data:
  json: >
|
||||
{
|
||||
"id": null,
|
||||
"title": "Simple Dashboard from ConfigMap",
|
||||
"tags": [],
|
||||
"style": "dark",
|
||||
"timezone": "browser",
|
||||
"editable": true,
|
||||
"hideControls": false,
|
||||
"graphTooltip": 1,
|
||||
"panels": [],
|
||||
"time": {
|
||||
"from": "now-6h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"time_options": [],
|
||||
"refresh_intervals": []
|
||||
},
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"annotations": {
|
||||
"list": []
|
||||
},
|
||||
"refresh": "5s",
|
||||
"schemaVersion": 17,
|
||||
"version": 0,
|
||||
"links": []
|
||||
}
|
||||
---
|
||||
apiVersion: grafana.integreatly.org/v1beta1
|
||||
kind: GrafanaDashboard
|
||||
metadata:
|
||||
name: grafanadashboard-from-configmap
|
||||
namespace: <grafana-operator-namespace>
|
||||
spec:
|
||||
instanceSelector:
|
||||
matchLabels:
|
||||
dashboards: <Grafana-cloud-stack-name>
|
||||
configMapRef:
|
||||
name: dashboard-definition
|
||||
key: json
|
||||
```
|
||||
|
||||
Replace the following field values:
|
||||
- `<Grafana-cloud-stack-name>` with the name of your Grafana Cloud Stack.
|
||||
- `<grafana-operator-namespace>` with the namespace where the grafana-operator is deployed in Kubernetes Cluster.
|
||||
|
||||
1. Under `dashboards` folder, Create a file named `dashboard-from-id.yaml` with the following content for the third dashboard:
|
||||
|
||||
```yaml
|
||||
apiVersion: grafana.integreatly.org/v1beta1
|
||||
kind: GrafanaDashboard
|
||||
metadata:
|
||||
name: node-exporter-latest
|
||||
namespace: <grafana-operator-namespace>
|
||||
spec:
|
||||
instanceSelector:
|
||||
matchLabels:
|
||||
dashboards: <Grafana-cloud-stack-name>
|
||||
grafanaCom:
|
||||
id: 1860
|
||||
```
|
||||
|
||||
Replace the following field values:
|
||||
- `<Grafana-cloud-stack-name>` with the name of your Grafana Cloud Stack.
|
||||
- `<grafana-operator-namespace>` with the namespace where the grafana-operator is deployed in Kubernetes Cluster.
|
||||
|
||||
## Configure Argo CD to Sync the Git Repository
|
||||
|
||||
Once all changes are committed to Git, log in to the Argo CD user interface or use the CLI.
|
||||
|
||||
2. Create an Argo CD application to manage the synchronization:
|
||||
|
||||
**Using UI**:
|
||||
- Navigate to 'New App' and fill out the form with your Git repository details and the path to your `grafana` folder.
|
||||
- Make sure to select the **Directory Recurse** checkbox.
|
||||
- Set the sync policy to `Automatic`.
|
||||
|
||||
**Using CLI**:
|
||||
- Prepare an application manifest named `argo-application.yaml` with the configuration pointing to your Git repository:
|
||||
|
||||
```yaml
|
||||
apiVersion: argoproj.io/v1alpha1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: Grafana
|
||||
namespace: <argocd-namespace>
|
||||
spec:
|
||||
destination:
|
||||
name: ''
|
||||
namespace: ''
|
||||
server: 'https://kubernetes.default.svc'
|
||||
source:
|
||||
path: <Path-to-grafana-folder>
|
||||
repoURL: '<Git-repo-url>'
|
||||
targetRevision: HEAD
|
||||
directory:
|
||||
recurse: true
|
||||
sources: []
|
||||
project: default
|
||||
syncPolicy:
|
||||
automated:
|
||||
prune: true
|
||||
selfHeal: true
|
||||
syncOptions:
|
||||
- CreateNamespace=true
|
||||
retry:
|
||||
limit: 2
|
||||
backoff:
|
||||
duration: 5s
|
||||
maxDuration: 3m0s
|
||||
factor: 2
|
||||
```
|
||||
|
||||
Replace the following field values:
|
||||
- `<Git-repo-url>` with the URL of your GIT Repository.
|
||||
- `<Path-to-grafana-folder>` with the path to the `grafana` folder.
|
||||
- `<argocd-namespace>` with the namespace where ArgoCD is deployed in Kubernetes Cluster.
|
||||
|
||||
- Create the application in Argo CD:
|
||||
|
||||
```shell
|
||||
kubectl apply -f argo-application.yaml
|
||||
```
|
||||
|
||||
## Verify Sync Status in Argo CD
|
||||
|
||||
1. Monitor the newly created Argo CD application, ensuring that it successfully syncs your dashboard configuration.
|
||||
|
||||
2. Visit the Argo CD dashboard and check the sync status. If it's successful, your Grafana dashboard should be up to date with the configuration from your Git repository.
|
||||
|
||||
## Updating the Dashboards
|
||||
|
||||
To update an existing dashboard:
|
||||
|
||||
1. Make changes to the dashboard JSON configuration in your Git repository.
|
||||
2. Commit and push the changes.
|
||||
3. Argo CD will detect the update and synchronize the changes to your Custom Resource.
|
||||
4. Grafana Operator will then sync changes to the Grafana Instance.
|
||||
|
||||
## Validating the Grafana Dashboard Update
|
||||
|
||||
Log in to your Grafana dashboard and confirm that the changes have been applied. You should see the dashboard update reflected in the Grafana UI.
|
||||
|
||||
## Additional Tips
|
||||
|
||||
- You can also install the Grafana Operator's Helm Chart using ArgoCD to manage your setup with GitOps.
|
||||
- You can follow a similar setup for Grafana Dashboards and Folders.
|
||||
|
||||
## Conclusion
|
||||
|
||||
You've set up a GitOps workflow to manage Grafana dashboards using Argo CD and the Grafana Operator. Your dashboards are now version-controlled and can be consistently deployed across environments. This approach provides a reliable and auditable way to manage observability dashboards and scale your operations.
|
||||
|
||||
To learn more about managing Grafana using Grafana Operator, see the [Grafana Operator documentation](https://grafana.github.io/grafana-operator/docs/).
|
||||
-168
@@ -1,168 +0,0 @@
|
||||
---
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Grafana Operator
|
||||
title: Manage folders, data sources, and dashboards using Grafana Operator
|
||||
weight: 100
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/grafana-operator/operator-dashboards-folders-datasources/
|
||||
---
|
||||
|
||||
# Creating and managing folders, data sources, and dashboards using the Grafana Operator
|
||||
|
||||
Learn how to manage data sources, folders, and dashboards using the Grafana Operator.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you begin, you should have the following available:
|
||||
|
||||
- An existing Grafana Cloud stack.
|
||||
- Grafana Operator Installed in your Cluster, as shown in [Grafana Operator Installation](/docs/grafana-cloud/as-code/infrastructure-as-code/grafana-operator/#installing-the-grafana-operator).
|
||||
|
||||
## Grafana Operator Setup
|
||||
|
||||
The Grafana Operator allows us to authenticate with the Grafana instance using the Grafana Custom Resource (CR).
|
||||
|
||||
1. **Create the Grafana API Token Secret:**
|
||||
|
||||
Store the Grafana API Token in a secret with the following content in a file named `grafana-token.yml`:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: grafana-cloud-credentials
|
||||
namespace: <grafana-operator-namespace>
|
||||
stringData:
|
||||
GRAFANA_CLOUD_INSTANCE_TOKEN: <Grafana-API-Key>
|
||||
type: Opaque
|
||||
```
|
||||
|
||||
Replace the following field values:
|
||||
|
||||
- `<Grafana-API-Key>` with an API key from the Grafana instance. To create an API key, refer to the [Grafana API Key Documentation](/docs/grafana/latest/administration/api-keys/).
|
||||
- `<grafana-operator-namespace>` with the namespace where the grafana-operator is deployed in Kubernetes Cluster.
|
||||
|
||||
2. **Configure the Grafana Custom Resource:**
|
||||
|
||||
Set up connection to your Grafana Cloud instance by creating a file named `grafana-cloud.yml` with the following contents:
|
||||
|
||||
```yaml
|
||||
apiVersion: grafana.integreatly.org/v1beta1
|
||||
kind: Grafana
|
||||
metadata:
|
||||
name: <Grafana-cloud-stack-name>
|
||||
namespace: <grafana-operator-namespace>
|
||||
labels:
|
||||
dashboards: <Grafana-cloud-stack-name>
|
||||
spec:
|
||||
external:
|
||||
url: https://<Grafana-cloud-stack-name>.grafana.net/
|
||||
apiKey:
|
||||
name: grafana-cloud-credentials
|
||||
key: GRAFANA_CLOUD_INSTANCE_TOKEN
|
||||
```
|
||||
|
||||
Replace the following field values:
|
||||
|
||||
- `<Grafana-API-Key>` with API key from the Grafana instance.
|
||||
- `<Grafana-cloud-stack-name>` with the name of your Grafana Cloud Stack.
|
||||
- `<grafana-operator-namespace>` with the namespace where the grafana-operator is deployed in Kubernetes Cluster.
|
||||
|
||||
## Add a data source
|
||||
|
||||
The following steps use the Prometheus data source. The required arguments vary depending on the data source you select.
|
||||
|
||||
1. **Create the Data Source Configuration:**
|
||||
|
||||
Save a new YAML file `datasource.yml` with the following content:
|
||||
|
||||
```yaml
|
||||
apiVersion: grafana.integreatly.org/v1beta1
|
||||
kind: GrafanaDatasource
|
||||
metadata:
|
||||
name: <data-source-name>
|
||||
namespace: <grafana-operator-namespace>
|
||||
spec:
|
||||
instanceSelector:
|
||||
matchLabels:
|
||||
dashboards: <Grafana-cloud-stack-name>
|
||||
allowCrossNamespaceImport: true
|
||||
datasource:
|
||||
access: proxy
|
||||
database: prometheus
|
||||
jsonData:
|
||||
timeInterval: 5s
|
||||
tlsSkipVerify: true
|
||||
name: <data-source-name>
|
||||
type: prometheus
|
||||
url: <data-source-url>
|
||||
```
|
||||
|
||||
Replace the following field values:
|
||||
|
||||
- `<data-source-name>` with the name of the data source to be added in Grafana.
|
||||
- `<data-source-url>` with URL of your data source.
|
||||
- `<Grafana-cloud-stack-name>` with the name of your Grafana Cloud Stack.
|
||||
- `<grafana-operator-namespace>` with the namespace where the grafana-operator is deployed in Kubernetes Cluster.
|
||||
|
||||
## Add a dashboard to a folder
|
||||
|
||||
Use the following YAML definition to create a simple dashboard in the Grafana instance under a custom folder. If the folder defined under the `spec.folder` field doesn't exist, the operator will create it before placing the dashboard inside the folder.
|
||||
|
||||
1. **Prepare the Dashboard Configuration File:**
|
||||
|
||||
In `dashboard.yml`, define the dashboard and assign it to a folder:
|
||||
|
||||
```yaml
|
||||
apiVersion: grafana.integreatly.org/v1beta1
|
||||
kind: GrafanaDashboard
|
||||
metadata:
|
||||
name: <folder-name>
|
||||
namespace: <grafana-operator-namespace>
|
||||
spec:
|
||||
instanceSelector:
|
||||
matchLabels:
|
||||
dashboards: <Grafana-cloud-stack-name>
|
||||
folder: "<folder-name>"
|
||||
json: >
|
||||
{
|
||||
"title": "as-code dashboard",
|
||||
"uid": "ascode"
|
||||
}
|
||||
```
|
||||
|
||||
Replace the following field values:
|
||||
|
||||
- `<folder-name>` with the name of the folder in which you want the Dashboard to be created.
|
||||
- `<Grafana-cloud-stack-name>` with the name of your Grafana Cloud Stack.
|
||||
- `<grafana-operator-namespace>` with the namespace where the grafana-operator is deployed in Kubernetes Cluster.
|
||||
|
||||
## Apply Kubernetes Manifests
|
||||
|
||||
In a terminal, run the following commands from the directory where all of the above Kubernetes YAML definitions are located.
|
||||
|
||||
1. Create Kubernetes Custom resources for all of the above configurations.
|
||||
|
||||
```shell
|
||||
kubectl apply -f grafana-token.yml -f grafana-cloud.yml -f datasource.yml -f dashboard.yml
|
||||
```
|
||||
|
||||
## Validation
|
||||
|
||||
Once you apply the configurations, you should be able to verify the following:
|
||||
|
||||
- A new data source is visible in Grafana. In the following image a datasource named `InfluxDB` was created.
|
||||
|
||||

|
||||
|
||||
- A new dashboard and folder in Grafana. In the following image a dashboard named `InfluxDB Cloud Demos` was created inside the `Demos` folder.
|
||||
|
||||

|
||||
|
||||
## Conclusion
|
||||
|
||||
In this guide, you created a data source, folder, and dashboard using the Grafana Operator.
|
||||
|
||||
To learn more about managing Grafana using Grafana Operator, see the [Grafana Operator documentation](https://grafana.github.io/grafana-operator/docs/).
|
||||
@@ -1,22 +0,0 @@
|
||||
---
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Grizzly
|
||||
- CLI
|
||||
menuTitle: Grizzly
|
||||
title: Grizzly
|
||||
weight: 130
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/grizzly/
|
||||
---
|
||||
|
||||
# Grizzly (deprecated)
|
||||
|
||||
{{< admonition type="warning" >}}
|
||||
Grizzly has been removed. It is no longer deployed, enhanced, or supported.
|
||||
|
||||
Use the [Grafana CLI](/docs/grafana/<GRAFANA_VERSION>/observability-as-code/grafana-cli/) instead.
|
||||
{{< /admonition >}}
|
||||
|
||||
[Grizzly](https://grafana.github.io/grizzly/) is a command line tool that allows you to manage your observability resources with code. You can use it to manage dashboards, data sources, Prometheus rules, and Synthetic monitoring.
|
||||
-149
@@ -1,149 +0,0 @@
|
||||
---
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Grizzly
|
||||
title: Creating and managing folders, data sources, and dashboards using Grizzly
|
||||
weight: 100
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/grizzly/dashboards-folders-datasources/
|
||||
---
|
||||
|
||||
# Creating and managing folders, data sources, and dashboards using Grizzly
|
||||
|
||||
Learn how to add data sources, folders, and dashboards using Grizzly.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you begin, you should have the following available:
|
||||
|
||||
- A Grafana Cloud account
|
||||
- An existing Grafana Cloud stack with a Grafana API Key
|
||||
- [Grizzly](https://grafana.github.io/grizzly/installation/) installed on your machine
|
||||
|
||||
## Authentication Setup
|
||||
|
||||
To authenticate with the Grizzly API, you must create environment variables. Run the following commands to create environment variables named `GRAFANA_URL` and `GRAFANA_TOKEN`:
|
||||
|
||||
```shell
|
||||
export GRAFANA_URL=<Grafana-instance-url>
|
||||
export GRAFANA_TOKEN=<Grafana-API-Key>
|
||||
```
|
||||
|
||||
Replace the following field values:
|
||||
|
||||
- `<Grafana-instance-url>` with the URL of your Grafana instance.
|
||||
- `<Grafana-API-Key>` with API key from the Grafana instance.
|
||||
|
||||
## Add a data source
|
||||
|
||||
The following steps use the InfluxDB data source. The required arguments vary depending on the data source you select.
|
||||
|
||||
1. Create a file named `data-source.yml` and add the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: grizzly.grafana.com/v1alpha1
|
||||
kind: Datasource
|
||||
metadata:
|
||||
name: <data-source-name>
|
||||
spec:
|
||||
name: <data-source-name>
|
||||
type: influxdb
|
||||
url: <data-source-url>
|
||||
database: <db-name>
|
||||
user: <username>
|
||||
secureJsonData:
|
||||
password: '<password>'
|
||||
uid: <uid>
|
||||
id: <id>
|
||||
access: proxy
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<data-source-name>` with the name of the data source to be added in Grafana.
|
||||
   - `<data-source-url>` with the URL of your data source.
|
||||
- `<username>` with the username for authenticating with your data source.
|
||||
- `<password>` with the password for authenticating with your data source.
|
||||
   - `<db-name>` with the name of your database.
|
||||
- `<id>` with the ID for your data source in Grafana.
|
||||
   - `<uid>` with the UID for your data source in Grafana.
|
||||
|
||||
## Add a folder
|
||||
|
||||
The following YAML definition creates a folder in your Grafana instance.
|
||||
|
||||
1. Create a file named `folder.yml` and add the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: grizzly.grafana.com/v1alpha1
|
||||
kind: DashboardFolder
|
||||
metadata:
|
||||
name: <folder-name>
|
||||
spec:
|
||||
title: <folder-name>
|
||||
uid: <uid>
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<folder-name>` with the name of the folder to be added in Grafana.
|
||||
- `<uid>` with the UID for your folder in Grafana.
|
||||
|
||||
## Add a dashboard to the folder
|
||||
|
||||
Use the following YAML definition to create a simple dashboard in the Grafana instance folder from the previous step. To add more than a title and UID to the dashboard, you can convert your dashboard JSON config to YAML and paste it under `spec`.
|
||||
|
||||
1. Create a file named `dashboard.yml` and add the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: grizzly.grafana.com/v1alpha1
|
||||
kind: Dashboard
|
||||
metadata:
|
||||
folder: <folder-name>
|
||||
name: influxdb-cloud-demos
|
||||
spec:
|
||||
title: InfluxDB Cloud Demos
|
||||
uid: influxdb-cloud-demos
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<folder-name>` with the name of the folder created in the previous step.
|
||||
|
||||
## Using Grizzly CLI
|
||||
|
||||
In a terminal, run the following commands from the directory where all of the YAML definitions are located.
|
||||
|
||||
1. Add the data source.
|
||||
|
||||
```shell
|
||||
grr apply data-source.yml
|
||||
```
|
||||
|
||||
1. Add a folder.
|
||||
|
||||
```shell
|
||||
grr apply folder.yml
|
||||
```
|
||||
|
||||
1. Add a dashboard to the folder.
|
||||
|
||||
```shell
|
||||
grr apply dashboard.yml
|
||||
```
|
||||
|
||||
## Validation
|
||||
|
||||
Once you apply the configurations using the Grizzly CLI, you should be able to verify the following:
|
||||
|
||||
- A new data source (InfluxDB in this example) is visible in Grafana.
|
||||
|
||||

|
||||
|
||||
- A new dashboard and folder in Grafana. In the following image a dashboard named `InfluxDB Cloud Demos` was created inside the `Demos` folder.
|
||||
|
||||

|
||||
|
||||
## Conclusion
|
||||
|
||||
In this guide, you created a data source, folder, and dashboard using Grizzly.
|
||||
|
||||
To learn more about managing Grafana using Grizzly, see the [Grizzly documentation](https://grafana.github.io/grizzly/).
|
||||
@@ -1,26 +0,0 @@
|
||||
---
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Terraform
|
||||
menuTitle: Terraform
|
||||
title: Grafana Terraform provider
|
||||
weight: 100
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/terraform/
|
||||
---
|
||||
|
||||
# Grafana Terraform provider
|
||||
|
||||
The [Grafana Terraform provider](https://registry.terraform.io/providers/grafana/grafana/latest) provisions configuration management resources for Grafana. You can use it to manage resources such as dashboards, data sources, plugins, folders, organizations or alert notification channels.
|
||||
|
||||
Use the following guides to get started using Terraform to manage your Grafana Cloud stack:
|
||||
|
||||
- [Creating and managing a Grafana Cloud stack using Terraform](terraform-cloud-stack/) describes how to create a Grafana Cloud stack and add a data source and dashboard using [Terraform](https://www.terraform.io/).
|
||||
- [Creating and managing dashboards using Terraform and GitHub Actions](dashboards-github-action/) describes how to create and manage multiple dashboards represented as JSON source code for Grafana using [Terraform](https://www.terraform.io/) and [GitHub Actions](https://github.com/features/actions).
|
||||
- [Managing IRM on Grafana Cloud using Terraform](terraform-oncall/) describes how to connect an integration to Grafana IRM, configure escalation policies, and add your on-call schedule using [Terraform](https://www.terraform.io/).
|
||||
- [Managing Fleet Management in Grafana Cloud using Terraform](https://grafana.com/docs/grafana-cloud/as-code/infrastructure-as-code/terraform/terraform-fleet-management/) describes how to create collectors and pipelines in Grafana Fleet Management using [Terraform](https://www.terraform.io/).
|
||||
- [Managing Frontend Observability in Grafana Cloud using Terraform](https://grafana.com/docs/grafana-cloud/as-code/infrastructure-as-code/terraform/terraform-frontend-observability/) describes how to manage resources in Frontend Observability using [Terraform](https://www.terraform.io/).
|
||||
- [Manage Cloud Provider Observability in Grafana Cloud using Terraform](terraform-cloud-provider-o11y/) describes how to manage Amazon CloudWatch and Microsoft Azure resources in Cloud Provider Observability using Terraform.
|
||||
- [Manage Knowledge Graph in Grafana Cloud using Terraform](terraform-knowledge-graph/) describes how to create and manage notification alerts, suppressed assertions, custom model rules, log configurations, and threshold configurations in Grafana Cloud Knowledge Graph using [Terraform](https://www.terraform.io/).
|
||||
- [Install plugins in Grafana Cloud using Terraform](terraform-plugins) describes how to install plugins in Grafana Cloud using [Terraform](https://www.terraform.io/).
|
||||
@@ -1,250 +0,0 @@
|
||||
---
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Terraform
|
||||
- GitHub Actions
|
||||
title: Creating and managing dashboards using Terraform and GitHub Actions
|
||||
weight: 110
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/terraform/dashboards-github-action/
|
||||
---
|
||||
|
||||
# Creating and managing dashboards using Terraform and GitHub Actions
|
||||
|
||||
Learn how to create and manage multiple dashboards represented as JSON source code for Grafana using [Terraform](https://www.terraform.io/) and [GitHub Actions](https://github.com/features/actions).
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you begin, you should have the following available:
|
||||
|
||||
- A Grafana Cloud account, as shown in [Get started](/docs/grafana-cloud/get-started/)
|
||||
- A [GitHub](https://github.com/) repository
|
||||
|
||||
## Add Dashboards to a GitHub repository
|
||||
|
||||
For this guide, we are adding dashboards for ElasticSearch, InfluxDB, and AWS EC2. You can use different dashboards according to your configured data sources.
|
||||
|
||||
1. In your GitHub repository, create a folder named `dashboards` in the root directory.
|
||||
|
||||
1. In the `dashboards` folder create three sub-folders. For this guide, we will create three sub-folders named `elasticsearch`, `influxdb`, and `aws`.
|
||||
|
||||
1. Add dashboard JSON source code to each of the three sub-folders.
|
||||
|
||||
## Terraform configuration for Grafana provider
|
||||
|
||||
This Terraform configuration configures the [Grafana provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs) to provide necessary authentication when creating folders and dashboards in the Grafana instance.
|
||||
|
||||
1. Create a service account and token in the Grafana instance by following these steps:
|
||||
1. [Create a service account in Grafana](/docs/grafana-cloud/account-management/authentication-and-permissions/service-accounts/#create-a-service-account-in-grafana)
|
||||
1. [Add a token to a service account](/docs/grafana-cloud/account-management/authentication-and-permissions/service-accounts/#add-a-token-to-a-service-account-in-grafana)
|
||||
|
||||
1. Create a file named `main.tf` in the Git root directory and add the following code block:
|
||||
|
||||
```terraform
|
||||
terraform {
|
||||
required_providers {
|
||||
grafana = {
|
||||
source = "grafana/grafana"
|
||||
version = ">= 2.9.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "grafana" {
|
||||
alias = "cloud"
|
||||
|
||||
url = "<Grafana-instance-url>"
|
||||
auth = "<Grafana-Service-Account-token>"
|
||||
}
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<Grafana-instance-url>` with the URL of your Grafana instance, for example `"https://my-stack.grafana.net/"`.
|
||||
- `<Grafana-Service-Account-token>` with a Service Account token from the Grafana instance.
|
||||
|
||||
## Terraform configuration for folders
|
||||
|
||||
This Terraform configuration creates three folders named `ElasticSearch`, `InfluxDB` and `AWS` in the Grafana instance using [grafana_folder (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/folder).
|
||||
|
||||
Create a file named `folders.tf` in the Git root directory and add the following code block:
|
||||
|
||||
```terraform
|
||||
resource "grafana_folder" "ElasticSearch" {
|
||||
provider = grafana.cloud
|
||||
|
||||
title = "ElasticSearch"
|
||||
}
|
||||
|
||||
resource "grafana_folder" "InfluxDB" {
|
||||
provider = grafana.cloud
|
||||
|
||||
title = "InfluxDB"
|
||||
}
|
||||
|
||||
resource "grafana_folder" "AWS" {
|
||||
provider = grafana.cloud
|
||||
|
||||
title = "AWS"
|
||||
}
|
||||
```
|
||||
|
||||
## Terraform configuration for dashboards
|
||||
|
||||
This Terraform configuration iterates through the JSON files in the three folders (`elasticsearch`, `influxdb` and `aws`) you created in the GitHub repository and adds them to the respective folders in the Grafana instance using [grafana_dashboard (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/dashboard).
|
||||
|
||||
For example, the dashboard represented as JSON source code in the `elasticsearch` folder in the GitHub repository will be created in the `ElasticSearch` folder in the Grafana instance.
|
||||
|
||||
Create a file named `dashboards.tf` in the Git root directory and add the following code block:
|
||||
|
||||
```terraform
|
||||
resource "grafana_dashboard" "elasticsearch" {
|
||||
provider = grafana.cloud
|
||||
|
||||
for_each = fileset("${path.module}/dashboards/elasticsearch", "*.json")
|
||||
config_json = file("${path.module}/dashboards/elasticsearch/${each.key}")
|
||||
folder = grafana_folder.ElasticSearch.id
|
||||
}
|
||||
|
||||
resource "grafana_dashboard" "influxdb" {
|
||||
provider = grafana.cloud
|
||||
|
||||
for_each = fileset("${path.module}/dashboards/influxdb", "*.json")
|
||||
config_json = file("${path.module}/dashboards/influxdb/${each.key}")
|
||||
folder = grafana_folder.InfluxDB.id
|
||||
}
|
||||
|
||||
resource "grafana_dashboard" "aws" {
|
||||
provider = grafana.cloud
|
||||
|
||||
for_each = fileset("${path.module}/dashboards/aws", "*.json")
|
||||
config_json = file("${path.module}/dashboards/aws/${each.key}")
|
||||
folder = grafana_folder.AWS.id
|
||||
}
|
||||
```
|
||||
|
||||
## GitHub workflow for managing dashboards using Terraform
|
||||
|
||||
This GitHub workflow consists of the following steps:
|
||||
|
||||
- Using the [actions/checkout@v3](https://github.com/actions/checkout) action, the GitHub repository is checked out so that the GitHub workflow can access it.
|
||||
- The Terraform CLI is installed on the GitHub runner using the [hashicorp/setup-terraform@v1](https://github.com/hashicorp/setup-terraform) action.
|
||||
- `terraform init` is run as a bash command in the GitHub runner to initialize a working directory containing Terraform configuration files.
|
||||
- `terraform fmt -check` is run as a bash command in the GitHub runner to check if the Terraform configuration files are properly formatted. If the Terraform configuration files are not properly formatted, the workflow will fail at this step.
|
||||
- `terraform plan` is run as a bash command in the GitHub runner to preview the changes that Terraform will make.
|
||||
- Using [mshick/add-pr-comment@v1](https://github.com/mshick/add-pr-comment) action, the preview from Terraform plan is posted as a comment on the pull request. This helps in reviewing the changes that Terraform will make before the pull request is merged.
|
||||
- `terraform apply -auto-approve` is run as a bash command in the GitHub runner to apply the Terraform configuration files. The `-auto-approve` flag is added to the command to skip interactive approval of the plan before applying and make the workflow automated.
|
||||
This step is run only when changes are committed to `main` branch. When a pull request is merged, the merge action creates a commit to the `main` branch which triggers the `terraform apply -auto-approve` step to execute.
|
||||
|
||||
1. In your GitHub repository, create a folder named `.github` in the root directory.
|
||||
|
||||
1. In the `.github` folder create a sub-folder named `workflows`.
|
||||
|
||||
1. To add the GitHub workflow to your GitHub repository, create a file named `terraform.yml` in the `workflows` directory and add the following code block:
|
||||
|
||||
````yaml
|
||||
name: Terraform
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'main'
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
terraform:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
# Checkout the repository to the GitHub Actions runner
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
# Install the latest version of Terraform CLI
|
||||
- name: Setup Terraform
|
||||
uses: hashicorp/setup-terraform@v1
|
||||
|
||||
# Initialize a new or existing Terraform working directory by creating initial files, loading any remote state, downloading modules, etc.
|
||||
- name: Terraform Init
|
||||
run: terraform init
|
||||
|
||||
# Checks that all Terraform configuration files adhere to a canonical format
|
||||
- name: Terraform Format
|
||||
run: terraform fmt -check
|
||||
|
||||
# Previews the changes that Terraform will make
|
||||
- name: Plan Terraform
|
||||
id: plan
|
||||
continue-on-error: true
|
||||
run: terraform plan -input=false -no-color
|
||||
|
||||
# Post the preview (terraform plan) from the previous step as a GitHub pull request comment
|
||||
- name: Post Plan to GitHub PR
|
||||
if: github.ref != 'refs/heads/main'
|
||||
uses: mshick/add-pr-comment@v1
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
repo-token-user-login: 'github-actions[bot]'
|
||||
message: |
|
||||
Applying:
|
||||
|
||||
```
|
||||
${{ steps.plan.outputs.stdout }}
|
||||
```
|
||||
|
||||
# Applies the terraform configuration files when the branch is `main`
|
||||
- name: Apply Terraform
|
||||
if: github.ref == 'refs/heads/main'
|
||||
id: apply
|
||||
continue-on-error: true
|
||||
run: |
|
||||
terraform apply -auto-approve
|
||||
````
|
||||
|
||||
1. Commit the changes made to the `terraform.yml` in the previous step to the `main` branch in your GitHub repository. Once the changes are committed, the GitHub workflow you created should start to run automatically, because the workflow defined in the previous step runs when a pull request is created or when changes are committed to the `main` branch.
|
||||
|
||||
## Managing the Terraform state
|
||||
|
||||
If you are not using a [Terraform backend](https://www.terraform.io/language/settings/backends/configuration) to store the `.tfstate` file, add the following code block to the end of the GitHub workflow file to make sure the Terraform state file is stored in Git.
|
||||
|
||||
```yaml
|
||||
- name: commit the terraform state
|
||||
if: github.ref == 'refs/heads/main'
|
||||
uses: stefanzweifel/git-auto-commit-action@v4
|
||||
with:
|
||||
commit_message: Updating Terraform state
|
||||
file_pattern: terraform.tfstate
|
||||
```
|
||||
|
||||
When you run `terraform apply`, Terraform automatically manages and updates the `terraform.tfstate` file to store state about your infrastructure and configuration.
|
||||
This step uses the [stefanzweifel/git-auto-commit-action@v4](https://github.com/stefanzweifel/git-auto-commit-action) action to auto-commit the `terraform.tfstate` file for changes made by running the `terraform apply` step.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The Terraform state file (terraform.tfstate) should not be stored in Git to avoid leakage of sensitive data. Instead, store Terraform state file using a remote backend like AWS S3 with proper RBAC. For more information, see [Terraform state](https://www.terraform.io/language/state).
|
||||
{{< /admonition >}}
|
||||
|
||||
## Validation
|
||||
|
||||
Once the GitHub workflow run is successful, you should be able to verify the following:
|
||||
|
||||
- `ElasticSearch`, `InfluxDB` and `AWS` folders are created in the Grafana instance.
|
||||
|
||||

|
||||
|
||||
- Dashboard represented as JSON source code from `elasticsearch` folder in GitHub are added under the `ElasticSearch` folder in the Grafana instance.
|
||||
|
||||

|
||||
|
||||
- Dashboard source code from the `influxdb` folder in GitHub is added under the `InfluxDB` folder in the Grafana instance.
|
||||
|
||||

|
||||
|
||||
- Dashboards from `aws` folder in GitHub are added under the `AWS` folder in the Grafana instance.
|
||||
|
||||

|
||||
|
||||
## Conclusion
|
||||
|
||||
In this guide, you created a GitHub workflow using Terraform to manage dashboard source code. Using this workflow, the dashboards in the Grafana instance will always be synchronized with the JSON source code files for dashboards in GitHub.
|
||||
|
||||
To learn more about managing Grafana Cloud using Terraform, see [Grafana provider's documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs).
|
||||
-115
@@ -1,115 +0,0 @@
|
||||
---
|
||||
description: Learn how to manage Grafana Cloud Provider Observability resources in Grafana Cloud using Terraform
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Terraform
|
||||
- Cloud Provider Observability
|
||||
title: Manage Cloud Provider Observability in Grafana Cloud using Terraform
|
||||
weight: 210
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/terraform/terraform-cloud-provider-o11y/
|
||||
---
|
||||
|
||||
# Manage Cloud Provider Observability in Grafana Cloud using Terraform
|
||||
|
||||
Manage Cloud Provider Observability, including Amazon CloudWatch and Microsoft Azure resources, in Grafana Cloud using Terraform.
|
||||
For more information on Cloud Provider Observability, refer to the [Cloud Provider Observability](/docs/grafana-cloud/monitor-infrastructure/monitor-cloud-provider/) documentation.
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before you begin, you should have the following available:
|
||||
|
||||
- A Grafana Cloud account
|
||||
- For more information on setting up a Grafana Cloud account, refer to [Get started](/docs/grafana-cloud/get-started/).
|
||||
- Terraform installed on your machine
|
||||
- For more information on how to install Terraform, refer to the [Terraform install documentation](https://developer.hashicorp.com/terraform/install).
|
||||
- Administrator permissions in your Grafana instance
|
||||
- For more information on assigning Grafana RBAC roles, refer to [Assign RBAC roles](/docs/grafana-cloud/security-and-account-management/authentication-and-permissions/access-control/assign-rbac-roles/).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Save all of the following Terraform configuration files in the same directory.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Configure authentication for the Grafana Provider
|
||||
|
||||
The Grafana Provider is a logical abstraction of an upstream API that you can use to interact with Grafana Cloud resources.
|
||||
You must configure it with the following information:
|
||||
|
||||
- A Grafana Cloud access policy token that includes the permissions the provider needs to access the Grafana Cloud Provider API.
|
||||
- A regional cloud provider API endpoint to establish which Grafana Cloud stack you are accessing.
|
||||
|
||||
To configure authentication for the Grafana Provider:
|
||||
|
||||
1. Create a Grafana Cloud access policy and token.
|
||||
- To create an access policy for your organization, refer to the [Create an access policy for a stack steps](/docs/grafana-cloud/security-and-account-management/authentication-and-permissions/access-policies/create-access-policies/#create-an-access-policy-for-a-stack) and use the following scopes listed for the supported Amazon CloudWatch or Microsoft Azure resources:
|
||||
- Amazon CloudWatch
|
||||
- Metrics scrape or resource metadata scrape
|
||||
- `integration-management:read`
|
||||
- `integration-management:write`
|
||||
- `stacks:read`
|
||||
- Metric streams
|
||||
- `metrics:write`
|
||||
- ALB access logs, logs with Lambda, or logs with Amazon Data Firehose
|
||||
- `logs:write`
|
||||
- Microsoft Azure
|
||||
- Serverless metrics
|
||||
- `integration-management:read`
|
||||
- `integration-management:write`
|
||||
- `stacks:read`
|
||||
- Logs with Azure functions
|
||||
- `logs:write`
|
||||
|
||||
1. Obtain the regional Cloud Provider API endpoint.
|
||||
- To obtain the regional Cloud provider API endpoint, use your access policy token and the following command to return a list of all of the Grafana stacks you own, along with their respective Cloud Provider API hostnames:
|
||||
```bash
|
||||
curl -sH "Authorization: Bearer <Access Token from previous step>" "https://grafana.com/api/instances" | \
|
||||
jq '[.items[]|{stackName: .slug, clusterName:.clusterSlug, cloudProviderAPIURL: "https://cloud-provider-api-\(.clusterSlug).grafana.net"}]'
|
||||
```
|
||||
1. Create a file named `cloud-provider.tf` and add the following code block:
|
||||
|
||||
```tf
|
||||
terraform {
|
||||
required_providers {
|
||||
grafana = {
|
||||
source = "grafana/grafana"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "grafana" {
|
||||
cloud_api_url = "<CLOUD_PROVIDER_API_URL>"
|
||||
cloud_access_policy_token = "<CLOUD_ACCESS_POLICY_TOKEN>"
|
||||
}
|
||||
```
|
||||
|
||||
1. Create a `variables.tf` file and define the `<CLOUD_ACCESS_POLICY_TOKEN>` and `<CLOUD_PROVIDER_API_URL>` variables with your values.
|
||||
1. Run the following Terraform command:
|
||||
```tf
|
||||
terraform apply -var-file="variables.tf"
|
||||
```
|
||||
|
||||
## Configure your resources
|
||||
|
||||
To find instructions for configuring specific Amazon CloudWatch and Microsoft Azure resources in Cloud Provider Observability using Terraform, refer to the following documents:
|
||||
|
||||
- Amazon CloudWatch
|
||||
- [Metrics scrape](/docs/grafana-cloud/monitor-infrastructure/monitor-cloud-provider/aws/cloudwatch-metrics/config-cw-metric-scrape/): Pull CloudWatch metrics from multiple regions for your AWS account, without needing to install Grafana Alloy.
|
||||
- [Metric streams](/docs/grafana-cloud/monitor-infrastructure/monitor-cloud-provider/aws/cloudwatch-metrics/config-cw-metric-streams/#configure-metric-streams-with-terraform): Push metrics with CloudWatch metric streams using Amazon Data Firehose, providing real-time insights and scalability while simplifying configuration and reducing cost and manual effort.
|
||||
- [ALB access logs](/docs/grafana-cloud/monitor-infrastructure/monitor-cloud-provider/aws/logs/cloudwatch-logs/config-alb-access-logs-lambda/#configure-with-terraform): Send application load balancer access logs from AWS to Grafana Cloud using a Lambda function.
|
||||
- [Logs with Lambda](/docs/grafana-cloud/monitor-infrastructure/monitor-cloud-provider/aws/logs/cloudwatch-logs/config-cw-logs-lambda/#configure-with-terraform): Send logs to Grafana Cloud from multiple AWS services using a lambda-promtail function.
|
||||
- [Logs with Amazon Data Firehose](/docs/grafana-cloud/monitor-infrastructure/monitor-cloud-provider/aws/logs/firehose-logs/config-firehose-logs/#configure-with-terraform): Send logs from AWS to Grafana Cloud with Amazon Data Firehose and minimal infrastructure.
|
||||
- Microsoft Azure
|
||||
- [Serverless metrics](/docs/grafana-cloud/monitor-infrastructure/monitor-cloud-provider/azure/collect-azure-serverless/config-azure-metrics-serverless/): Monitor your Azure resources without the need to configure or deploy a collector by using Cloud Provider Observability.
|
||||
- [Logs with Azure functions](/docs/grafana-cloud/monitor-infrastructure/monitor-cloud-provider/azure/config-azure-logs-azure-function/): Send Azure event logs to a Loki endpoint using an Azure function that subscribes to an Azure event hub.
|
||||
|
||||
## Grafana cloud provider resources
|
||||
|
||||
You can define the following Cloud Provider Observability resources and data sources using Terraform:
|
||||
|
||||
| Resource name | Description |
|
||||
| --------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `grafana_cloud_provider_aws_account` | Represents an AWS IAM role that authorizes Grafana Cloud to pull Amazon CloudWatch metrics for a set of regions. Usually, there's one of these resources per configured AWS account. For a full reference of this resource, refer to [the Terraform Grafana Provider reference documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/cloud_provider_aws_account). |
|
||||
| `grafana_cloud_provider_aws_cloudwatch_scrape_job` | Represents a Grafana AWS scrape job. This configures Grafana to fetch a list of metrics/statistics for one or many AWS services, and for a given `grafana_cloud_provider_aws_account`. For a full reference of this resource, refer to [the Terraform Grafana Provider reference documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/cloud_provider_aws_cloudwatch_scrape_job) |
|
||||
| `grafana_cloud_provider_aws_resource_metadata_scrape_job` | Represents a Grafana AWS Resource Metadata scrape job. This resource configures Grafana to fetch resource metadata for one or multiple AWS services, for a given `grafana_cloud_provider_aws_account`. For a full reference of this resource, refer to [the Terraform Grafana Provider reference documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/cloud_provider_aws_resource_metadata_scrape_job) |
|
||||
| `grafana_cloud_provider_azure_credential` | A resource representing an Azure Service Principal credential used by Grafana Cloud to pull Azure Monitor metrics from one or more subscriptions. For a full reference of this resource, refer to [the Terraform Grafana Provider resource documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/cloud_provider_azure_credential). |
|
||||
@@ -1,231 +0,0 @@
|
||||
---
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Terraform
|
||||
title: Creating and managing a Grafana Cloud stack using Terraform
|
||||
weight: 100
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/terraform/terraform-cloud-stack/
|
||||
---
|
||||
|
||||
# Creating and managing a Grafana Cloud stack using Terraform
|
||||
|
||||
Learn how to add a data source, a dashboard, and a folder to a Grafana Cloud stack using Terraform.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before you begin, you should have the following available:
|
||||
|
||||
- A Grafana Cloud account, as shown in [Get started](/docs/grafana-cloud/get-started/)
|
||||
- [Terraform](https://www.terraform.io/downloads) installed on your machine
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
All of the following Terraform configuration files should be saved in the same directory.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Create a Cloud stack
|
||||
|
||||
1. Create a Terraform configuration file.
|
||||
|
||||
This Terraform configuration will create a Grafana Cloud stack and a second token needed for your Grafana instance.
|
||||
|
||||
Create a file named `cloud-stack.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
terraform {
|
||||
required_providers {
|
||||
grafana = {
|
||||
source = "grafana/grafana"
|
||||
version = ">= 2.9.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
// Step 1: Create a stack
|
||||
provider "grafana" {
|
||||
alias = "cloud"
|
||||
cloud_access_policy_token = "<cloud-access-token>"
|
||||
}
|
||||
|
||||
|
||||
resource "grafana_cloud_stack" "my_stack" {
|
||||
provider = grafana.cloud
|
||||
|
||||
name = "<stack-name>"
|
||||
slug = "<stack-name>"
|
||||
region_slug = "<region>" # Example "us","eu" etc
|
||||
delete_protection = true
|
||||
}
|
||||
|
||||
// Step 2: Create a service account and key for the stack
|
||||
resource "grafana_cloud_stack_service_account" "cloud_sa" {
|
||||
provider = grafana.cloud
|
||||
stack_slug = grafana_cloud_stack.my_stack.slug
|
||||
|
||||
name = "<service-account-name>"
|
||||
role = "Admin"
|
||||
is_disabled = false
|
||||
}
|
||||
|
||||
resource "grafana_cloud_stack_service_account_token" "cloud_sa" {
|
||||
provider = grafana.cloud
|
||||
stack_slug = grafana_cloud_stack.my_stack.slug
|
||||
|
||||
name = "terraform serviceaccount key"
|
||||
service_account_id = grafana_cloud_stack_service_account.cloud_sa.id
|
||||
}
|
||||
|
||||
// Step 3: Create resources within the stack
|
||||
provider "grafana" {
|
||||
alias = "my_stack"
|
||||
|
||||
url = grafana_cloud_stack.my_stack.url
|
||||
auth = grafana_cloud_stack_service_account_token.cloud_sa.key
|
||||
}
|
||||
resource "grafana_folder" "my_folder" {
|
||||
provider = grafana.my_stack
|
||||
|
||||
title = "Test Folder"
|
||||
}
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<cloud-access-token>` with your Grafana Cloud Access Policy Token.
|
||||
To create a new one, refer to [Grafana Cloud Access Policies](https://grafana.com/docs/grafana-cloud/account-management/authentication-and-permissions/access-policies/).
|
||||
Add all stacks to the realms list.
|
||||
The scopes needed for the example are:
|
||||
- dashboards:read
|
||||
- orgs:read
|
||||
- stack-dashboards:read
|
||||
- stacks:read
|
||||
- dashboards:write
|
||||
- orgs:write
|
||||
- stack-dashboards:write
|
||||
- stacks:write
|
||||
- stack-service-accounts:write
|
||||
- dashboards:delete
|
||||
- stack-dashboards:delete
|
||||
- stacks:delete
|
||||
- `<stack-name>` with the name of your stack.
|
||||
- `<region>` with the region in which you want to create the stack. For example `us`, `eu`.
|
||||
- `<service-account-name>` with a name for the service account that will be created for operations within the stack/instance.
|
||||
|
||||
The first provider block, `grafana.cloud`, uses the Cloud Access Policy Token from the Cloud Portal and is referenced as a parameter when creating the Cloud stack and the token in the Grafana instance to provide the necessary authentication.
|
||||
|
||||
The second provider block, `grafana.my_stack`, is referenced as a parameter when creating resources inside the Grafana instance.
|
||||
|
||||
## Add a data source
|
||||
|
||||
This guide uses the InfluxDB data source. The required arguments for [grafana_data_source (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/data_source) vary depending on the type of data source you select.
|
||||
|
||||
1. Create a file named `datasource.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
resource "grafana_data_source" "<data-source-name>" {
|
||||
provider = grafana.my_stack
|
||||
|
||||
type = "influxdb"
|
||||
name = "<data-source-name>"
|
||||
url = "<data-source-url>"
|
||||
username = "<username>"
|
||||
password = "<password>"
|
||||
database_name = "<db-name>"
|
||||
}
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<data-source-name>` with the name of the data source to be added in Grafana.
|
||||
- `<data-source-url>` with the URL of your data source.
|
||||
- `<username>` with the username for authenticating with your data source.
|
||||
- `<password>` with the password for authenticating with your data source.
|
||||
- `<db-name>` with the name of your database.
|
||||
|
||||
## Add a folder
|
||||
|
||||
This Terraform configuration creates a folder in your Grafana instance using [grafana_folder (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/folder).
|
||||
|
||||
1. Create a file named `folder.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
resource "grafana_folder" "<folder-name>" {
|
||||
provider = grafana.my_stack
|
||||
|
||||
title = "<folder-name>"
|
||||
}
|
||||
```
|
||||
|
||||
1. Replace the following field value:
|
||||
- `<folder-name>` with a name for the folder.
|
||||
|
||||
## Add a dashboard to the folder
|
||||
|
||||
This Terraform configuration creates a dashboard inside the folder created above in your Grafana instance using [grafana_dashboard (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/dashboard).
|
||||
|
||||
1. Create a file named `dashboard.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
# Using a JSON file
|
||||
resource "grafana_dashboard" "dashboard" {
|
||||
provider = grafana.my_stack
|
||||
|
||||
config_json = file("<file-name>.json")
|
||||
folder = grafana_folder.<folder-name>.id
|
||||
}
|
||||
```
|
||||
|
||||
1. Replace the following field value:
|
||||
- `<file-name>` with the name of the JSON file that has the source code for the dashboard.
|
||||
|
||||
The dashboard is represented by its JSON source code and referenced in the `config_json` parameter.
|
||||
|
||||
## Apply the Terraform configuration
|
||||
|
||||
In a terminal, run the following commands from the directory where all of the configuration files are located.
|
||||
|
||||
1. Initialize a working directory containing Terraform configuration files.
|
||||
|
||||
```shell
|
||||
terraform init
|
||||
```
|
||||
|
||||
1. Preview the changes that Terraform will make.
|
||||
|
||||
```shell
|
||||
terraform plan
|
||||
```
|
||||
|
||||
1. Apply the configuration files.
|
||||
|
||||
```shell
|
||||
terraform apply
|
||||
```
|
||||
|
||||
## Validation
|
||||
|
||||
Once you apply the changes in the Terraform configurations, you should be able to verify the following:
|
||||
|
||||
- The new Grafana stack is created and visible in the Cloud Portal
|
||||
|
||||

|
||||
|
||||
- A service account key token is added in your Grafana instance. In the following image, the service account key token named "terraform serviceaccount key" was added by the [grafana_cloud_stack_service_account_token (Resource)](#create-a-cloud-stack).
|
||||
|
||||

|
||||
|
||||
- A new data source (InfluxDB in this example) is visible in the Grafana instance.
|
||||
|
||||

|
||||
|
||||
- A new folder in Grafana. In the following image, a folder named "Demos" was added by the [grafana_folder (Resource)](#add-a-folder).
|
||||
|
||||

|
||||
|
||||
- A new dashboard in the Grafana instance. In the following image, a dashboard named "InfluxDB Cloud Demos" was created inside the "Demos" folder.
|
||||
|
||||

|
||||
|
||||
## Conclusion
|
||||
|
||||
In this guide, you created a Grafana Cloud stack along with a data source, folder, and dashboard imported from a JSON file using Terraform.
|
||||
|
||||
To learn more about managing Grafana Cloud using Terraform, see [Grafana provider's documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs).
|
||||
@@ -1,384 +0,0 @@
|
||||
---
|
||||
description: Learn how to create Grafana Fleet Management collectors and pipelines in Grafana Cloud using Terraform
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Terraform
|
||||
- Fleet Management
|
||||
- Alloy
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
title: Manage Fleet Management in Grafana Cloud using Terraform
|
||||
weight: 200
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/terraform/terraform-fleet-management/
|
||||
---
|
||||
|
||||
# Manage Fleet Management in Grafana Cloud using Terraform
|
||||
|
||||
Learn how to create [Grafana Fleet Management](https://grafana.com/docs/grafana-cloud/send-data/fleet-management/) collectors and pipelines in Grafana Cloud using Terraform.
|
||||
This guide shows you how to create an access policy and a token for Fleet Management and [Grafana Alloy](https://grafana.com/docs/alloy/latest/), a collector with remote attributes, and a pipeline for profiling Alloy.
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before you begin, you should have the following available:
|
||||
|
||||
- A Grafana Cloud account, as shown in [Get started](https://grafana.com/docs/grafana-cloud/get-started/)
|
||||
- [Terraform](https://www.terraform.io/downloads) installed on your machine
|
||||
- [Alloy](https://grafana.com/docs/alloy/latest/set-up/install/) installed on your machine
|
||||
- Administrator permissions in your Grafana instance
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
All of the following Terraform configuration files should be saved in the same directory.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Configure a provider for Grafana Cloud
|
||||
|
||||
This Terraform configuration configures the [Grafana provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs) to provide necessary authentication when interacting with the Cloud API.
|
||||
The [`grafana_cloud_stack` (Data Source)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/data-sources/cloud_stack) is used to retrieve the user ID and URL details of your instance.
|
||||
|
||||
1. Create a Grafana Cloud access policy and token.
|
||||
To create a new one, refer to [Grafana Cloud Access Policies](https://grafana.com/docs/grafana-cloud/security-and-account-management/authentication-and-permissions/access-policies/).
|
||||
Add your stack to the realms list.
|
||||
The scopes needed for the examples in this guide are:
|
||||
- `accesspolicies:read`
|
||||
- `accesspolicies:write`
|
||||
- `accesspolicies:delete`
|
||||
- `stacks:read`
|
||||
|
||||
1. Create a file named `cloud-provider.tf` and add the following code block:
|
||||
|
||||
```terraform
|
||||
terraform {
|
||||
required_providers {
|
||||
grafana = {
|
||||
source = "grafana/grafana"
|
||||
version = ">= 3.19.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "grafana" {
|
||||
alias = "cloud"
|
||||
|
||||
cloud_access_policy_token = "<CLOUD_ACCESS_POLICY_TOKEN>"
|
||||
}
|
||||
|
||||
data "grafana_cloud_stack" "stack" {
|
||||
provider = grafana.cloud
|
||||
|
||||
slug = "<STACK_SLUG>"
|
||||
}
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<CLOUD_ACCESS_POLICY_TOKEN>` with the access policy token you created in the first step
|
||||
- `<STACK_SLUG>` with your stack slug, which is the subdomain where your Grafana Cloud instance is available: `https://<STACK_SLUG>.grafana.net`
|
||||
|
||||
## Create an access policy and token for Fleet Management
|
||||
|
||||
This Terraform configuration creates the following:
|
||||
|
||||
- An access policy named `fleet-management-policy` with `fleet-management:read` and `fleet-management:write` scopes, using [`grafana_cloud_access_policy` (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/cloud_access_policy)
|
||||
- A token named `fleet-management-token`, using [`grafana_cloud_access_policy_token` (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/cloud_access_policy_token)
|
||||
|
||||
1. Create a file named `fm-access-policy.tf` and add the following code block:
|
||||
|
||||
```terraform
|
||||
resource "grafana_cloud_access_policy" "fm_policy" {
|
||||
provider = grafana.cloud
|
||||
|
||||
name = "fleet-management-policy"
|
||||
region = data.grafana_cloud_stack.stack.region_slug
|
||||
|
||||
scopes = [
|
||||
"fleet-management:read",
|
||||
"fleet-management:write"
|
||||
]
|
||||
|
||||
realm {
|
||||
type = "stack"
|
||||
identifier = data.grafana_cloud_stack.stack.id
|
||||
}
|
||||
}
|
||||
|
||||
resource "grafana_cloud_access_policy_token" "fm_token" {
|
||||
provider = grafana.cloud
|
||||
|
||||
name = "fleet-management-token"
|
||||
region = grafana_cloud_access_policy.fm_policy.region
|
||||
access_policy_id = grafana_cloud_access_policy.fm_policy.policy_id
|
||||
}
|
||||
```
|
||||
|
||||
## Configure a provider for Fleet Management
|
||||
|
||||
This Terraform configuration configures the [Grafana provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs) to provide necessary authentication when interacting with the Fleet Management API.
|
||||
|
||||
1. Create a file named `fm-provider.tf` and add the following code block:
|
||||
|
||||
```terraform
|
||||
locals {
|
||||
fm_id = data.grafana_cloud_stack.stack.fleet_management_user_id
|
||||
fm_token = grafana_cloud_access_policy_token.fm_token.token
|
||||
fm_url = data.grafana_cloud_stack.stack.fleet_management_url
|
||||
}
|
||||
|
||||
provider "grafana" {
|
||||
alias = "fm"
|
||||
|
||||
fleet_management_auth = "${local.fm_id}:${local.fm_token}"
|
||||
fleet_management_url = local.fm_url
|
||||
}
|
||||
```
|
||||
|
||||
## Create a Fleet Management collector
|
||||
|
||||
This Terraform configuration creates a collector with a remote attribute, using [`grafana_fleet_management_collector` (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/fleet_management_collector).
|
||||
|
||||
This configuration only preregisters the collector.
|
||||
You must complete the [Run Alloy](#run-alloy) step for the collector to register with Fleet Management and be assigned remote attributes.
|
||||
|
||||
1. Create a file named `fm-collector.tf` and add the following code block:
|
||||
|
||||
```terraform
|
||||
resource "grafana_fleet_management_collector" "fm_collector" {
|
||||
provider = grafana.fm
|
||||
|
||||
id = "prod_collector"
|
||||
remote_attributes = {
|
||||
"env" = "PROD"
|
||||
}
|
||||
enabled = true
|
||||
}
|
||||
```
|
||||
|
||||
## Create a Fleet Management pipeline
|
||||
|
||||
This Terraform configuration creates a pipeline for Alloy profiling with a matcher for the collector declared in the previous step, using [`grafana_fleet_management_pipeline` (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/fleet_management_pipeline).
|
||||
The pipeline writes the profiles to [Grafana Cloud Profiles](https://grafana.com/docs/grafana-cloud/monitor-applications/profiles/).
|
||||
|
||||
1. Create a file named `profiling.alloy.tftpl` and add the following content:
|
||||
|
||||
```alloy
|
||||
// This pipeline scrapes pprof Go profiles from Alloy and sends them to Pyroscope.
|
||||
//
|
||||
// It requires the following environment variables to be set where Alloy is running:
|
||||
// Required:
|
||||
// * GCLOUD_RW_API_KEY: The Grafana Cloud API key with write access to Pyroscope.
|
||||
// Optional:
|
||||
// * ALLOY_ADDRESS: The address Alloy listens on. Defaults to 127.0.0.1:12345.
|
||||
pyroscope.scrape "alloy" {
|
||||
targets = [
|
||||
{
|
||||
"__address__" = coalesce(
|
||||
sys.env("ALLOY_ADDRESS"),
|
||||
"127.0.0.1:12345",
|
||||
),
|
||||
"service_name" = "alloy",
|
||||
},
|
||||
]
|
||||
forward_to = [pyroscope.write.grafana_cloud.receiver]
|
||||
|
||||
profiling_config {
|
||||
profile.process_cpu {
|
||||
enabled = true
|
||||
}
|
||||
|
||||
profile.memory {
|
||||
enabled = true
|
||||
}
|
||||
|
||||
profile.mutex {
|
||||
enabled = true
|
||||
}
|
||||
|
||||
profile.block {
|
||||
enabled = true
|
||||
}
|
||||
|
||||
profile.goroutine {
|
||||
enabled = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pyroscope.write "grafana_cloud" {
|
||||
endpoint {
|
||||
url = "${profiles_url}"
|
||||
|
||||
basic_auth {
|
||||
username = "${profiles_id}"
|
||||
password = sys.env("GCLOUD_RW_API_KEY")
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
1. Create a file named `fm-pipeline.tf` and add the following code block:
|
||||
|
||||
```terraform
|
||||
locals {
|
||||
profiles_id = data.grafana_cloud_stack.stack.profiles_user_id
|
||||
profiles_url = data.grafana_cloud_stack.stack.profiles_url
|
||||
}
|
||||
|
||||
resource "grafana_fleet_management_pipeline" "pipeline" {
|
||||
provider = grafana.fm
|
||||
|
||||
name = "profiling"
|
||||
contents = templatefile(
|
||||
"profiling.alloy.tftpl",
|
||||
{
|
||||
profiles_id = local.profiles_id,
|
||||
profiles_url = local.profiles_url,
|
||||
},
|
||||
)
|
||||
matchers = [
|
||||
"env=\"PROD\""
|
||||
]
|
||||
enabled = true
|
||||
}
|
||||
```
|
||||
|
||||
## Create an access policy and token for Alloy
|
||||
|
||||
This Terraform configuration creates the following:
|
||||
|
||||
- An access policy named `alloy-policy` with `set:alloy-data-write` scope, using [`grafana_cloud_access_policy` (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/cloud_access_policy)
|
||||
- A token named `alloy-token`, using [`grafana_cloud_access_policy_token` (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/cloud_access_policy_token)
|
||||
|
||||
1. Create a file named `alloy-access-policy.tf` and add the following code block:
|
||||
|
||||
```terraform
|
||||
resource "grafana_cloud_access_policy" "alloy_policy" {
|
||||
provider = grafana.cloud
|
||||
|
||||
name = "alloy-policy"
|
||||
region = data.grafana_cloud_stack.stack.region_slug
|
||||
|
||||
scopes = [
|
||||
"set:alloy-data-write"
|
||||
]
|
||||
|
||||
realm {
|
||||
type = "stack"
|
||||
identifier = data.grafana_cloud_stack.stack.id
|
||||
}
|
||||
}
|
||||
|
||||
resource "grafana_cloud_access_policy_token" "alloy_token" {
|
||||
provider = grafana.cloud
|
||||
|
||||
name = "alloy-token"
|
||||
region = grafana_cloud_access_policy.alloy_policy.region
|
||||
access_policy_id = grafana_cloud_access_policy.alloy_policy.policy_id
|
||||
}
|
||||
|
||||
output "alloy_token" {
|
||||
value = grafana_cloud_access_policy_token.alloy_token.token
|
||||
sensitive = true
|
||||
}
|
||||
```
|
||||
|
||||
## Create a configuration file for Alloy
|
||||
|
||||
This Terraform configuration creates an Alloy configuration file with the [`remotecfg` block](https://grafana.com/docs/grafana-cloud/send-data/alloy/reference/config-blocks/remotecfg/) for Fleet Management, using [`local_file` (Resource)](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file).
|
||||
|
||||
1. Create a file named `config.alloy.tftpl` and add the following content:
|
||||
|
||||
```alloy
|
||||
remotecfg {
|
||||
id = "${collector_id}"
|
||||
url = "${fm_url}"
|
||||
poll_frequency = "60s"
|
||||
|
||||
basic_auth {
|
||||
username = "${fm_id}"
|
||||
password = sys.env("GCLOUD_RW_API_KEY")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
1. Create a file named `alloy-config.tf` and add the following code block:
|
||||
|
||||
```terraform
|
||||
resource "local_file" "alloy_config" {
|
||||
filename = "<ALLOY_CONFIG_PATH>"
|
||||
content = templatefile(
|
||||
"config.alloy.tftpl",
|
||||
{
|
||||
collector_id = "prod_collector",
|
||||
fm_id = local.fm_id,
|
||||
fm_url = local.fm_url,
|
||||
},
|
||||
)
|
||||
directory_permission = "0644"
|
||||
file_permission = "0644"
|
||||
}
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<ALLOY_CONFIG_PATH>` with the path the Alloy configuration file should be written to, for example `config.alloy`
|
||||
|
||||
## Apply the Terraform configuration
|
||||
|
||||
In a terminal, run the following commands from the directory where all of the configuration files are located.
|
||||
|
||||
1. Initialize a working directory containing Terraform configuration files:
|
||||
|
||||
```shell
|
||||
terraform init
|
||||
```
|
||||
|
||||
1. Preview the Terraform changes:
|
||||
|
||||
```shell
|
||||
terraform plan
|
||||
```
|
||||
|
||||
1. Apply the configuration:
|
||||
|
||||
```shell
|
||||
terraform apply
|
||||
```
|
||||
|
||||
## Run Alloy
|
||||
|
||||
To learn how to start or restart Alloy, refer to [Run Grafana Alloy](https://grafana.com/docs/alloy/latest/set-up/run/).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The variable `GCLOUD_RW_API_KEY` must be set in the environment where Alloy is running for the remote configuration in this example to work.
|
||||
{{< /admonition >}}
|
||||
|
||||
1. Run the following command to view the Alloy token:
|
||||
|
||||
```shell
|
||||
terraform output -raw alloy_token
|
||||
```
|
||||
|
||||
1. Set the environment variable `GCLOUD_RW_API_KEY` to the value from the first step.
|
||||
1. Run Alloy.
|
||||
|
||||
## Validation
|
||||
|
||||
After you apply the changes in the Terraform configurations and run Alloy, you should be able to verify the following:
|
||||
|
||||
- A collector is added to the Fleet Management **Inventory tab**:
|
||||
|
||||
{{< figure alt="The Inventory screen in the Fleet Management interface in Grafana Cloud which shows that a new collector called `prod_collector` is registered with attribute `env=PROD`, has a healthy status, and was last modified a few seconds ago." src="/media/docs/fleet-management/screenshot-fleet-management-terraform-validate-collector.png" >}}
|
||||
|
||||
- A pipeline is added to the Fleet Management **Remote configuration tab**:
|
||||
|
||||
{{< figure alt="The Remote configuration screen in the Fleet Management interface in Grafana Cloud which shows that a new configuration pipeline called `profiling` is active and was last modified a few seconds ago." src="/media/docs/fleet-management/screenshot-fleet-management-terraform-validate-pipeline.png" >}}
|
||||
|
||||
- Alloy profiles are being exported to Grafana Cloud Profiles:
|
||||
|
||||
{{< figure alt="A dashboard in Grafana Cloud which shows Alloy profiling data, including graphs for CPU and memory." src="/media/docs/fleet-management/screenshot-fleet-management-terraform-validate-profiles.png" >}}
|
||||
|
||||
## Conclusion
|
||||
|
||||
In this guide, you created an access policy and a token for Fleet Management and Alloy, a collector with remote attributes, and a pipeline for profiling Alloy, all using Terraform.
|
||||
|
||||
To learn more about managing Grafana Cloud using Terraform, refer to [Grafana provider's documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs).
|
||||
-105
@@ -1,105 +0,0 @@
|
||||
---
|
||||
description: Learn how to manage Grafana Frontend Observability resources in Grafana Cloud using Terraform
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Terraform
|
||||
- Frontend Observability
|
||||
title: Manage Frontend Observability in Grafana Cloud with Terraform
|
||||
weight: 200
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/terraform/terraform-frontend-observability/
|
||||
---
|
||||
|
||||
# Manage Frontend Observability in Grafana Cloud with Terraform
|
||||
|
||||
Learn how to use Terraform to manage [Grafana Frontend Observability](https://grafana.com/docs/grafana-cloud/frontend-observability/) resources, such as your applications.
|
||||
This guide shows you how to create an access policy and a token for Frontend Observability so that you can connect to the Frontend Observability API.
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before you begin, you should have the following available:
|
||||
|
||||
- A Grafana Cloud account, as shown in [Get started](https://grafana.com/docs/grafana-cloud/get-started/)
|
||||
- [Terraform](https://www.terraform.io/downloads) installed on your machine
|
||||
- Administrator permissions in your Grafana instance
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
All of the following Terraform configuration files should be saved in the same directory.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Configure a provider for Grafana Cloud
|
||||
|
||||
This Terraform configuration configures the [Grafana provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs) to provide necessary authentication when interacting with the Cloud API.
|
||||
The [`grafana_cloud_stack` (Data Source)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/data-sources/cloud_stack) is used to retrieve the details of your instance.
|
||||
|
||||
1. Create a Grafana Cloud access policy and token.
|
||||
To create a new one, refer to [Grafana Cloud Access Policies](https://grafana.com/docs/grafana-cloud/security-and-account-management/authentication-and-permissions/access-policies/).
|
||||
Add your stack to the realms list.
|
||||
The scopes needed for the examples in this guide are:
|
||||
- `accesspolicies:read`
|
||||
- `accesspolicies:write`
|
||||
- `accesspolicies:delete`
|
||||
- `dashboards:read`
|
||||
- `dashboards:write`
|
||||
- `dashboards:delete`
|
||||
- `orgs:read`
|
||||
- `orgs:write`
|
||||
- `stacks:read`
|
||||
- `stacks:write`
|
||||
- `stacks:delete`
|
||||
- `stack-dashboards:read`
|
||||
- `stack-dashboards:write`
|
||||
- `stack-dashboards:delete`
|
||||
- `stack-service-accounts:write`
|
||||
|
||||
1. Create a file named `cloud-provider.tf` and add the following code block:
|
||||
|
||||
```terraform
|
||||
terraform {
|
||||
required_providers {
|
||||
grafana = {
|
||||
source = "grafana/grafana"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "grafana" {
|
||||
alias = "cloud"
|
||||
|
||||
cloud_access_policy_token = "<CLOUD_ACCESS_POLICY_TOKEN>"
|
||||
}
|
||||
|
||||
data "grafana_cloud_stack" "stack" {
|
||||
provider = grafana.cloud
|
||||
|
||||
slug = "<STACK_SLUG>"
|
||||
}
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<CLOUD_ACCESS_POLICY_TOKEN>` with the access policy token you created in the first step
|
||||
- `<STACK_SLUG>` with your stack slug, which is the subdomain where your Grafana Cloud instance is available: `https://<STACK_SLUG>.grafana.net`
|
||||
|
||||
## Create an access policy and token for Frontend Observability
|
||||
|
||||
You must create a Terraform configuration with the following:
|
||||
|
||||
- An access policy with `frontend-observability:read`, `frontend-observability:write`, and `frontend-observability:delete` scopes, using [`grafana_cloud_access_policy` (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/cloud_access_policy)
|
||||
- A token named `frontend_o11y_api_access_token`, using [`grafana_cloud_access_policy_token` (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/cloud_access_policy_token)
|
||||
|
||||
## Configure the provider to use the Frontend Observability API
|
||||
|
||||
After you have created the token, you can configure the provider as follows:
|
||||
|
||||
```terraform
|
||||
provider "grafana" {
|
||||
frontend_o11y_api_access_token = "<access token from previous step>"
|
||||
}
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
In this guide, you created an access policy and a token for Frontend Observability using Terraform.
|
||||
|
||||
To learn more about managing Grafana Cloud using Terraform, refer to [Grafana provider's documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs).
|
||||
-73
@@ -1,73 +0,0 @@
|
||||
---
|
||||
cards:
|
||||
items:
|
||||
- description: Learn how to set up Terraform provider and configure your environment for managing Knowledge Graph resources.
|
||||
height: 24
|
||||
href: ./getting-started/
|
||||
title: Get started with Terraform
|
||||
- description: Configure notification alerts to manage how alerts are processed and routed in your Knowledge Graph.
|
||||
height: 24
|
||||
href: ./notification-alerts/
|
||||
title: Notification alerts
|
||||
- description: Define suppression rules to temporarily disable specific alerts during maintenance windows or testing.
|
||||
height: 24
|
||||
href: ./suppressed-assertions/
|
||||
title: Suppressed assertions
|
||||
- description: Create custom entity models and define how entities are discovered based on Prometheus queries.
|
||||
height: 24
|
||||
href: ./custom-model-rules/
|
||||
title: Custom model rules
|
||||
- description: Configure log data correlation with entities using data source mappings and filtering options.
|
||||
height: 24
|
||||
href: ./log-configurations/
|
||||
title: Log configurations
|
||||
- description: Set custom thresholds for request, resource, and health assertions to monitor your services.
|
||||
height: 24
|
||||
href: ./thresholds/
|
||||
title: Thresholds
|
||||
- description: Configure knowledge graph SLOs with entity-centric monitoring and RCA workbench integration for root cause analysis.
|
||||
height: 24
|
||||
href: ./knowledge-graph-slo/
|
||||
title: Knowledge graph SLOs
|
||||
title_class: pt-0 lh-1
|
||||
description: Manage Grafana Cloud Knowledge Graph using Terraform
|
||||
hero:
|
||||
description: Use Terraform to manage Grafana Cloud Knowledge Graph resources as code. Configure notification alerts, suppressed assertions, custom model rules, log configurations, and threshold configurations using infrastructure as code best practices.
|
||||
level: 1
|
||||
title: Manage Knowledge Graph using Terraform
|
||||
menuTitle: Manage Knowledge Graph in Grafana Cloud using Terraform
|
||||
title: Manage Knowledge Graph in Grafana Cloud using Terraform
|
||||
weight: 130
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Terraform
|
||||
- Knowledge Graph
|
||||
- Alert Configuration
|
||||
- Suppressed Assertions
|
||||
- Custom Model Rules
|
||||
- Log Configuration
|
||||
- Threshold Configuration
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/terraform/terraform-knowledge-graph/
|
||||
---
|
||||
|
||||
{{< docs/hero-simple key="hero" >}}
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
Terraform enables you to manage [Grafana Cloud Knowledge Graph](/docs/grafana-cloud/knowledge-graph/) resources using infrastructure as code. With Terraform, you can define, version control, and deploy Knowledge Graph configurations including alert rules, suppression policies, entity models, log correlations, and thresholds.
|
||||
|
||||
## Explore
|
||||
|
||||
{{< card-grid key="cards" type="simple" >}}
|
||||
|
||||
---
|
||||
|
||||
## Related resources
|
||||
|
||||
- [Grafana Terraform Provider Documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs)
|
||||
- [Knowledge Graph Documentation](/docs/grafana-cloud/knowledge-graph/)
|
||||
- [Terraform Best Practices](https://www.terraform.io/docs/cloud/guides/recommended-practices/index.html)
|
||||
-431
@@ -1,431 +0,0 @@
|
||||
---
|
||||
description: Define custom entity models for Knowledge Graph using Terraform
|
||||
menuTitle: Custom model rules
|
||||
title: Create custom model rules using Terraform
|
||||
weight: 400
|
||||
keywords:
|
||||
- Terraform
|
||||
- Knowledge Graph
|
||||
- Custom Model Rules
|
||||
- Entity Models
|
||||
- Prometheus
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/terraform/terraform-knowledge-graph/custom-model-rules/
|
||||
---
|
||||
|
||||
# Create custom model rules using Terraform
|
||||
|
||||
Custom model rules in [Knowledge Graph](/docs/grafana-cloud/knowledge-graph/) allow you to define how entities are discovered and modeled based on Prometheus queries. These rules enable you to create custom entity types, define their relationships, and specify how they should be enriched with additional data.
|
||||
|
||||
For information about managing entities and relations in the Knowledge Graph UI, refer to [Manage entities and relations](/docs/grafana-cloud/knowledge-graph/configure/manage-entities-relations/).
|
||||
|
||||
## Basic custom model rules
|
||||
|
||||
Create a file named `custom-model-rules.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
# Basic custom model rule for services
|
||||
resource "grafana_asserts_custom_model_rules" "basic_service" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "basic-service-model"
|
||||
|
||||
rules {
|
||||
entity {
|
||||
type = "Service"
|
||||
name = "service"
|
||||
|
||||
defined_by {
|
||||
query = "up{job!=''}"
|
||||
label_values = {
|
||||
service = "job"
|
||||
}
|
||||
literals = {
|
||||
_source = "up_query"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Advanced service model with scope and lookup
|
||||
|
||||
Define service entities with environment scoping and relationship mappings:
|
||||
|
||||
```terraform
|
||||
# Advanced service model with environment scoping
|
||||
resource "grafana_asserts_custom_model_rules" "advanced_service" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "advanced-service-model"
|
||||
|
||||
rules {
|
||||
entity {
|
||||
type = "Service"
|
||||
name = "workload | service | job"
|
||||
|
||||
scope = {
|
||||
namespace = "namespace"
|
||||
env = "asserts_env"
|
||||
site = "asserts_site"
|
||||
}
|
||||
|
||||
lookup = {
|
||||
workload = "workload | deployment | statefulset | daemonset | replicaset"
|
||||
service = "service"
|
||||
job = "job"
|
||||
proxy_job = "job"
|
||||
}
|
||||
|
||||
defined_by {
|
||||
query = "up{job!='', asserts_env!=''}"
|
||||
label_values = {
|
||||
service = "service"
|
||||
job = "job"
|
||||
workload = "workload"
|
||||
namespace = "namespace"
|
||||
}
|
||||
literals = {
|
||||
_source = "up_with_workload"
|
||||
}
|
||||
}
|
||||
|
||||
defined_by {
|
||||
query = "up{job='maintenance'}"
|
||||
disabled = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Multi-entity model configuration
|
||||
|
||||
Define multiple entity types in a single configuration:
|
||||
|
||||
```terraform
|
||||
# Multiple entity types in a single model
|
||||
resource "grafana_asserts_custom_model_rules" "multi_entity" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "kubernetes-entities"
|
||||
|
||||
rules {
|
||||
# Service entity
|
||||
entity {
|
||||
type = "Service"
|
||||
name = "service"
|
||||
|
||||
scope = {
|
||||
namespace = "namespace"
|
||||
cluster = "cluster"
|
||||
}
|
||||
|
||||
defined_by {
|
||||
query = "up{service!=''}"
|
||||
label_values = {
|
||||
service = "service"
|
||||
namespace = "namespace"
|
||||
cluster = "cluster"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Pod entity
|
||||
entity {
|
||||
type = "Pod"
|
||||
name = "Pod"
|
||||
|
||||
scope = {
|
||||
namespace = "namespace"
|
||||
cluster = "cluster"
|
||||
}
|
||||
|
||||
lookup = {
|
||||
service = "service"
|
||||
workload = "workload"
|
||||
}
|
||||
|
||||
defined_by {
|
||||
query = "kube_pod_info{pod!=''}"
|
||||
label_values = {
|
||||
Pod = "pod"
|
||||
namespace = "namespace"
|
||||
cluster = "cluster"
|
||||
service = "service"
|
||||
}
|
||||
literals = {
|
||||
_entity_type = "Pod"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Namespace entity
|
||||
entity {
|
||||
type = "Namespace"
|
||||
name = "namespace"
|
||||
|
||||
scope = {
|
||||
cluster = "cluster"
|
||||
}
|
||||
|
||||
defined_by {
|
||||
query = "kube_namespace_status_phase{namespace!=''}"
|
||||
label_values = {
|
||||
namespace = "namespace"
|
||||
cluster = "cluster"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Complex entity with enrichment
|
||||
|
||||
Create service entities with multiple data sources and enrichment:
|
||||
|
||||
```terraform
|
||||
# Service entity with enrichment from multiple sources
|
||||
resource "grafana_asserts_custom_model_rules" "enriched_service" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "enriched-service-model"
|
||||
|
||||
rules {
|
||||
entity {
|
||||
type = "Service"
|
||||
name = "service"
|
||||
|
||||
enriched_by = [
|
||||
"prometheus_metrics",
|
||||
"kubernetes_metadata",
|
||||
"application_logs"
|
||||
]
|
||||
|
||||
scope = {
|
||||
environment = "asserts_env"
|
||||
region = "asserts_site"
|
||||
team = "team"
|
||||
}
|
||||
|
||||
lookup = {
|
||||
deployment = "workload"
|
||||
Pod = "pod"
|
||||
container = "container"
|
||||
}
|
||||
|
||||
# Primary definition from service up metrics
|
||||
defined_by {
|
||||
query = "up{service!='', asserts_env!=''}"
|
||||
label_values = {
|
||||
service = "service"
|
||||
environment = "asserts_env"
|
||||
region = "asserts_site"
|
||||
team = "team"
|
||||
}
|
||||
literals = {
|
||||
_primary_source = "service_up"
|
||||
}
|
||||
}
|
||||
|
||||
# Secondary definition from application metrics
|
||||
defined_by {
|
||||
query = "http_requests_total{service!=''}"
|
||||
label_values = {
|
||||
service = "service"
|
||||
environment = "environment"
|
||||
version = "version"
|
||||
}
|
||||
literals = {
|
||||
_secondary_source = "http_metrics"
|
||||
}
|
||||
}
|
||||
|
||||
# Disabled definition for testing
|
||||
defined_by {
|
||||
query = "test_metric{service!=''}"
|
||||
disabled = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Database and infrastructure entities
|
||||
|
||||
Define database and infrastructure entity models:
|
||||
|
||||
```terraform
|
||||
# Database and infrastructure entity models
|
||||
resource "grafana_asserts_custom_model_rules" "infrastructure" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "infrastructure-entities"
|
||||
|
||||
rules {
|
||||
# Database entity
|
||||
entity {
|
||||
type = "Database"
|
||||
name = "database_instance"
|
||||
|
||||
scope = {
|
||||
environment = "env"
|
||||
region = "region"
|
||||
}
|
||||
|
||||
lookup = {
|
||||
host = "instance"
|
||||
port = "port"
|
||||
db_name = "database"
|
||||
}
|
||||
|
||||
defined_by {
|
||||
query = "mysql_up{instance!=''}"
|
||||
label_values = {
|
||||
database_instance = "instance"
|
||||
database = "database"
|
||||
env = "environment"
|
||||
region = "region"
|
||||
}
|
||||
literals = {
|
||||
_db_type = "mysql"
|
||||
}
|
||||
metric_value = "1"
|
||||
}
|
||||
|
||||
defined_by {
|
||||
query = "postgres_up{instance!=''}"
|
||||
label_values = {
|
||||
database_instance = "instance"
|
||||
database = "datname"
|
||||
env = "environment"
|
||||
}
|
||||
literals = {
|
||||
_db_type = "postgresql"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Load balancer entity
|
||||
entity {
|
||||
type = "LoadBalancer"
|
||||
name = "lb_instance"
|
||||
|
||||
scope = {
|
||||
environment = "env"
|
||||
}
|
||||
|
||||
defined_by {
|
||||
query = "haproxy_up{proxy!=''}"
|
||||
label_values = {
|
||||
lb_instance = "instance"
|
||||
proxy = "proxy"
|
||||
env = "environment"
|
||||
}
|
||||
literals = {
|
||||
_lb_type = "haproxy"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Resource reference
|
||||
|
||||
### `grafana_asserts_custom_model_rules`
|
||||
|
||||
Manage Knowledge Graph custom model rules through the Grafana API. This resource allows you to define custom entity models based on Prometheus queries with advanced mapping and enrichment capabilities.
|
||||
|
||||
#### Arguments
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| ------- | -------------- | -------- | -------------------------------------------------------------------------------------------------------- |
|
||||
| `name` | `string` | Yes | The name of the custom model rules. This field is immutable and forces recreation if changed. |
|
||||
| `rules` | `list(object)` | Yes | The rules configuration containing entity definitions. Refer to [rules block](#rules-block) for details. |
|
||||
|
||||
#### Rules block
|
||||
|
||||
Each `rules` block supports the following:
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| -------- | -------------- | -------- | ------------------------------------------------------------------------------- |
|
||||
| `entity` | `list(object)` | Yes | List of entity definitions. Refer to [entity block](#entity-block) for details. |
|
||||
|
||||
#### Entity block
|
||||
|
||||
Each `entity` block supports the following:
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| ------------- | -------------- | -------- | ------------------------------------------------------------------------------------------------------ |
|
||||
| `type` | `string` | Yes | The type of the entity (for example, Service, Pod, Namespace). |
|
||||
| `name` | `string` | Yes | The name pattern for the entity. Can include pipe-separated alternatives. |
|
||||
| `defined_by` | `list(object)` | Yes | List of queries that define this entity. Refer to [`defined_by` block](#defined_by-block) for details. |
|
||||
| `disabled` | `bool` | No | Whether this entity is disabled. Defaults to `false`. |
|
||||
| `enriched_by` | `list(string)` | No | List of enrichment sources for the entity. |
|
||||
| `lookup` | `map(string)` | No | Lookup mappings for the entity to relate different label names. |
|
||||
| `scope` | `map(string)` | No | Scope labels that define the boundaries of this entity type. |
|
||||
|
||||
#### `defined_by` block
|
||||
|
||||
Each `defined_by` block supports the following:
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| -------------- | ------------- | -------- | ------------------------------------------------------------------------- |
|
||||
| `query` | `string` | Yes | The Prometheus query that defines this entity. |
|
||||
| `disabled` | `bool` | No | Whether this query is disabled. Defaults to `false`. |
|
||||
| `label_values` | `map(string)` | No | Label value mappings for extracting entity attributes from query results. |
|
||||
| `literals` | `map(string)` | No | Literal value mappings for adding static attributes to entities. |
|
||||
| `metric_value` | `string` | No | Metric value to use from the query result. |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
When `disabled = true` is set for a `defined_by` query, only the `query` field is used for matching. All other fields in the block are ignored.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Best practices
|
||||
|
||||
### Entity models
|
||||
|
||||
- Design your entity models to reflect your actual infrastructure and application architecture
|
||||
- Use descriptive names for custom model rules that indicate their purpose and scope
|
||||
- Start with basic entity definitions and gradually add complexity as needed
|
||||
- Define clear entity scopes using the `scope` parameter to organize entities by environment, region, or team
|
||||
|
||||
### Query design and performance
|
||||
|
||||
- Write efficient Prometheus queries that don't overload your monitoring system
|
||||
- Test your Prometheus queries independently before using them in model rules
|
||||
- Use specific label filters to reduce the scope of your queries where possible
|
||||
- Consider the cardinality implications of your entity definitions
|
||||
- Use the `disabled` flag to temporarily disable problematic queries during debugging
|
||||
|
||||
### Relationships and enrichment
|
||||
|
||||
- Use `lookup` mappings to establish relationships between different entity types
|
||||
- Leverage `enriched_by` to specify additional data sources for entity enrichment
|
||||
- Map Prometheus labels to entity attributes using clear and descriptive names
|
||||
- Use meaningful `literals` to add static metadata that helps with entity identification
|
||||
|
||||
### Label and attribute management
|
||||
|
||||
- Establish consistent labeling conventions across your infrastructure
|
||||
- Use `label_values` to extract dynamic attributes from your metrics
|
||||
- Document the meaning and expected values of custom literals
|
||||
- Ensure label names match across different entity definitions for proper relationship discovery
|
||||
|
||||
## Validation
|
||||
|
||||
After applying the Terraform configuration, verify that:
|
||||
|
||||
- Custom model rules are applied in your Knowledge Graph instance
|
||||
- Entities are being discovered according to your defined queries
|
||||
- Entity relationships and enrichment are working as expected
|
||||
- Entity graphs display the correct entity types and connections
|
||||
- Queries perform well without causing excessive load
|
||||
|
||||
## Related documentation
|
||||
|
||||
- [Manage entities and relations in Knowledge Graph](/docs/grafana-cloud/knowledge-graph/configure/manage-entities-relations/)
|
||||
- [Get started with Terraform for Knowledge Graph](../getting-started/)
|
||||
- [Knowledge graph basics](/docs/grafana-cloud/knowledge-graph/knowledge-graph-basics/)
|
||||
-140
@@ -1,140 +0,0 @@
|
||||
---
|
||||
description: Learn how to configure Terraform to manage Knowledge Graph resources
|
||||
menuTitle: Get started
|
||||
title: Get started with Terraform for Knowledge Graph
|
||||
weight: 100
|
||||
keywords:
|
||||
- Terraform
|
||||
- Knowledge Graph
|
||||
- Provider Setup
|
||||
- Getting Started
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/terraform/terraform-knowledge-graph/getting-started/
|
||||
---
|
||||
|
||||
# Get started with Terraform for Knowledge Graph
|
||||
|
||||
Learn how to configure Terraform to manage [Grafana Cloud Knowledge Graph](/docs/grafana-cloud/knowledge-graph/) resources. This guide walks you through setting up the Grafana Terraform provider and preparing your environment.
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before you begin, ensure you have the following:
|
||||
|
||||
- A Grafana Cloud account, as shown in [Get started](/docs/grafana-cloud/get-started/)
|
||||
- [Terraform](https://www.terraform.io/downloads) installed on your machine
|
||||
- Administrator permissions in your Grafana instance
|
||||
- [Knowledge Graph enabled](/docs/grafana-cloud/knowledge-graph/get-started/) in your Grafana Cloud stack
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
All Terraform configuration files should be saved in the same directory.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Configure the Grafana provider
|
||||
|
||||
This Terraform configuration sets up the [Grafana provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs) to provide necessary authentication when managing knowledge graph resources.
|
||||
|
||||
You can reuse a similar setup to the one described in [Creating and managing a Grafana Cloud stack using Terraform](/docs/grafana-cloud/as-code/infrastructure-as-code/terraform/terraform-cloud-stack/) to set up a service account and a token.
|
||||
|
||||
### Steps
|
||||
|
||||
1. Create a service account and token in Grafana.
|
||||
|
||||
To create a new one, refer to [Service account tokens](/docs/grafana/latest/administration/service-accounts/#service-account-tokens).
|
||||
|
||||
1. Create a file named `main.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
terraform {
|
||||
required_providers {
|
||||
grafana = {
|
||||
source = "grafana/grafana"
|
||||
version = ">= 2.9.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "grafana" {
|
||||
alias = "asserts"
|
||||
|
||||
url = "<Stack-URL>"
|
||||
auth = "<Service-account-token>"
|
||||
stack_id = "<Stack-ID>"
|
||||
}
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<Stack-URL>` with the URL of your Grafana stack (for example, `https://my-stack.grafana.net/`)
|
||||
- `<Service-account-token>` with the service account token that you created
|
||||
- `<Stack-ID>` with your Grafana Cloud stack ID
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The `stack_id` parameter is required for Knowledge Graph resources to identify the stack where the resources belong.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Apply Terraform configurations
|
||||
|
||||
After creating your Terraform configuration files, apply them using the following commands:
|
||||
|
||||
1. Initialize a working directory containing Terraform configuration files:
|
||||
|
||||
```shell
|
||||
terraform init
|
||||
```
|
||||
|
||||
1. Preview the changes that Terraform makes:
|
||||
|
||||
```shell
|
||||
terraform plan
|
||||
```
|
||||
|
||||
1. Apply the configuration files:
|
||||
|
||||
```shell
|
||||
terraform apply
|
||||
```
|
||||
|
||||
## Verify your setup
|
||||
|
||||
After applying the configuration, verify your setup by checking that:
|
||||
|
||||
- Terraform can authenticate with your Grafana Cloud stack
|
||||
- The provider is properly configured with the correct stack ID
|
||||
- No errors appear in the Terraform output
|
||||
|
||||
## Best practices
|
||||
|
||||
When managing Knowledge Graph resources with Terraform, consider the following best practices:
|
||||
|
||||
### Naming conventions
|
||||
|
||||
- Use descriptive names that clearly indicate the purpose of each resource
|
||||
- Follow a consistent naming pattern across your organization
|
||||
- Include environment or team identifiers in names when appropriate
|
||||
|
||||
### Version control
|
||||
|
||||
- Store your Terraform configurations in version control (Git)
|
||||
- Use separate directories or workspaces for different environments
|
||||
- Document changes in commit messages
|
||||
|
||||
### State management
|
||||
|
||||
- Use remote state backends for team collaboration
|
||||
- Enable state locking to prevent concurrent modifications
|
||||
- Regularly back up your Terraform state files
|
||||
|
||||
### Security
|
||||
|
||||
- Never commit service account tokens or sensitive data to version control
|
||||
- Use environment variables or secret management tools for credentials
|
||||
- Rotate service account tokens regularly
|
||||
|
||||
## Next steps
|
||||
|
||||
Now that you have configured the Terraform provider, you can start managing knowledge graph resources:
|
||||
|
||||
- [Configure notification alerts](../notification-alerts/)
|
||||
- [Define suppressed assertions](../suppressed-assertions/)
|
||||
- [Create custom model rules](../custom-model-rules/)
|
||||
- [Set up log configurations](../log-configurations/)
|
||||
- [Configure thresholds](../thresholds/)
|
||||
- [Configure knowledge graph SLOs](../knowledge-graph-slo/)
|
||||
-696
@@ -1,696 +0,0 @@
|
||||
---
|
||||
description: Learn how to configure knowledge graph SLOs in Grafana using Terraform for entity-centric monitoring and root cause analysis
|
||||
menuTitle: Knowledge graph SLOs
|
||||
title: Configure knowledge graph SLOs using Terraform
|
||||
weight: 650
|
||||
keywords:
|
||||
- Terraform
|
||||
- Knowledge graph
|
||||
- SLO
|
||||
- Service Level Objectives
|
||||
- RCA workbench
|
||||
---
|
||||
|
||||
# Configure knowledge graph SLOs using Terraform
|
||||
|
||||
Service level objectives (SLOs) in the [knowledge graph](/docs/grafana-cloud/knowledge-graph/) provide entity-centric service level monitoring with integrated root cause analysis capabilities. By using the `grafana_slo_provenance` label with the value `asserts`, you can create SLOs that display the "asserts" badge in the UI and enable the **Open RCA workbench** button for seamless troubleshooting.
|
||||
|
||||
For details about creating and managing SLOs in the knowledge graph UI, refer to [Create and manage the knowledge graph SLOs](/docs/grafana-cloud/knowledge-graph/configure/manage-slos/).
|
||||
|
||||
## Overview
|
||||
|
||||
Knowledge graph SLOs extend standard Grafana SLOs with entity-centric monitoring and root cause analysis features:
|
||||
|
||||
- **Entity-centric monitoring:** SLOs are tied to specific services, applications, or infrastructure entities tracked by the knowledge graph
|
||||
- **RCA workbench integration:** The **Open RCA workbench** button enables deep-linking to pre-filtered troubleshooting views
|
||||
- **Knowledge graph provenance badge:** SLOs display an "asserts" badge instead of "provisioned" in the UI
|
||||
- **Search expressions:** Define custom search expressions to filter entities in RCA workbench when troubleshooting an SLO breach
|
||||
|
||||
## Before you begin
|
||||
|
||||
To create a knowledge graph SLO using Terraform, you need to:
|
||||
|
||||
- Configure the knowledge graph and have metrics flowing into Grafana Cloud
|
||||
- [Set up Terraform for the knowledge graph](../getting-started/)
|
||||
- Have knowledge of and experience with defining SLOs, SLIs, SLAs, and error budgets
|
||||
- Have an understanding of PromQL
|
||||
|
||||
## Create a basic knowledge graph SLO
|
||||
|
||||
Create a file named `kg-slo.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
# Basic knowledge graph SLO with entity-centric monitoring
|
||||
resource "grafana_slo" "kg_example" {
|
||||
name = "API Service Availability"
|
||||
description = "SLO managed by knowledge graph for entity-centric monitoring and RCA"
|
||||
|
||||
query {
|
||||
freeform {
|
||||
query = "sum(rate(http_requests_total{code!~\"5..\"}[$__rate_interval])) / sum(rate(http_requests_total[$__rate_interval]))"
|
||||
}
|
||||
type = "freeform"
|
||||
}
|
||||
|
||||
objectives {
|
||||
value = 0.995
|
||||
window = "30d"
|
||||
}
|
||||
|
||||
destination_datasource {
|
||||
uid = "grafanacloud-prom"
|
||||
}
|
||||
|
||||
# Knowledge graph integration labels
|
||||
# The grafana_slo_provenance label triggers knowledge graph-specific behavior:
|
||||
# - Displays "asserts" badge instead of "provisioned"
|
||||
# - Shows "Open RCA workbench" button in the SLO UI
|
||||
# - Enables correlation with knowledge graph entity-centric monitoring
|
||||
label {
|
||||
key = "grafana_slo_provenance"
|
||||
value = "asserts"
|
||||
}
|
||||
|
||||
label {
|
||||
key = "service_name"
|
||||
value = "api-service"
|
||||
}
|
||||
|
||||
# Search expression for RCA workbench
|
||||
# This enables the "Open RCA workbench" button to deep-link with pre-filtered context
|
||||
search_expression = "service=api-service"
|
||||
|
||||
alerting {
|
||||
fastburn {
|
||||
annotation {
|
||||
key = "name"
|
||||
value = "SLO Burn Rate Very High"
|
||||
}
|
||||
annotation {
|
||||
key = "description"
|
||||
value = "Error budget is burning too fast"
|
||||
}
|
||||
}
|
||||
slowburn {
|
||||
annotation {
|
||||
key = "name"
|
||||
value = "SLO Burn Rate High"
|
||||
}
|
||||
annotation {
|
||||
key = "description"
|
||||
value = "Error budget is burning too fast"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Configure an SLO with multiple entity labels
|
||||
|
||||
Configure SLOs with multiple entity labels for fine-grained filtering in RCA workbench:
|
||||
|
||||
```terraform
|
||||
# Knowledge graph SLO with comprehensive entity labels
|
||||
resource "grafana_slo" "payment_service" {
|
||||
name = "Payment Service Latency SLO"
|
||||
description = "Latency SLO for payment processing with team and environment context"
|
||||
|
||||
query {
|
||||
freeform {
|
||||
query = "histogram_quantile(0.99, sum(rate(http_request_duration_seconds_bucket{service=\"payment\"}[$__rate_interval])) by (le)) < 0.5"
|
||||
}
|
||||
type = "freeform"
|
||||
}
|
||||
|
||||
objectives {
|
||||
value = 0.99
|
||||
window = "7d"
|
||||
}
|
||||
|
||||
destination_datasource {
|
||||
uid = "grafanacloud-prom"
|
||||
}
|
||||
|
||||
# Knowledge graph provenance - required for RCA workbench integration
|
||||
label {
|
||||
key = "grafana_slo_provenance"
|
||||
value = "asserts"
|
||||
}
|
||||
|
||||
# Service identification
|
||||
label {
|
||||
key = "service_name"
|
||||
value = "payment-service"
|
||||
}
|
||||
|
||||
# Team ownership
|
||||
label {
|
||||
key = "team_name"
|
||||
value = "payments-team"
|
||||
}
|
||||
|
||||
# Environment
|
||||
label {
|
||||
key = "environment"
|
||||
value = "production"
|
||||
}
|
||||
|
||||
# Business unit
|
||||
label {
|
||||
key = "business_unit"
|
||||
value = "fintech"
|
||||
}
|
||||
|
||||
# Search expression with multiple filters
|
||||
search_expression = "service=payment-service AND environment=production"
|
||||
|
||||
alerting {
|
||||
fastburn {
|
||||
annotation {
|
||||
key = "name"
|
||||
value = "Payment Latency Critical"
|
||||
}
|
||||
annotation {
|
||||
key = "description"
|
||||
value = "Payment service P99 latency exceeding SLO - immediate attention required"
|
||||
}
|
||||
annotation {
|
||||
key = "runbook_url"
|
||||
value = "https://docs.example.com/runbooks/payment-latency"
|
||||
}
|
||||
}
|
||||
slowburn {
|
||||
annotation {
|
||||
key = "name"
|
||||
value = "Payment Latency Warning"
|
||||
}
|
||||
annotation {
|
||||
key = "description"
|
||||
value = "Payment service experiencing elevated latency"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Configure a Kubernetes service SLO
|
||||
|
||||
Configure knowledge graph SLOs for Kubernetes services with Pod and namespace context:
|
||||
|
||||
```terraform
|
||||
# Knowledge graph SLO for Kubernetes service
|
||||
resource "grafana_slo" "k8s_frontend" {
|
||||
name = "Frontend Service Availability"
|
||||
description = "Availability SLO for frontend service in Kubernetes"
|
||||
|
||||
query {
|
||||
freeform {
|
||||
query = "sum(rate(http_requests_total{namespace=\"frontend\",code!~\"5..\"}[$__rate_interval])) / sum(rate(http_requests_total{namespace=\"frontend\"}[$__rate_interval]))"
|
||||
}
|
||||
type = "freeform"
|
||||
}
|
||||
|
||||
objectives {
|
||||
value = 0.999
|
||||
window = "30d"
|
||||
}
|
||||
|
||||
destination_datasource {
|
||||
uid = "grafanacloud-prom"
|
||||
}
|
||||
|
||||
label {
|
||||
key = "grafana_slo_provenance"
|
||||
value = "asserts"
|
||||
}
|
||||
|
||||
label {
|
||||
key = "service_name"
|
||||
value = "frontend"
|
||||
}
|
||||
|
||||
label {
|
||||
key = "namespace"
|
||||
value = "frontend"
|
||||
}
|
||||
|
||||
label {
|
||||
key = "cluster"
|
||||
value = "prod-us-west-2"
|
||||
}
|
||||
|
||||
# Search expression targeting Kubernetes entities
|
||||
search_expression = "namespace=frontend AND cluster=prod-us-west-2"
|
||||
|
||||
alerting {
|
||||
fastburn {
|
||||
annotation {
|
||||
key = "name"
|
||||
value = "Frontend Service Critical"
|
||||
}
|
||||
annotation {
|
||||
key = "description"
|
||||
value = "Frontend service availability below SLO"
|
||||
}
|
||||
annotation {
|
||||
key = "severity"
|
||||
value = "critical"
|
||||
}
|
||||
}
|
||||
slowburn {
|
||||
annotation {
|
||||
key = "name"
|
||||
value = "Frontend Service Degraded"
|
||||
}
|
||||
annotation {
|
||||
key = "description"
|
||||
value = "Frontend service showing signs of degradation"
|
||||
}
|
||||
annotation {
|
||||
key = "severity"
|
||||
value = "warning"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Configure an API endpoint-specific SLO
|
||||
|
||||
Configure knowledge graph SLOs for specific API endpoints with request context:
|
||||
|
||||
```terraform
|
||||
# Knowledge graph SLO for critical API endpoint
|
||||
resource "grafana_slo" "checkout_api" {
|
||||
name = "Checkout API Availability"
|
||||
description = "Availability SLO for /api/checkout endpoint"
|
||||
|
||||
query {
|
||||
freeform {
|
||||
query = "sum(rate(http_requests_total{path=\"/api/checkout\",code!~\"5..\"}[$__rate_interval])) / sum(rate(http_requests_total{path=\"/api/checkout\"}[$__rate_interval]))"
|
||||
}
|
||||
type = "freeform"
|
||||
}
|
||||
|
||||
objectives {
|
||||
value = 0.9999
|
||||
window = "30d"
|
||||
}
|
||||
|
||||
destination_datasource {
|
||||
uid = "grafanacloud-prom"
|
||||
}
|
||||
|
||||
label {
|
||||
key = "grafana_slo_provenance"
|
||||
value = "asserts"
|
||||
}
|
||||
|
||||
label {
|
||||
key = "service_name"
|
||||
value = "checkout-service"
|
||||
}
|
||||
|
||||
label {
|
||||
key = "endpoint"
|
||||
value = "/api/checkout"
|
||||
}
|
||||
|
||||
label {
|
||||
key = "criticality"
|
||||
value = "high"
|
||||
}
|
||||
|
||||
# Search expression with endpoint context
|
||||
search_expression = "service=checkout-service AND path=/api/checkout"
|
||||
|
||||
alerting {
|
||||
fastburn {
|
||||
annotation {
|
||||
key = "name"
|
||||
value = "Checkout API Critical Failure"
|
||||
}
|
||||
annotation {
|
||||
key = "description"
|
||||
value = "Checkout API experiencing high error rates - revenue impact"
|
||||
}
|
||||
annotation {
|
||||
key = "severity"
|
||||
value = "critical"
|
||||
}
|
||||
annotation {
|
||||
key = "alert_priority"
|
||||
value = "P0"
|
||||
}
|
||||
}
|
||||
slowburn {
|
||||
annotation {
|
||||
key = "name"
|
||||
value = "Checkout API Degradation"
|
||||
}
|
||||
annotation {
|
||||
key = "description"
|
||||
value = "Checkout API showing elevated error rates"
|
||||
}
|
||||
annotation {
|
||||
key = "severity"
|
||||
value = "warning"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Configure a multi-environment SLO
|
||||
|
||||
Manage knowledge graph SLOs across multiple environments using Terraform workspaces or modules:
|
||||
|
||||
```terraform
|
||||
# Variable for environment-specific configuration
|
||||
variable "environment" {
|
||||
description = "Environment name"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "slo_target" {
|
||||
description = "SLO target percentage"
|
||||
type = number
|
||||
}
|
||||
|
||||
# Environment-aware knowledge graph SLO
|
||||
resource "grafana_slo" "api_service" {
|
||||
name = "${var.environment} - API Service Availability"
|
||||
description = "API service availability SLO for ${var.environment} environment"
|
||||
|
||||
query {
|
||||
freeform {
|
||||
query = "sum(rate(http_requests_total{environment=\"${var.environment}\",code!~\"5..\"}[$__rate_interval])) / sum(rate(http_requests_total{environment=\"${var.environment}\"}[$__rate_interval]))"
|
||||
}
|
||||
type = "freeform"
|
||||
}
|
||||
|
||||
objectives {
|
||||
value = var.slo_target
|
||||
window = "30d"
|
||||
}
|
||||
|
||||
destination_datasource {
|
||||
uid = "grafanacloud-prom"
|
||||
}
|
||||
|
||||
label {
|
||||
key = "grafana_slo_provenance"
|
||||
value = "asserts"
|
||||
}
|
||||
|
||||
label {
|
||||
key = "service_name"
|
||||
value = "api-service"
|
||||
}
|
||||
|
||||
label {
|
||||
key = "environment"
|
||||
value = var.environment
|
||||
}
|
||||
|
||||
search_expression = "service=api-service AND environment=${var.environment}"
|
||||
|
||||
alerting {
|
||||
fastburn {
|
||||
annotation {
|
||||
key = "name"
|
||||
value = "${var.environment} API Critical"
|
||||
}
|
||||
annotation {
|
||||
key = "description"
|
||||
value = "API service in ${var.environment} experiencing critical errors"
|
||||
}
|
||||
}
|
||||
slowburn {
|
||||
annotation {
|
||||
key = "name"
|
||||
value = "${var.environment} API Warning"
|
||||
}
|
||||
annotation {
|
||||
key = "description"
|
||||
value = "API service in ${var.environment} showing elevated errors"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Resource reference
|
||||
|
||||
### `grafana_slo` with knowledge graph provenance
|
||||
|
||||
When creating knowledge graph-managed SLOs, the `grafana_slo` resource requires the `grafana_slo_provenance` label set to `asserts` to enable RCA workbench integration.
|
||||
|
||||
#### Required knowledge graph configuration
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| ------------------------------ | -------- | ----------- | -------------------------------------------------------------------------------------------------- |
|
||||
| `grafana_slo_provenance` label | `string` | Yes | Must be set to `asserts` to enable knowledge graph-specific features and RCA workbench integration |
|
||||
| `search_expression` | `string` | Recommended | Search expression for filtering entities in RCA workbench |
|
||||
|
||||
#### Key arguments for knowledge graph SLOs
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| ------------------------ | -------------- | -------- | ----------------------------------------------------------------- |
|
||||
| `name` | `string` | Yes | The name of the SLO |
|
||||
| `description` | `string` | No | Description of the SLO purpose and scope |
|
||||
| `query` | `object` | Yes | Query configuration defining how SLO is calculated |
|
||||
| `objectives` | `object` | Yes | Target objectives including value and time window |
|
||||
| `destination_datasource` | `object` | Yes | Destination data source for SLO metrics |
|
||||
| `label` | `list(object)` | Yes | Labels for the SLO, must include `grafana_slo_provenance=asserts` |
|
||||
| `search_expression` | `string` | No | Search expression for RCA workbench filtering |
|
||||
| `alerting` | `object` | No | Alerting configuration for fast burn and slow burn alerts |
|
||||
|
||||
#### Query block
|
||||
|
||||
The `query` block supports the following:
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| ---------- | -------- | -------- | --------------------------------------------------------- |
|
||||
| `type` | `string` | Yes | Query type, typically `freeform` for knowledge graph SLOs |
|
||||
| `freeform` | `object` | Yes | Freeform query configuration |
|
||||
|
||||
The `freeform` block supports:
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| ------- | -------- | -------- | -------------------------------- |
|
||||
| `query` | `string` | Yes | PromQL query for SLO calculation |
|
||||
|
||||
#### Objectives block
|
||||
|
||||
The `objectives` block supports the following:
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| -------- | -------- | -------- | --------------------------------------------------- |
|
||||
| `value` | `number` | Yes | Target SLO value (for example, 0.995 for 99.5%) |
|
||||
| `window` | `string` | Yes | Time window for SLO evaluation (for example, "30d") |
|
||||
|
||||
#### Label block
|
||||
|
||||
Each `label` block supports the following:
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| ------- | -------- | -------- | ----------- |
|
||||
| `key` | `string` | Yes | Label key |
|
||||
| `value` | `string` | Yes | Label value |
|
||||
|
||||
**Required label for knowledge graph SLOs:**
|
||||
|
||||
- `grafana_slo_provenance` = `asserts` (enables knowledge graph features)
|
||||
|
||||
**Recommended labels for entity tracking:**
|
||||
|
||||
- `service_name` - Name of the service
|
||||
- `team_name` - Team responsible for the service
|
||||
- `environment` - Environment (prod, staging, development)
|
||||
- `namespace` - Kubernetes namespace
|
||||
- `cluster` - Kubernetes cluster name
|
||||
|
||||
<!-- vale Grafana.Gerunds = NO -->
|
||||
|
||||
#### Alerting block
|
||||
|
||||
The `alerting` block supports the following:
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| ---------- | -------- | -------- | ---------------------------------- |
|
||||
| `fastburn` | `object` | No | Fast burn rate alert configuration |
|
||||
| `slowburn` | `object` | No | Slow burn rate alert configuration |
|
||||
|
||||
Each alert block (`fastburn`, `slowburn`) supports:
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| ------------ | -------------- | -------- | ------------------------------- |
|
||||
| `annotation` | `list(object)` | No | Annotations to add to the alert |
|
||||
|
||||
Each `annotation` block supports:
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| ------- | -------- | -------- | ---------------- |
|
||||
| `key` | `string` | Yes | Annotation key |
|
||||
| `value` | `string` | Yes | Annotation value |
|
||||
|
||||
Common annotation keys:
|
||||
|
||||
- `name` - Alert name
|
||||
- `description` - Alert description
|
||||
- `severity` - Alert severity level
|
||||
- `runbook_url` - Link to runbook documentation
|
||||
<!-- vale Grafana.Gerunds = YES -->
|
||||
|
||||
#### Example
|
||||
|
||||
```terraform
|
||||
resource "grafana_slo" "kg_example" {
|
||||
name = "My Service SLO"
|
||||
description = "SLO with knowledge graph RCA integration"
|
||||
|
||||
query {
|
||||
freeform {
|
||||
query = "sum(rate(http_requests_total{code!~\"5..\"}[$__rate_interval])) / sum(rate(http_requests_total[$__rate_interval]))"
|
||||
}
|
||||
type = "freeform"
|
||||
}
|
||||
|
||||
objectives {
|
||||
value = 0.995
|
||||
window = "30d"
|
||||
}
|
||||
|
||||
destination_datasource {
|
||||
uid = "grafanacloud-prom"
|
||||
}
|
||||
|
||||
label {
|
||||
key = "grafana_slo_provenance"
|
||||
value = "asserts"
|
||||
}
|
||||
|
||||
label {
|
||||
key = "service_name"
|
||||
value = "my-service"
|
||||
}
|
||||
|
||||
search_expression = "service=my-service"
|
||||
|
||||
alerting {
|
||||
fastburn {
|
||||
annotation {
|
||||
key = "name"
|
||||
value = "SLO Fast Burn"
|
||||
}
|
||||
}
|
||||
slowburn {
|
||||
annotation {
|
||||
key = "name"
|
||||
value = "SLO Slow Burn"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Best practices
|
||||
|
||||
Follow these best practices when setting up knowledge graph SLOs.
|
||||
|
||||
### Use the knowledge graph provenance label
|
||||
|
||||
- Always include the `grafana_slo_provenance` label with value `asserts` for knowledge graph-managed SLOs
|
||||
- This label enables the "asserts" badge in the UI instead of the "provisioned" badge
|
||||
- It also enables the **Open RCA workbench** button for troubleshooting SLO breaches
|
||||
|
||||
### Define search expressions
|
||||
|
||||
- Define meaningful search expressions that filter relevant entities in RCA workbench
|
||||
- The search expression defines which entities populate RCA workbench when you troubleshoot an SLO breach
|
||||
- Use entity attributes like service name, environment, namespace, and cluster
|
||||
- Combine multiple filters with `AND` operators for precise filtering
|
||||
- Test search expressions in RCA workbench before codifying them in Terraform
|
||||
|
||||
### Add entity labels
|
||||
|
||||
- Add descriptive labels to track service ownership, environment, and criticality
|
||||
- Use consistent label naming conventions across all SLOs
|
||||
- Include team names to enable quick identification of ownership
|
||||
- Tag critical business services with appropriate labels
|
||||
|
||||
### Set SLO targets
|
||||
|
||||
- Set realistic SLO targets based on service requirements and capabilities
|
||||
- Use higher targets (0.999+) for critical user-facing services
|
||||
- Consider different targets for different environments (production versus staging)
|
||||
- Review and adjust targets based on actual service performance
|
||||
|
||||
### Add alert annotations
|
||||
|
||||
- Add comprehensive descriptions to help on-call engineers understand the alert
|
||||
- Include runbook URLs in annotations for quick access to troubleshooting guides
|
||||
- Set appropriate severity levels (critical, warning) based on business impact
|
||||
- Customize alert names to clearly identify the affected service and issue
|
||||
|
||||
### Configure queries
|
||||
|
||||
- Use PromQL queries that accurately represent service health
|
||||
- Exclude expected error codes, such as 404, from error calculations when appropriate
|
||||
- Leverage rate intervals with `$__rate_interval` for dynamic time range support
|
||||
- Test queries in Grafana before adding them to Terraform configurations
|
||||
|
||||
### Set compliance windows
|
||||
|
||||
- Use 30-day windows for production SLOs to align with monthly reporting
|
||||
- Consider shorter windows (7d) for development or testing environments
|
||||
- Ensure compliance windows align with business requirements and error budget policies
|
||||
|
||||
## Verify the configuration
|
||||
|
||||
After applying the Terraform configuration, verify that:
|
||||
|
||||
- SLOs are created in your Grafana Cloud stack
|
||||
- SLOs appear in **Observability > SLO** with the "asserts" badge
|
||||
- The **Open RCA workbench** button is visible when you expand **Objective** for an SLO
|
||||
- You can select a time range in the **Error Budget Burndown** panel and click **Open in RCA workbench**
|
||||
- Search expressions correctly filter entities in RCA workbench
|
||||
- Fast burn and slow burn alerts are configured with appropriate thresholds
|
||||
- Labels are correctly applied and visible in the SLO details
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
Follow these troubleshooting steps if you experience issues when setting up knowledge graph SLOs.
|
||||
|
||||
### SLO shows "provisioned" instead of "asserts" badge
|
||||
|
||||
Ensure the `grafana_slo_provenance` label is set to `asserts`:
|
||||
|
||||
```terraform
|
||||
label {
|
||||
key = "grafana_slo_provenance"
|
||||
value = "asserts"
|
||||
}
|
||||
```
|
||||
|
||||
### Open RCA workbench button not appearing
|
||||
|
||||
- Verify the `search_expression` field is populated
|
||||
- The **Open RCA workbench** button appears after you have added a search expression in the **RCA workbench Context** section
|
||||
- Ensure the search expression uses valid entity attributes
|
||||
- Check that the knowledge graph is properly configured and receiving data
|
||||
|
||||
### Alerts not triggering
|
||||
|
||||
- Verify the PromQL query returns valid results in Grafana
|
||||
- Check that the destination data source is correctly configured
|
||||
- Ensure alerting blocks are properly defined with annotations
|
||||
|
||||
## Related documentation
|
||||
|
||||
- [Create and manage knowledge graph SLOs](/docs/grafana-cloud/knowledge-graph/configure/manage-slos/)
|
||||
- [Troubleshoot an SLO breach with the knowledge graph](/docs/grafana-cloud/knowledge-graph/troubleshoot-infra-apps/slos/)
|
||||
- [Get started with Terraform for the knowledge graph](../getting-started/)
|
||||
- [Introduction to Grafana SLO](/docs/grafana-cloud/alerting-and-irm/slo/introduction/)
|
||||
- [Configure notifications in the knowledge graph](/docs/grafana-cloud/knowledge-graph/configure/notifications/)
|
||||
-290
@@ -1,290 +0,0 @@
|
||||
---
|
||||
description: Configure log correlation for Knowledge Graph using Terraform
|
||||
menuTitle: Log configurations
|
||||
title: Configure log correlation using Terraform
|
||||
weight: 500
|
||||
keywords:
|
||||
- Terraform
|
||||
- Knowledge Graph
|
||||
- Log Configuration
|
||||
- Log Correlation
|
||||
- Loki
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/terraform/terraform-knowledge-graph/log-configurations/
|
||||
---
|
||||
|
||||
# Configure log correlation using Terraform
|
||||
|
||||
Log configurations in [Knowledge Graph](/docs/grafana-cloud/knowledge-graph/) allow you to define how log data is queried and correlated with entities. You can specify data sources, entity matching rules, label mappings, and filtering options for spans and traces.
|
||||
|
||||
For information about configuring log correlation in the Knowledge Graph UI, refer to [Configure logs correlation](/docs/grafana-cloud/knowledge-graph/configure/logs-correlation/).
|
||||
|
||||
## Basic log configuration
|
||||
|
||||
Create a file named `log-configs.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
# Basic log configuration for services
|
||||
resource "grafana_asserts_log_config" "production" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "production"
|
||||
priority = 1000
|
||||
default_config = false
|
||||
data_source_uid = "grafanacloud-logs"
|
||||
error_label = "error"
|
||||
|
||||
match {
|
||||
property = "asserts_entity_type"
|
||||
op = "EQUALS"
|
||||
values = ["Service"]
|
||||
}
|
||||
|
||||
match {
|
||||
property = "environment"
|
||||
op = "EQUALS"
|
||||
values = ["production", "staging"]
|
||||
}
|
||||
|
||||
entity_property_to_log_label_mapping = {
|
||||
"otel_namespace" = "service_namespace"
|
||||
"otel_service" = "service_name"
|
||||
"environment" = "env"
|
||||
"site" = "region"
|
||||
}
|
||||
|
||||
filter_by_span_id = true
|
||||
filter_by_trace_id = true
|
||||
}
|
||||
```
|
||||
|
||||
## Log configuration with multiple match rules
|
||||
|
||||
Configure log correlation with multiple entity matching criteria:
|
||||
|
||||
```terraform
|
||||
# Development environment log configuration
|
||||
resource "grafana_asserts_log_config" "development" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "development"
|
||||
priority = 2000
|
||||
default_config = true
|
||||
data_source_uid = "elasticsearch-dev"
|
||||
error_label = "error"
|
||||
|
||||
match {
|
||||
property = "asserts_entity_type"
|
||||
op = "EQUALS"
|
||||
values = ["Service"]
|
||||
}
|
||||
|
||||
match {
|
||||
property = "environment"
|
||||
op = "EQUALS"
|
||||
values = ["development", "testing"]
|
||||
}
|
||||
|
||||
match {
|
||||
property = "site"
|
||||
op = "EQUALS"
|
||||
values = ["us-east-1"]
|
||||
}
|
||||
|
||||
match {
|
||||
property = "service"
|
||||
op = "EQUALS"
|
||||
values = ["api"]
|
||||
}
|
||||
|
||||
entity_property_to_log_label_mapping = {
|
||||
"otel_namespace" = "service_namespace"
|
||||
"otel_service" = "service_name"
|
||||
"environment" = "env"
|
||||
"site" = "region"
|
||||
"service" = "app"
|
||||
}
|
||||
|
||||
filter_by_span_id = true
|
||||
filter_by_trace_id = true
|
||||
}
|
||||
```
|
||||
|
||||
## Minimal log configuration
|
||||
|
||||
Create a minimal configuration for all entities:
|
||||
|
||||
```terraform
|
||||
# Minimal configuration for all entities
|
||||
resource "grafana_asserts_log_config" "minimal" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "minimal"
|
||||
priority = 3000
|
||||
default_config = false
|
||||
data_source_uid = "loki-minimal"
|
||||
|
||||
match {
|
||||
property = "asserts_entity_type"
|
||||
op = "IS_NOT_NULL"
|
||||
values = []
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Advanced log configuration with complex match rules
|
||||
|
||||
Configure logs with multiple operations and advanced match rules:
|
||||
|
||||
```terraform
|
||||
# Advanced configuration with multiple operations
|
||||
resource "grafana_asserts_log_config" "advanced" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "advanced"
|
||||
priority = 1500
|
||||
default_config = false
|
||||
data_source_uid = "loki-advanced"
|
||||
error_label = "level"
|
||||
|
||||
match {
|
||||
property = "service_type"
|
||||
op = "CONTAINS"
|
||||
values = ["web", "api"]
|
||||
}
|
||||
|
||||
match {
|
||||
property = "environment"
|
||||
op = "NOT_EQUALS"
|
||||
values = ["test"]
|
||||
}
|
||||
|
||||
match {
|
||||
property = "team"
|
||||
op = "IS_NOT_NULL"
|
||||
values = []
|
||||
}
|
||||
|
||||
entity_property_to_log_label_mapping = {
|
||||
"service_type" = "type"
|
||||
"team" = "owner"
|
||||
"environment" = "env"
|
||||
"version" = "app_version"
|
||||
}
|
||||
|
||||
filter_by_span_id = true
|
||||
filter_by_trace_id = false
|
||||
}
|
||||
```
|
||||
|
||||
## Resource reference
|
||||
|
||||
### `grafana_asserts_log_config`
|
||||
|
||||
Manage Knowledge Graph log configurations through the Grafana API.
|
||||
|
||||
#### Arguments
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| -------------------------------------- | -------------- | -------- | -------------------------------------------------------------------------------------------- |
|
||||
| `name` | `string` | Yes | The name of the log configuration. This field is immutable and forces recreation if changed. |
|
||||
| `priority` | `number` | Yes | Priority of the log configuration. Higher priority configurations are evaluated first. |
|
||||
| `default_config` | `bool` | Yes | Whether this is the default configuration. Default configurations cannot be deleted. |
|
||||
| `data_source_uid` | `string` | Yes | DataSource UID to be queried (for example, a Loki instance). |
|
||||
| `match` | `list(object)` | No | List of match rules for entity properties. Refer to [match block](#match-block) for details. |
|
||||
| `error_label` | `string` | No | Label name used to identify error logs. |
|
||||
| `entity_property_to_log_label_mapping` | `map(string)` | No | Mapping of entity properties to log labels for correlation. |
|
||||
| `filter_by_span_id` | `bool` | No | Whether to filter logs by span ID for distributed tracing correlation. |
|
||||
| `filter_by_trace_id` | `bool` | No | Whether to filter logs by trace ID for distributed tracing correlation. |
|
||||
|
||||
#### Match block
|
||||
|
||||
Each `match` block supports the following:
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| ---------- | -------------- | -------- | ------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `property` | `string` | Yes | Entity property to match against. |
|
||||
| `op` | `string` | Yes | Operation to use for matching. One of: `EQUALS`, `NOT_EQUALS`, `CONTAINS`, `DOES_NOT_CONTAIN`, `IS_NULL`, `IS_NOT_NULL`. |
|
||||
| `values` | `list(string)` | Yes | Values to match against. Can be empty for `IS_NULL` and `IS_NOT_NULL` operations. |
|
||||
|
||||
#### Example
|
||||
|
||||
```terraform
|
||||
resource "grafana_asserts_log_config" "example" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "example-logs"
|
||||
priority = 1000
|
||||
default_config = false
|
||||
data_source_uid = "loki-prod"
|
||||
error_label = "level"
|
||||
|
||||
match {
|
||||
property = "asserts_entity_type"
|
||||
op = "EQUALS"
|
||||
values = ["Service", "Pod"]
|
||||
}
|
||||
|
||||
entity_property_to_log_label_mapping = {
|
||||
"service" = "app"
|
||||
"namespace" = "k8s_namespace"
|
||||
"environment" = "env"
|
||||
}
|
||||
|
||||
filter_by_span_id = true
|
||||
filter_by_trace_id = true
|
||||
}
|
||||
```
|
||||
|
||||
## Best practices
|
||||
|
||||
### Priority management
|
||||
|
||||
- Assign lower priority numbers to more specific configurations
|
||||
- Higher priority configurations are evaluated first
|
||||
- Use consistent priority ranges for different configuration types
|
||||
- Document the reasoning behind priority assignments
|
||||
|
||||
### Data source configuration
|
||||
|
||||
- Ensure the data source UID matches your actual Loki or log aggregation system
|
||||
- Test data source connectivity before applying configurations
|
||||
- Use descriptive names for log configurations to indicate their purpose
|
||||
- Consider using separate data sources for different environments
|
||||
|
||||
### Label mapping strategy
|
||||
|
||||
- Map entity properties consistently across all log configurations
|
||||
- Use meaningful log label names that match your logging standards
|
||||
- Document the mapping relationships in configuration comments
|
||||
- Verify that mapped labels exist in your log data
|
||||
|
||||
### Match rules design
|
||||
|
||||
- Start with broad match rules and refine based on needs
|
||||
- Use specific property names that exist in your entity model
|
||||
- Test match rules with sample data before deploying
|
||||
- Combine multiple match rules for precise entity targeting
|
||||
|
||||
### Distributed trace integration
|
||||
|
||||
- Enable `filter_by_span_id` and `filter_by_trace_id` when using OpenTelemetry
|
||||
- Ensure your logs contain the appropriate trace and span ID labels
|
||||
- Use consistent label names for trace IDs across your logging infrastructure
|
||||
- Test trace correlation to verify it works as expected
|
||||
|
||||
## Validation
|
||||
|
||||
After applying the Terraform configuration, verify that:
|
||||
|
||||
- Log configurations are created in your Knowledge Graph instance
|
||||
- Configurations appear in the Knowledge Graph UI under **Observability > Configuration > Logs**
|
||||
- Log correlation works when drilling down from entities
|
||||
- Label mappings correctly translate entity properties to log labels
|
||||
- Match rules properly filter entities
|
||||
- Trace and span ID filtering works for distributed tracing
|
||||
|
||||
## Related documentation
|
||||
|
||||
- [Configure logs correlation in Knowledge Graph](/docs/grafana-cloud/knowledge-graph/configure/logs-correlation/)
|
||||
- [Get started with Terraform for Knowledge Graph](../getting-started/)
|
||||
- [Loki documentation](/docs/loki/latest/)
|
||||
-224
@@ -1,224 +0,0 @@
|
||||
---
|
||||
description: Configure notification alerts for Knowledge Graph using Terraform
|
||||
menuTitle: Notification alerts
|
||||
title: Configure notification alerts using Terraform
|
||||
weight: 200
|
||||
keywords:
|
||||
- Terraform
|
||||
- Knowledge Graph
|
||||
- Notification Alerts
|
||||
- Alert Configuration
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/terraform/terraform-knowledge-graph/notification-alerts/
|
||||
---
|
||||
|
||||
# Configure notification alerts using Terraform
|
||||
|
||||
Notification alerts configurations in [Knowledge Graph](/docs/grafana-cloud/knowledge-graph/) allow you to manage how alerts are processed and routed. You can specify match labels to filter alerts, add custom labels, set duration requirements, and control silencing.
|
||||
|
||||
For information about configuring notification alerts in the Knowledge Graph UI, refer to [Configure notifications](/docs/grafana-cloud/knowledge-graph/configure/notifications/).
|
||||
|
||||
## Basic notification alerts configuration
|
||||
|
||||
Create a file named `alert-configs.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
# Basic alert configuration with silencing
|
||||
resource "grafana_asserts_notification_alerts_config" "prometheus_remote_storage_failures" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "PrometheusRemoteStorageFailures"
|
||||
|
||||
match_labels = {
|
||||
alertname = "PrometheusRemoteStorageFailures"
|
||||
alertgroup = "prometheus.alerts"
|
||||
asserts_env = "prod"
|
||||
}
|
||||
|
||||
silenced = true
|
||||
}
|
||||
|
||||
# High severity alert with specific job and context matching
|
||||
resource "grafana_asserts_notification_alerts_config" "error_buildup_notify" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "ErrorBuildupNotify"
|
||||
|
||||
match_labels = {
|
||||
alertname = "ErrorBuildup"
|
||||
job = "acai"
|
||||
asserts_request_type = "inbound"
|
||||
asserts_request_context = "/auth"
|
||||
}
|
||||
|
||||
silenced = false
|
||||
}
|
||||
```
|
||||
|
||||
## Notification alerts with additional labels and duration
|
||||
|
||||
Configure alerts with custom labels and timing requirements:
|
||||
|
||||
```terraform
|
||||
# Alert with additional labels and custom duration
|
||||
resource "grafana_asserts_notification_alerts_config" "payment_test_alert" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "PaymentTestAlert"
|
||||
|
||||
match_labels = {
|
||||
alertname = "PaymentTestAlert"
|
||||
additional_labels = "asserts_severity=~\"critical\""
|
||||
alertgroup = "alex-k8s-integration-test.alerts"
|
||||
}
|
||||
|
||||
alert_labels = {
|
||||
testing = "onetwothree"
|
||||
}
|
||||
|
||||
duration = "5m"
|
||||
silenced = false
|
||||
}
|
||||
```
|
||||
|
||||
## Latency and performance notification alerts
|
||||
|
||||
Monitor and alert on latency and performance issues:
|
||||
|
||||
```terraform
|
||||
# Latency alert for shipping service
|
||||
resource "grafana_asserts_notification_alerts_config" "high_shipping_latency" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "high shipping latency"
|
||||
|
||||
match_labels = {
|
||||
alertname = "LatencyP99ErrorBuildup"
|
||||
job = "shipping"
|
||||
asserts_request_type = "inbound"
|
||||
}
|
||||
|
||||
silenced = false
|
||||
}
|
||||
|
||||
# CPU throttling alert with warning severity
|
||||
resource "grafana_asserts_notification_alerts_config" "cpu_throttling_sustained" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "CPUThrottlingSustained"
|
||||
|
||||
match_labels = {
|
||||
alertname = "CPUThrottlingSustained"
|
||||
additional_labels = "asserts_severity=~\"warning\""
|
||||
}
|
||||
|
||||
silenced = true
|
||||
}
|
||||
```
|
||||
|
||||
## Infrastructure and service notification alerts
|
||||
|
||||
Configure alerts for infrastructure components and services:
|
||||
|
||||
```terraform
|
||||
# Ingress error rate alert
|
||||
resource "grafana_asserts_notification_alerts_config" "ingress_error" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "ingress error"
|
||||
|
||||
match_labels = {
|
||||
alertname = "ErrorRatioBreach"
|
||||
job = "ingress-nginx-controller-metrics"
|
||||
asserts_request_type = "inbound"
|
||||
}
|
||||
|
||||
silenced = false
|
||||
}
|
||||
|
||||
# MySQL Galera cluster alert
|
||||
resource "grafana_asserts_notification_alerts_config" "mysql_galera_not_ready" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "MySQLGaleraNotReady"
|
||||
|
||||
match_labels = {
|
||||
alertname = "MySQLGaleraNotReady"
|
||||
}
|
||||
|
||||
silenced = false
|
||||
}
|
||||
```
|
||||
|
||||
## Resource reference
|
||||
|
||||
### `grafana_asserts_notification_alerts_config`
|
||||
|
||||
Manage Knowledge Graph notification alerts configurations through the Grafana API.
|
||||
|
||||
#### Arguments
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| -------------- | ------------- | -------- | ----------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `name` | `string` | Yes | The name of the notification alerts configuration. This field is immutable and forces recreation if changed. |
|
||||
| `match_labels` | `map(string)` | No | Labels to match for this notification alerts configuration. Used to filter which alerts this configuration applies to. |
|
||||
| `alert_labels` | `map(string)` | No | Labels to add to alerts generated by this notification alerts configuration. |
|
||||
| `duration` | `string` | No | Duration for which the condition must be true before firing (for example, '5m', '30s'). Maps to 'for' in Knowledge Graph API. |
|
||||
| `silenced` | `bool` | No | Whether this notification alerts configuration is silenced. Defaults to `false`. |
|
||||
|
||||
#### Example
|
||||
|
||||
```terraform
|
||||
resource "grafana_asserts_notification_alerts_config" "example" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "ExampleAlert"
|
||||
|
||||
match_labels = {
|
||||
alertname = "HighCPUUsage"
|
||||
job = "monitoring"
|
||||
}
|
||||
|
||||
alert_labels = {
|
||||
severity = "warning"
|
||||
team = "platform"
|
||||
}
|
||||
|
||||
duration = "5m"
|
||||
silenced = false
|
||||
}
|
||||
```
|
||||
|
||||
## Best practices
|
||||
|
||||
### Label management
|
||||
|
||||
- Use specific and meaningful labels in `match_labels` to ensure precise alert filtering
|
||||
- Leverage existing label conventions from your monitoring setup
|
||||
- Consider using `asserts_env` and `asserts_site` labels for multi-environment setups
|
||||
|
||||
### Silence strategy
|
||||
|
||||
- Use the `silenced` parameter for temporary suppression rather than deleting notification alerts configurations
|
||||
- Document the reason for silencing in your Terraform configuration comments
|
||||
- Regularly review silenced configurations to ensure they're still needed
|
||||
|
||||
### Duration configuration
|
||||
|
||||
- Set appropriate duration values based on your alerting requirements
|
||||
- Consider the nature of the monitored condition when choosing duration
|
||||
- Use consistent duration formats across similar alert types
|
||||
|
||||
## Validation
|
||||
|
||||
After applying the Terraform configuration, verify that:
|
||||
|
||||
- Notification alerts configurations are created in your Knowledge Graph instance
|
||||
- Configurations appear in the Knowledge Graph UI under **Observability > Rules > Notify**
|
||||
- Match labels correctly filter the intended alerts
|
||||
- Custom labels are properly applied to generated alerts
|
||||
|
||||
## Related documentation
|
||||
|
||||
- [Configure notifications in Knowledge Graph](/docs/grafana-cloud/knowledge-graph/configure/notifications/)
|
||||
- [Get started with Terraform for Knowledge Graph](../getting-started/)
|
||||
- [Configure alerts in Knowledge Graph](/docs/grafana-cloud/knowledge-graph/configure/alerts/)
|
||||
-308
@@ -1,308 +0,0 @@
|
||||
---
|
||||
description: Configure suppressed assertions for Knowledge Graph using Terraform
|
||||
menuTitle: Suppressed assertions
|
||||
title: Configure suppressed assertions using Terraform
|
||||
weight: 300
|
||||
keywords:
|
||||
- Terraform
|
||||
- Knowledge Graph
|
||||
- Suppressed Assertions
|
||||
- Alert Suppression
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/terraform/terraform-knowledge-graph/suppressed-assertions/
|
||||
---
|
||||
|
||||
# Configure suppressed assertions using Terraform
|
||||
|
||||
Suppressed assertions configurations allow you to disable specific alerts or assertions based on label matching in [Knowledge Graph](/docs/grafana-cloud/knowledge-graph/). This is useful for maintenance windows, test environments, or when you want to temporarily suppress certain types of alerts.
|
||||
|
||||
For information about suppressing insights in the Knowledge Graph UI, refer to [Suppress insights](/docs/grafana-cloud/knowledge-graph/troubleshoot-infra-apps/suppress-insights/).
|
||||
|
||||
## Basic suppressed assertions configuration
|
||||
|
||||
Create a file named `suppressed-assertions.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
# Basic suppressed alert configuration for maintenance
|
||||
resource "grafana_asserts_suppressed_assertions_config" "maintenance_window" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "MaintenanceWindow"
|
||||
|
||||
match_labels = {
|
||||
service = "api-service"
|
||||
maintenance = "true"
|
||||
}
|
||||
}
|
||||
|
||||
# Suppress specific alertname during deployment
|
||||
resource "grafana_asserts_suppressed_assertions_config" "deployment_suppression" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "DeploymentSuppression"
|
||||
|
||||
match_labels = {
|
||||
alertname = "HighLatency"
|
||||
job = "web-service"
|
||||
env = "staging"
|
||||
}
|
||||
}
|
||||
|
||||
# Suppress alerts for specific test environment
|
||||
resource "grafana_asserts_suppressed_assertions_config" "test_environment_suppression" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "TestEnvironmentSuppression"
|
||||
|
||||
match_labels = {
|
||||
alertgroup = "test.alerts"
|
||||
environment = "test"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Service-specific suppression configurations
|
||||
|
||||
Suppress alerts for specific services during maintenance or operational activities:
|
||||
|
||||
```terraform
|
||||
# Suppress alerts for specific services during maintenance
|
||||
resource "grafana_asserts_suppressed_assertions_config" "api_service_maintenance" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "APIServiceMaintenance"
|
||||
|
||||
match_labels = {
|
||||
service = "api-gateway"
|
||||
job = "api-gateway"
|
||||
maintenance = "scheduled"
|
||||
}
|
||||
}
|
||||
|
||||
# Suppress database alerts during backup operations
|
||||
resource "grafana_asserts_suppressed_assertions_config" "database_backup" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "DatabaseBackupSuppression"
|
||||
|
||||
match_labels = {
|
||||
service = "postgresql"
|
||||
job = "postgres-exporter"
|
||||
backup_mode = "active"
|
||||
}
|
||||
}
|
||||
|
||||
# Suppress monitoring system alerts during updates
|
||||
resource "grafana_asserts_suppressed_assertions_config" "monitoring_update" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "MonitoringSystemUpdate"
|
||||
|
||||
match_labels = {
|
||||
service = "prometheus"
|
||||
job = "prometheus"
|
||||
update = "in_progress"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Environment and team-based suppression
|
||||
|
||||
Create suppression rules based on environment or team:
|
||||
|
||||
```terraform
|
||||
# Suppress all alerts for development environment
|
||||
resource "grafana_asserts_suppressed_assertions_config" "dev_environment" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "DevelopmentEnvironmentSuppression"
|
||||
|
||||
match_labels = {
|
||||
environment = "development"
|
||||
team = "platform"
|
||||
}
|
||||
}
|
||||
|
||||
# Suppress alerts for specific team during their maintenance window
|
||||
resource "grafana_asserts_suppressed_assertions_config" "team_maintenance" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "TeamMaintenanceWindow"
|
||||
|
||||
match_labels = {
|
||||
team = "backend"
|
||||
maintenance = "team_scheduled"
|
||||
timezone = "UTC"
|
||||
}
|
||||
}
|
||||
|
||||
# Suppress alerts for staging environment during testing
|
||||
resource "grafana_asserts_suppressed_assertions_config" "staging_testing" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "StagingTestingSuppression"
|
||||
|
||||
match_labels = {
|
||||
environment = "staging"
|
||||
testing = "automated"
|
||||
job = "integration-tests"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Alert type and severity-based suppression
|
||||
|
||||
Suppress alerts based on their type or severity:
|
||||
|
||||
```terraform
|
||||
# Suppress low severity alerts during business hours
|
||||
resource "grafana_asserts_suppressed_assertions_config" "low_severity_business_hours" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "LowSeverityBusinessHours"
|
||||
|
||||
match_labels = {
|
||||
severity = "warning"
|
||||
timezone = "business_hours"
|
||||
}
|
||||
}
|
||||
|
||||
# Suppress specific alert types during known issues
|
||||
resource "grafana_asserts_suppressed_assertions_config" "known_issue_suppression" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "KnownIssueSuppression"
|
||||
|
||||
match_labels = {
|
||||
alertname = "HighMemoryUsage"
|
||||
service = "legacy-service"
|
||||
issue_id = "LEG-123"
|
||||
}
|
||||
}
|
||||
|
||||
# Suppress infrastructure alerts during planned maintenance
|
||||
resource "grafana_asserts_suppressed_assertions_config" "infrastructure_maintenance" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "InfrastructureMaintenance"
|
||||
|
||||
match_labels = {
|
||||
alertgroup = "infrastructure.alerts"
|
||||
maintenance_type = "planned"
|
||||
affected_services = "all"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Complex multi-label suppression
|
||||
|
||||
Define complex suppression rules with multiple labels:
|
||||
|
||||
```terraform
|
||||
# Complex suppression for multi-service deployments
|
||||
resource "grafana_asserts_suppressed_assertions_config" "multi_service_deployment" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "MultiServiceDeploymentSuppression"
|
||||
|
||||
match_labels = {
|
||||
deployment_id = "deploy-2024-01-15"
|
||||
services = "api,worker,frontend"
|
||||
environment = "production"
|
||||
deployment_type = "blue_green"
|
||||
}
|
||||
}
|
||||
|
||||
# Suppress alerts for specific cluster during maintenance
|
||||
resource "grafana_asserts_suppressed_assertions_config" "cluster_maintenance" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "ClusterMaintenanceSuppression"
|
||||
|
||||
match_labels = {
|
||||
cluster = "production-cluster-1"
|
||||
maintenance = "cluster_upgrade"
|
||||
affected_nodes = "all"
|
||||
estimated_duration = "2h"
|
||||
}
|
||||
}
|
||||
|
||||
# Suppress alerts for specific region during network issues
|
||||
resource "grafana_asserts_suppressed_assertions_config" "regional_network_issue" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "RegionalNetworkIssueSuppression"
|
||||
|
||||
match_labels = {
|
||||
region = "us-west-2"
|
||||
issue_type = "network"
|
||||
affected_services = "external_dependencies"
|
||||
incident_id = "NET-456"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Resource reference
|
||||
|
||||
### `grafana_asserts_suppressed_assertions_config`
|
||||
|
||||
Manage Knowledge Graph suppressed assertions configurations through the Grafana API.
|
||||
|
||||
#### Arguments
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| -------------- | ------------- | -------- | ------------------------------------------------------------------------------------------------------------------ |
|
||||
| `name` | `string` | Yes | The name of the suppressed assertions configuration. This field is immutable and forces recreation if changed. |
|
||||
| `match_labels` | `map(string)` | No | Labels to match for this suppressed assertions configuration. Used to determine which alerts should be suppressed. |
|
||||
|
||||
#### Example
|
||||
|
||||
```terraform
|
||||
resource "grafana_asserts_suppressed_assertions_config" "example" {
|
||||
provider = grafana.asserts
|
||||
|
||||
name = "ExampleSuppression"
|
||||
|
||||
match_labels = {
|
||||
alertname = "TestAlert"
|
||||
env = "development"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Best practices
|
||||
|
||||
### Suppression strategy
|
||||
|
||||
- Use suppression rules for temporary situations rather than permanent solutions
|
||||
- Document the reason for suppression in your Terraform configuration comments
|
||||
- Set expiration dates or reminders to review suppression rules
|
||||
- Prefer fixing alert thresholds over suppressing recurring false positives
|
||||
|
||||
### Label match rules
|
||||
|
||||
- Be specific with match labels to avoid suppressing unintended alerts
|
||||
- Test suppression rules in non-production environments first
|
||||
- Use descriptive names that indicate the purpose and scope of the suppression
|
||||
- Include relevant context in labels (for example, incident IDs, maintenance windows)
|
||||
|
||||
### Lifecycle management
|
||||
|
||||
- Regularly review active suppression rules to ensure they're still needed
|
||||
- Remove or update suppression rules after maintenance windows or deployments
|
||||
- Use version control to track when suppression rules were added and why
|
||||
- Consider using time-based automation to enable or disable suppression rules
|
||||
|
||||
## Validation
|
||||
|
||||
After applying the Terraform configuration, verify that:
|
||||
|
||||
- Suppressed assertions configurations are active in your Knowledge Graph instance
|
||||
- Configurations appear in the Knowledge Graph UI under **Observability > Rules > Suppress**
|
||||
- Matching alerts are properly suppressed
|
||||
- Suppression rules don't affect unintended alerts
|
||||
|
||||
## Related documentation
|
||||
|
||||
- [Suppress insights in Knowledge Graph](/docs/grafana-cloud/knowledge-graph/troubleshoot-infra-apps/suppress-insights/)
|
||||
- [Get started with Terraform for Knowledge Graph](../getting-started/)
|
||||
- [Configure notifications](/docs/grafana-cloud/knowledge-graph/configure/notifications/)
|
||||
-355
@@ -1,355 +0,0 @@
|
||||
---
|
||||
description: Configure thresholds for Knowledge Graph using Terraform
|
||||
menuTitle: Thresholds
|
||||
title: Configure thresholds using Terraform
|
||||
weight: 600
|
||||
keywords:
|
||||
- Terraform
|
||||
- Knowledge Graph
|
||||
- Thresholds
|
||||
- Request Thresholds
|
||||
- Resource Thresholds
|
||||
- Health Thresholds
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/terraform/terraform-knowledge-graph/thresholds/
|
||||
---
|
||||
|
||||
# Configure thresholds using Terraform
|
||||
|
||||
Threshold configurations in [Knowledge Graph](/docs/grafana-cloud/knowledge-graph/) allow you to define custom thresholds for request, resource, and health assertions. These configurations help you set specific limits and conditions for monitoring your services and infrastructure.
|
||||
|
||||
For information about managing thresholds in the Knowledge Graph UI, refer to [Manage thresholds](/docs/grafana-cloud/knowledge-graph/configure/manage-thresholds/).
|
||||
|
||||
## Basic threshold configuration
|
||||
|
||||
Create a file named `thresholds.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
# Basic threshold configuration with all three types
|
||||
resource "grafana_asserts_thresholds" "basic" {
|
||||
provider = grafana.asserts
|
||||
|
||||
request_thresholds = [{
|
||||
entity_name = "payment-service"
|
||||
assertion_name = "ErrorRatioBreach"
|
||||
request_type = "inbound"
|
||||
request_context = "/charge"
|
||||
value = 0.01
|
||||
}]
|
||||
|
||||
resource_thresholds = [{
|
||||
assertion_name = "Saturation"
|
||||
resource_type = "container"
|
||||
container_name = "worker"
|
||||
source = "metrics"
|
||||
severity = "warning"
|
||||
value = 75
|
||||
}]
|
||||
|
||||
health_thresholds = [{
|
||||
assertion_name = "ServiceDown"
|
||||
expression = "up < 1"
|
||||
entity_type = "Service"
|
||||
}]
|
||||
}
|
||||
```
|
||||
|
||||
## Request threshold configurations
|
||||
|
||||
Configure thresholds for different service request types and contexts:
|
||||
|
||||
```terraform
|
||||
# Multiple request thresholds for different services
|
||||
resource "grafana_asserts_thresholds" "request_thresholds" {
|
||||
provider = grafana.asserts
|
||||
|
||||
request_thresholds = [
|
||||
{
|
||||
entity_name = "api-service"
|
||||
assertion_name = "ErrorRatioBreach"
|
||||
request_type = "inbound"
|
||||
request_context = "/api/v1/users"
|
||||
value = 0.02
|
||||
},
|
||||
{
|
||||
entity_name = "api-service"
|
||||
assertion_name = "LatencyP99ErrorBuildup"
|
||||
request_type = "inbound"
|
||||
request_context = "/api/v1/orders"
|
||||
value = 500
|
||||
},
|
||||
{
|
||||
entity_name = "payment-gateway"
|
||||
assertion_name = "RequestRateAnomaly"
|
||||
request_type = "outbound"
|
||||
request_context = "/payment/process"
|
||||
value = 1000
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Resource threshold configurations
|
||||
|
||||
Define resource thresholds for different severity levels:
|
||||
|
||||
```terraform
|
||||
# Resource thresholds for different severity levels
|
||||
resource "grafana_asserts_thresholds" "resource_thresholds" {
|
||||
provider = grafana.asserts
|
||||
|
||||
resource_thresholds = [
|
||||
{
|
||||
assertion_name = "Saturation"
|
||||
resource_type = "container"
|
||||
container_name = "web-server"
|
||||
source = "metrics"
|
||||
severity = "warning"
|
||||
value = 75
|
||||
},
|
||||
{
|
||||
assertion_name = "Saturation"
|
||||
resource_type = "container"
|
||||
container_name = "web-server"
|
||||
source = "metrics"
|
||||
severity = "critical"
|
||||
value = 90
|
||||
},
|
||||
{
|
||||
assertion_name = "ResourceRateBreach"
|
||||
resource_type = "Pod"
|
||||
container_name = "database"
|
||||
source = "logs"
|
||||
severity = "warning"
|
||||
value = 80
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Health threshold configurations
|
||||
|
||||
Configure health checks with Prometheus expressions:
|
||||
|
||||
```terraform
|
||||
# Health thresholds with Prometheus expressions
|
||||
resource "grafana_asserts_thresholds" "health_thresholds" {
|
||||
provider = grafana.asserts
|
||||
|
||||
health_thresholds = [
|
||||
{
|
||||
assertion_name = "ServiceDown"
|
||||
expression = "up{job=\"api-service\"} < 1"
|
||||
entity_type = "Service"
|
||||
},
|
||||
{
|
||||
assertion_name = "HighMemoryUsage"
|
||||
expression = "memory_usage_percent > 85"
|
||||
entity_type = "Service"
|
||||
},
|
||||
{
|
||||
assertion_name = "DatabaseConnectivity"
|
||||
expression = "db_connection_pool_active / db_connection_pool_max > 0.9"
|
||||
entity_type = "Service"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Comprehensive threshold configuration
|
||||
|
||||
Define comprehensive thresholds for production environments:
|
||||
|
||||
```terraform
|
||||
# Production environment with comprehensive thresholds
|
||||
resource "grafana_asserts_thresholds" "production" {
|
||||
provider = grafana.asserts
|
||||
|
||||
request_thresholds = [
|
||||
{
|
||||
entity_name = "frontend"
|
||||
assertion_name = "ErrorRatioBreach"
|
||||
request_type = "inbound"
|
||||
request_context = "/"
|
||||
value = 0.005
|
||||
},
|
||||
{
|
||||
entity_name = "backend-api"
|
||||
assertion_name = "LatencyP99ErrorBuildup"
|
||||
request_type = "inbound"
|
||||
request_context = "/api"
|
||||
value = 200
|
||||
}
|
||||
]
|
||||
|
||||
resource_thresholds = [
|
||||
{
|
||||
assertion_name = "Saturation"
|
||||
resource_type = "container"
|
||||
container_name = "frontend"
|
||||
source = "metrics"
|
||||
severity = "warning"
|
||||
value = 70
|
||||
},
|
||||
{
|
||||
assertion_name = "Saturation"
|
||||
resource_type = "container"
|
||||
container_name = "backend-api"
|
||||
source = "metrics"
|
||||
severity = "critical"
|
||||
value = 85
|
||||
}
|
||||
]
|
||||
|
||||
health_thresholds = [
|
||||
{
|
||||
assertion_name = "ServiceDown"
|
||||
expression = "up < 1"
|
||||
entity_type = "Service"
|
||||
},
|
||||
{
|
||||
assertion_name = "NodeDown"
|
||||
expression = "up{job=\"node-exporter\"} < 1"
|
||||
entity_type = "Service"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Resource reference
|
||||
|
||||
### `grafana_asserts_thresholds`
|
||||
|
||||
Manage Knowledge Graph threshold configurations through the Grafana API. This resource allows you to define custom thresholds for request, resource, and health assertions.
|
||||
|
||||
#### Arguments
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| --------------------- | -------------- | -------- | ------------------------------------------------------------------------------------------------------------------------ |
|
||||
| `request_thresholds` | `list(object)` | No | List of request threshold configurations. Refer to [request thresholds block](#request-thresholds-block) for details. |
|
||||
| `resource_thresholds` | `list(object)` | No | List of resource threshold configurations. Refer to [resource thresholds block](#resource-thresholds-block) for details. |
|
||||
| `health_thresholds` | `list(object)` | No | List of health threshold configurations. Refer to [health thresholds block](#health-thresholds-block) for details. |
|
||||
|
||||
#### Request thresholds block
|
||||
|
||||
Each `request_thresholds` block supports the following:
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| ----------------- | -------- | -------- | ------------------------------------------------------ |
|
||||
| `entity_name` | `string` | Yes | The name of the entity to apply the threshold to. |
|
||||
| `assertion_name` | `string` | Yes | The name of the assertion to configure. |
|
||||
| `request_type` | `string` | Yes | The type of request (inbound, outbound). |
|
||||
| `request_context` | `string` | Yes | The request context or path to apply the threshold to. |
|
||||
| `value` | `number` | Yes | The threshold value. |
|
||||
|
||||
#### Resource thresholds block
|
||||
|
||||
Each `resource_thresholds` block supports the following:
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| ---------------- | -------- | -------- | ---------------------------------------------------- |
|
||||
| `assertion_name` | `string` | Yes | The name of the assertion to configure. |
|
||||
| `resource_type` | `string` | Yes | The type of resource (container, Pod, node). |
|
||||
| `container_name` | `string` | Yes | The name of the container to apply the threshold to. |
|
||||
| `source` | `string` | Yes | The source of the metrics (metrics, logs). |
|
||||
| `severity` | `string` | Yes | The severity level (warning, critical). |
|
||||
| `value` | `number` | Yes | The threshold value. |
|
||||
|
||||
#### Health thresholds block
|
||||
|
||||
Each `health_thresholds` block supports the following:
|
||||
|
||||
| Name | Type | Required | Description |
|
||||
| ---------------- | -------- | -------- | ------------------------------------------------------------------------------------ |
|
||||
| `assertion_name` | `string` | Yes | The name of the assertion to configure. |
|
||||
| `expression` | `string` | Yes | The Prometheus expression for the health check. |
|
||||
| `entity_type` | `string` | Yes | Entity type for the health threshold (for example, Service, Pod, Namespace, Volume). |
|
||||
| `alert_category` | `string` | No | Optional alert category label for the health threshold. |
|
||||
|
||||
#### Example
|
||||
|
||||
```terraform
|
||||
resource "grafana_asserts_thresholds" "example" {
|
||||
provider = grafana.asserts
|
||||
|
||||
request_thresholds = [{
|
||||
entity_name = "api-service"
|
||||
assertion_name = "ErrorRatioBreach"
|
||||
request_type = "inbound"
|
||||
request_context = "/api/v1/users"
|
||||
value = 0.02
|
||||
}]
|
||||
|
||||
resource_thresholds = [{
|
||||
assertion_name = "Saturation"
|
||||
resource_type = "container"
|
||||
container_name = "web-server"
|
||||
source = "metrics"
|
||||
severity = "warning"
|
||||
value = 75
|
||||
}]
|
||||
|
||||
health_thresholds = [{
|
||||
assertion_name = "ServiceDown"
|
||||
expression = "up{job=\"api-service\"} < 1"
|
||||
entity_type = "Service"
|
||||
}]
|
||||
}
|
||||
```
|
||||
|
||||
## Best practices
|
||||
|
||||
### Threshold configuration management
|
||||
|
||||
- Set appropriate threshold values based on your service level objectives (SLOs)
|
||||
- Use different severity levels (warning, critical) to create escalation paths
|
||||
- Test threshold configurations in non-production environments first
|
||||
- Monitor threshold effectiveness and adjust values based on actual performance data
|
||||
|
||||
### Request threshold best practices
|
||||
|
||||
- Configure request thresholds for critical user-facing endpoints
|
||||
- Set different thresholds for different request types (inbound vs outbound)
|
||||
- Consider request context when setting thresholds for specific API paths
|
||||
- Use error ratio thresholds to catch service degradation early
|
||||
- Review historical performance data to set realistic threshold values
|
||||
|
||||
### Resource threshold best practices
|
||||
|
||||
- Set resource thresholds based on your infrastructure capacity
|
||||
- Use container-specific thresholds for microservices architectures
|
||||
- Configure both warning and critical thresholds for gradual escalation
|
||||
- Monitor resource utilization patterns to set realistic threshold values
|
||||
- Consider seasonal or periodic patterns in resource usage
|
||||
|
||||
### Health threshold best practices
|
||||
|
||||
- Use Prometheus expressions that accurately reflect service health
|
||||
- Test health check expressions independently before applying them
|
||||
- Set up health thresholds for critical dependencies and external services
|
||||
- Use composite expressions for complex health checks
|
||||
- Ensure expressions perform efficiently without causing excessive load
|
||||
|
||||
### Value selection guidelines
|
||||
|
||||
- Start conservative and adjust based on real-world performance
|
||||
- Use ratios in the 0–1 range for ratio-based metrics (for example, 0.01 for 1%)
|
||||
- Use milliseconds for latency thresholds
|
||||
- Document the reasoning behind specific threshold values
|
||||
- Review and update thresholds regularly based on system evolution
|
||||
|
||||
## Validation
|
||||
|
||||
After applying the Terraform configuration, verify that:
|
||||
|
||||
- Threshold configurations are applied in your Knowledge Graph instance
|
||||
- Configurations appear in the Knowledge Graph UI under **Observability > Rules > Threshold**
|
||||
- Request thresholds correctly identify breaches for specified services
|
||||
- Resource thresholds trigger at appropriate severity levels
|
||||
- Health thresholds accurately reflect service status
|
||||
- Threshold values align with your SLO commitments
|
||||
|
||||
## Related documentation
|
||||
|
||||
- [Manage thresholds in Knowledge Graph](/docs/grafana-cloud/knowledge-graph/configure/manage-thresholds/)
|
||||
- [Get started with Terraform for Knowledge Graph](../getting-started/)
|
||||
- [Configure alerts in Knowledge Graph](/docs/grafana-cloud/knowledge-graph/configure/alerts/)
|
||||
@@ -1,357 +0,0 @@
|
||||
---
|
||||
description: Learn how to create Grafana IRM integrations, escalation policies, and on-call schedules in Grafana Cloud using Terraform
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Terraform
|
||||
- Grafana Cloud IRM
|
||||
- OnCall
|
||||
title: Manage Grafana IRM in Grafana Cloud using Terraform
|
||||
weight: 120
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/terraform/terraform-oncall/
|
||||
---
|
||||
|
||||
# Manage Grafana IRM in Grafana Cloud using Terraform
|
||||
|
||||
Learn how to use Terraform to manage [Grafana IRM](https://grafana.com/docs/grafana-cloud/alerting-and-irm/irm/) resources.
|
||||
This guide shows you how to connect an integration, configure escalation policies, and add on-call schedules using Terraform.
|
||||
|
||||
To illustrate the use of IRM across multiple teams, this guide features examples with two teams: `Devs` and `SREs`.
|
||||
Additionally, it includes the necessary steps to configure Slack for IRM.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Grafana IRM supports Terraform-based configuration for a limited set of resources, primarily those related to OnCall functionality.
|
||||
These resources use the `grafana_oncall_` naming convention in Terraform. Additional IRM components are not yet configurable via Terraform.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before you begin, you should have the following:
|
||||
|
||||
- A Grafana Cloud account, as shown in [Get started](https://grafana.com/docs/grafana-cloud/get-started/)
|
||||
- [Terraform](https://www.terraform.io/downloads) installed on your machine
|
||||
- Administrator permissions in your Grafana instance
|
||||
- (Optional) Administrator permissions in your Slack workspace, if you plan to integrate Slack with Grafana IRM
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
All of the following Terraform configuration files should be saved in the same directory.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Connect Slack to Grafana IRM
|
||||
|
||||
Before including Slack settings in your Terraform setup, you must first configure the Slack integration with Grafana IRM.
|
||||
|
||||
To connect your Slack workspace to Grafana IRM, refer to the [Slack integration for Grafana IRM](https://grafana.com/docs/grafana-cloud/alerting-and-irm/irm/configure/integrations/irm-slack/) documentation.
|
||||
|
||||
## Configure the Grafana provider
|
||||
|
||||
This Terraform configuration sets up the [Grafana provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs) to provide necessary authentication when managing resources for Grafana IRM.
|
||||
|
||||
You can reuse a similar setup to the one described in [Creating and managing a Grafana Cloud stack using Terraform](../terraform-cloud-stack/) to set up a service account and a token.
|
||||
|
||||
1. Create a Service account and token in Grafana. To create a new one, refer to [Service account tokens](https://grafana.com/docs/grafana/latest/administration/service-accounts/#service-account-tokens).
|
||||
|
||||
1. Create a file named `main.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
terraform {
|
||||
required_providers {
|
||||
grafana = {
|
||||
source = "grafana/grafana"
|
||||
version = ">= 3.15.3"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "grafana" {
|
||||
alias = "oncall"
|
||||
|
||||
url = "<Stack-URL>"
|
||||
auth = "<Service-account-token>"
|
||||
oncall_url = "<OnCall-URL>"
|
||||
}
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<Stack-URL>` with the URL of your Grafana stack
|
||||
- `<Service-account-token>` with the service account token that you created
|
||||
- `<OnCall-URL>` with the API URL found on the **Admin & API** tab of the IRM **Settings** page
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
If the service account has the right permissions, this provider setup also allows you to manage other Grafana resources.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Authentication via OnCall API tokens (deprecated)
|
||||
|
||||
OnCall API tokens are being deprecated.
|
||||
While existing tokens will continue to work, we recommend using
|
||||
[Grafana Cloud service account tokens](https://grafana.com/docs/grafana-cloud/security-and-account-management/authentication-and-permissions/service-accounts/) for all new API authentication.
|
||||
|
||||
{{< collapse title="Authentication via OnCall API tokens" >}}
|
||||
To use an existing OnCall API token:
|
||||
|
||||
1. Log into your Grafana Cloud instance
|
||||
1. Select **Alerts & IRM** > **IRM**
|
||||
1. Click **Settings**, and then select **Admin & API**
|
||||
1. Locate the **Grafana IRM API** section
|
||||
1. View, copy, or revoke existing **OnCall API tokens**
|
||||
|
||||
1. Create a file named `main.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
terraform {
|
||||
required_providers {
|
||||
grafana = {
|
||||
source = "grafana/grafana"
|
||||
version = ">= 2.9.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "grafana" {
|
||||
alias = "oncall"
|
||||
|
||||
oncall_access_token = "<OnCall-API-Token>"
|
||||
oncall_url = "<OnCall-URL>"
|
||||
}
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
- `<OnCall-API-Token>` with your existing OnCall API Token
|
||||
- `<OnCall-URL>` with the API URL found on the **Admin & API** tab of the IRM **Settings** page
|
||||
{{< /collapse >}}
|
||||
|
||||
## Add on-call schedules
|
||||
|
||||
This Terraform configuration sets up two on-call schedules, `SREs` and `Devs`, using the [`grafana_oncall_schedule` resource](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/oncall_schedule) to define the schedules within Grafana IRM.
|
||||
Additionally, this configuration includes Slack channels to receive notifications for the on-call schedules of each team.
|
||||
|
||||
To learn more about managing on-call schedules, refer to the [On-call schedules documentation](https://grafana.com/docs/grafana-cloud/alerting-and-irm/irm/manage/on-call-schedules/).
|
||||
|
||||
1. Create two new calendars in your calendar service, one for `Devs` and one for `SREs`.
|
||||
|
||||
1. Locate and save the secret iCal URLs.
|
||||
For example, in a Google calendar, these URLs can be found in **Settings > Settings for my calendars > Integrate calendar**
|
||||
|
||||
1. Create a file named `schedule.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
# Name of the Slack channel to notify about on-call schedules for Devs
|
||||
data "grafana_oncall_slack_channel" "Devs" {
|
||||
provider = grafana.oncall
|
||||
|
||||
name = "<Devs-channel-name>"
|
||||
}
|
||||
|
||||
# Name of the Slack channel to notify about on-call schedules for SREs
|
||||
data "grafana_oncall_slack_channel" "SREs" {
|
||||
provider = grafana.oncall
|
||||
|
||||
name = "<SREs-channel-name>"
|
||||
}
|
||||
|
||||
resource "grafana_oncall_schedule" "schedule_Devs" {
|
||||
provider = grafana.oncall
|
||||
|
||||
name = "Devs"
|
||||
type = "ical"
|
||||
ical_url_primary = "<secret-iCal-URL-for-devs-calendar>"
|
||||
slack {
|
||||
channel_id = data.grafana_oncall_slack_channel.Devs.slack_id
|
||||
}
|
||||
}
|
||||
|
||||
resource "grafana_oncall_schedule" "schedule_SREs" {
|
||||
provider = grafana.oncall
|
||||
|
||||
name = "SREs"
|
||||
type = "ical"
|
||||
ical_url_primary = "<secret-iCal-URL-for-SREs-calendar>"
|
||||
slack {
|
||||
channel_id = data.grafana_oncall_slack_channel.SREs.slack_id
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
1. Replace the following field values:
|
||||
   - `<Devs-channel-name>` with the name of the Slack channel to notify about on-call schedules for `Devs`
|
||||
   - `<SREs-channel-name>` with the name of the Slack channel to notify about on-call schedules for `SREs`
|
||||
- `<secret-iCal-URL-for-devs-calendar>` with the secret iCal URL created in the first step for `Devs` Calendar
|
||||
- `<secret-iCal-URL-for-SREs-calendar>` with the secret iCal URL created in the first step for `SREs` Calendar
|
||||
|
||||
## Add escalation chains
|
||||
|
||||
This Terraform configuration creates two escalation chains named `SREs` and `Devs` in Grafana IRM using the [`grafana_oncall_escalation_chain` (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/oncall_escalation_chain).
|
||||
The configuration also adds the following three steps to each escalation chain using the [`grafana_oncall_escalation` (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/oncall_escalation):
|
||||
|
||||
- Notify users from on-call schedule
|
||||
- Wait for 5 minutes
|
||||
- Notify default Slack channel
|
||||
|
||||
1. Create a file named `escalation-devs.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
resource "grafana_oncall_escalation_chain" "Devs" {
|
||||
provider = grafana.oncall
|
||||
|
||||
name = "Devs"
|
||||
}
|
||||
|
||||
// Notify users from on-call schedule
|
||||
resource "grafana_oncall_escalation" "notify_schedule_step_Devs" {
|
||||
provider = grafana.oncall
|
||||
|
||||
escalation_chain_id = grafana_oncall_escalation_chain.Devs.id
|
||||
type = "notify_on_call_from_schedule"
|
||||
notify_on_call_from_schedule = grafana_oncall_schedule.schedule_Devs.id
|
||||
position = 0
|
||||
}
|
||||
|
||||
// Wait step for 5 Minutes
|
||||
resource "grafana_oncall_escalation" "wait_step_Devs" {
|
||||
provider = grafana.oncall
|
||||
|
||||
escalation_chain_id = grafana_oncall_escalation_chain.Devs.id
|
||||
type = "wait"
|
||||
duration = 300
|
||||
position = 1
|
||||
}
|
||||
|
||||
// Notify default Slack channel step
|
||||
resource "grafana_oncall_escalation" "notify_step_Devs" {
|
||||
provider = grafana.oncall
|
||||
|
||||
escalation_chain_id = grafana_oncall_escalation_chain.Devs.id
|
||||
type = "notify_whole_channel"
|
||||
important = true
|
||||
position = 2
|
||||
}
|
||||
```
|
||||
|
||||
1. Create a file named `escalation-sre.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
resource "grafana_oncall_escalation_chain" "SREs" {
|
||||
provider = grafana.oncall
|
||||
|
||||
name = "SREs"
|
||||
}
|
||||
|
||||
// Notify users from on-call schedule
|
||||
resource "grafana_oncall_escalation" "notify_schedule_step_SREs" {
|
||||
provider = grafana.oncall
|
||||
|
||||
escalation_chain_id = grafana_oncall_escalation_chain.SREs.id
|
||||
type = "notify_on_call_from_schedule"
|
||||
notify_on_call_from_schedule = grafana_oncall_schedule.schedule_SREs.id
|
||||
position = 0
|
||||
}
|
||||
|
||||
// Wait step for 5 Minutes
|
||||
resource "grafana_oncall_escalation" "wait_step_SREs" {
|
||||
provider = grafana.oncall
|
||||
|
||||
escalation_chain_id = grafana_oncall_escalation_chain.SREs.id
|
||||
type = "wait"
|
||||
duration = 300
|
||||
position = 1
|
||||
}
|
||||
|
||||
// Notify default Slack channel step
|
||||
resource "grafana_oncall_escalation" "notify_step_SREs" {
|
||||
provider = grafana.oncall
|
||||
|
||||
escalation_chain_id = grafana_oncall_escalation_chain.SREs.id
|
||||
type = "notify_whole_channel"
|
||||
important = true
|
||||
position = 2
|
||||
}
|
||||
```
|
||||
|
||||
## Connect an integration to Grafana IRM
|
||||
|
||||
This Terraform configuration connects Alertmanager to Grafana IRM using the [`grafana_oncall_integration` (Resource)](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/oncall_integration).
|
||||
It also adds the `Devs` escalation chain as the default route for alerts.
|
||||
|
||||
1. Create a file named `integrations.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
resource "grafana_oncall_integration" "AlertManager" {
|
||||
provider = grafana.oncall
|
||||
|
||||
name = "AlertManager"
|
||||
type = "alertmanager"
|
||||
default_route {
|
||||
escalation_chain_id = grafana_oncall_escalation_chain.Devs.id
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
1. To configure Alertmanager, refer to [Alertmanager integration for Grafana OnCall](https://grafana.com/docs/grafana-cloud/alerting-and-irm/oncall/integrations/alertmanager/)
|
||||
|
||||
## Set up a route to configure escalation behavior for alert group notifications
|
||||
|
||||
This Terraform configuration sets up a route to the Alertmanager integration using the `grafana_oncall_route` (Resource).
|
||||
This route ensures that notifications for alerts with `\"namespace\" *: *\"ops-.*\"` in the payload are escalated to the `SREs` escalation chain.
|
||||
|
||||
Create a file named `routes.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
resource "grafana_oncall_route" "route_SREs" {
|
||||
provider = grafana.oncall
|
||||
|
||||
integration_id = grafana_oncall_integration.AlertManager.id
|
||||
escalation_chain_id = grafana_oncall_escalation_chain.SREs.id
|
||||
routing_regex = "\"namespace\" *: *\"ops-.*\""
|
||||
position = 0
|
||||
}
|
||||
```
|
||||
|
||||
## Apply the Terraform configuration
|
||||
|
||||
In a terminal, run the following commands from the directory where all of the configuration files are located.
|
||||
|
||||
1. Initialize a working directory containing Terraform configuration files.
|
||||
|
||||
```shell
|
||||
terraform init
|
||||
```
|
||||
|
||||
1. Preview the changes that Terraform will make.
|
||||
|
||||
```shell
|
||||
terraform plan
|
||||
```
|
||||
|
||||
1. Apply the configuration files.
|
||||
|
||||
```shell
|
||||
terraform apply
|
||||
```
|
||||
|
||||
## Validation
|
||||
|
||||
After you apply the changes in the Terraform configurations, you can verify the following:
|
||||
|
||||
- Two new schedules named `Devs` and `SREs` are created in Grafana IRM:
|
||||
|
||||

|
||||
|
||||
- A new escalation chain named `SREs` is created in Grafana IRM:
|
||||
|
||||

|
||||
|
||||
- A new escalation chain named `Devs` is created in Grafana IRM:
|
||||
|
||||

|
||||
|
||||
- The Alertmanager integration is added and configured with escalation policies:
|
||||
|
||||

|
||||
|
||||
## Conclusion
|
||||
|
||||
In this guide, you learned how to use Terraform to manage Grafana IRM by connecting an integration, configuring escalation policies, and setting up on-call schedules.
|
||||
|
||||
To learn more about managing Grafana Cloud using Terraform, refer to [Grafana provider's documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs).
|
||||
@@ -1,81 +0,0 @@
|
||||
---
|
||||
description: Learn how to install plugins in Grafana Cloud using Terraform
|
||||
keywords:
|
||||
- Infrastructure as Code
|
||||
- Quickstart
|
||||
- Grafana Cloud
|
||||
- Terraform
|
||||
- Plugins
|
||||
title: Install plugins in Grafana Cloud using Terraform
|
||||
weight: 300
|
||||
canonical: https://grafana.com/docs/grafana/latest/as-code/infrastructure-as-code/terraform/terraform-plugins/
|
||||
---
|
||||
|
||||
# Install plugins in Grafana Cloud using Terraform
|
||||
|
||||
This guide shows you how to install plugins in Grafana Cloud using Terraform. For more information about Grafana plugins, see [Find and use Grafana plugins](/docs/grafana-cloud/introduction/find-and-use-plugins/).
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before you begin, you should have the following available:
|
||||
|
||||
- A Grafana Cloud account; for more information on setting up a Grafana Cloud account, refer to [Get started](https://grafana.com/docs/grafana-cloud/get-started/).
|
||||
- Terraform installed on your machine; for more information on how to install Terraform, refer to the [Terraform install documentation](https://developer.hashicorp.com/terraform/install).
|
||||
- Administrator permissions in your Grafana instance; for more information on assigning Grafana RBAC roles, refer to [Assign RBAC roles](/docs/grafana-cloud/security-and-account-management/authentication-and-permissions/access-control/assign-rbac-roles/).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
All of the following Terraform configuration files should be saved in the same directory.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Configure the Grafana provider
|
||||
|
||||
Use this Terraform configuration to set up the [Grafana provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs) to provide the authentication required to manage plugin resources.
|
||||
|
||||
1. Create a service account and token in Grafana. For more information on creating a service account and token, refer to [Service account tokens](https://grafana.com/docs/grafana/latest/administration/service-accounts/#service-account-tokens). You can also refer to [Creating and managing a Grafana Cloud stack using Terraform](../terraform-cloud-stack/) to set up a service account and a token.
|
||||
|
||||
1. Make sure that the token has the following permissions:
|
||||
|
||||
- `stack-plugins:read`
|
||||
- `stack-plugins:write`
|
||||
- `stack-plugins:delete`
|
||||
|
||||
Next, create a file named `main.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
terraform {
|
||||
required_providers {
|
||||
grafana = {
|
||||
source = "grafana/grafana"
|
||||
version = ">= 4.5.3"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "grafana" {
|
||||
cloud_api_url = "<Stack-URL>"
|
||||
cloud_access_policy_token = "<Service-account-token>"
|
||||
}
|
||||
```
|
||||
|
||||
Replace the following field values:
|
||||
|
||||
- `<Stack-URL>` with the URL of your Grafana stack, for example `https://my-stack.grafana.net/`
|
||||
- `<Service-account-token>` with the service account token that you created
|
||||
|
||||
## Create new plugin resource
|
||||
|
||||
Create a file named `plugins.tf` and add the following:
|
||||
|
||||
```terraform
|
||||
resource "grafana_cloud_plugin_installation" "grafana-clock-panel" {
|
||||
stack_slug = "<Your-Stack-Slug>"
|
||||
slug = "grafana-clock-panel"
|
||||
version = "latest"
|
||||
}
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
In this guide, you learned how to install a plugin in Grafana Cloud using Terraform.
|
||||
|
||||
To learn more about plugin installation, refer to [Grafana provider's documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/cloud_plugin_installation).
|
||||
@@ -122,18 +122,18 @@ Dashboards and panels allow you to show your data in visual form. Each panel nee
|
||||
|
||||
For more information about data sources, refer to [Data sources](ref:data-sources) for specific guidelines.
|
||||
|
||||
1. To create a query, do one of the following:
|
||||
1. To add a query, do one of the following:
|
||||
- Write or construct a query in the query language of your data source.
|
||||
- Click **Replace with saved query** to reuse a [saved query](ref:saved-queries).
|
||||
- Click **+ Add from saved queries** to add a previously saved query.
|
||||
- If you've already written a query, you can click the **Replace with saved query** icon to use a previously saved query instead.
|
||||
|
||||
1. (Optional) To [save the query](ref:save-query) for reuse, click the **Save query** button (or icon).
|
||||
1. Click **Refresh** to query the data source.
|
||||
1. (Optional) To add subsequent queries, click **+ Add query** or **+ Add from saved queries**, and refresh the data source as many times as needed.
|
||||
1. (Optional) To [save the query](ref:save-query) for reuse, click the **Save query** icon.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
[Saved queries](ref:saved-queries) is currently in [public preview](https://grafana.com/docs/release-life-cycle/) in Grafana Enterprise and Grafana Cloud only.
|
||||
[Saved queries](ref:saved-queries) is in [public preview](https://grafana.com/docs/release-life-cycle/) in Grafana Enterprise and Cloud only.
|
||||
{{< /admonition >}}
|
||||
|
||||
1. Click **Refresh** to query the data source.
|
||||
1. In the visualization list, select a visualization type.
|
||||
|
||||

|
||||
|
||||
@@ -44,7 +44,7 @@ The options in this section control the branding and theming of the report attac
|
||||
- **Company logo** - Company logo displayed in the report PDF.
|
||||
Configure it by specifying a URL or uploading a file.
|
||||
The maximum file size is 16 MB.
|
||||
If not set, defaults to the Grafana logo. If the specified URL isn't valid, the logo image appears as broken.
|
||||
Defaults to the Grafana logo.
|
||||
|
||||
- **Theme** - Theme of the PDF attached to the report.
|
||||
The selected theme is also applied to the PDFs generated when you click **Preview PDF** during report creation or select the **Export as PDF** option on a dashboard.
|
||||
@@ -64,7 +64,7 @@ The options in this section control the branding and theming of the report attac
|
||||
|
||||
<!-- vale Grafana.WordList = YES -->
|
||||
|
||||
- **Company logo** - Company logo displayed in the report email. Configure it by specifying a URL or uploading a file. The maximum file size is 16 MB. If not set, defaults to the Grafana logo. If the specified URL isn't valid, the logo image appears as broken.
|
||||
- **Company logo** - Company logo displayed in the report email. Configure it by specifying a URL or uploading a file. The maximum file size is 16 MB. Defaults to the Grafana logo.
|
||||
- **Email footer** - Toggle to enable the report email footer. Select **Sent by** or **None**.
|
||||
- **Footer link text** - Text of the link in the report email footer. Defaults to `Grafana`.
|
||||
- **Footer link URL** - Link of the report email footer.
|
||||
|
||||
@@ -62,9 +62,9 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-CloudWatch/aws-authentication/
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
configure-pdc:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
|
||||
@@ -3,6 +3,7 @@ aliases:
|
||||
- ../data-sources/azure-monitor/
|
||||
- ../features/datasources/azuremonitor/
|
||||
- azuremonitor/
|
||||
- azuremonitor/deprecated-application-insights/
|
||||
description: Guide for using Azure Monitor in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
@@ -22,7 +23,6 @@ labels:
|
||||
menuTitle: Azure Monitor
|
||||
title: Azure Monitor data source
|
||||
weight: 300
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
configure-grafana-feature-toggles:
|
||||
- pattern: /docs/grafana/
|
||||
@@ -49,11 +49,6 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
transform-data:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
|
||||
configure-grafana-azure:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
|
||||
@@ -68,98 +63,295 @@ refs:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-access/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
query-editor-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
template-variables-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
alerting-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
troubleshooting-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
annotations-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-access/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
---
|
||||
|
||||
# Azure Monitor data source
|
||||
|
||||
The Azure Monitor data source plugin allows you to query and visualize data from Azure Monitor, the Azure service to maximize the availability and performance of applications and services in the Azure Cloud.
|
||||
Grafana ships with built-in support for Azure Monitor, the Azure service to maximize the availability and performance of applications and services in the Azure Cloud.
|
||||
This topic explains configuring and querying specific to the Azure Monitor data source.
|
||||
|
||||
## Supported Azure clouds
|
||||
For instructions on how to add a data source to Grafana, refer to the [administration documentation](ref:data-source-management).
|
||||
Only users with the organization administrator role can add data sources.
|
||||
|
||||
The Azure Monitor data source supports the following Azure cloud environments:
|
||||
Once you've added the Azure Monitor data source, you can [configure it](#configure-the-data-source) so that your Grafana instance's users can create queries in its [query editor](query-editor/) when they [build dashboards](ref:build-dashboards) and use [Explore](ref:explore).
|
||||
|
||||
- **Azure** - Azure public cloud (default)
|
||||
- **Azure US Government** - Azure Government cloud
|
||||
- **Azure China** - Azure China cloud operated by 21Vianet
|
||||
The Azure Monitor data source supports visualizing data from four Azure services:
|
||||
|
||||
## Supported Azure services
|
||||
- **Azure Monitor Metrics:** Collect numeric data from resources in your Azure account.
|
||||
- **Azure Monitor Logs:** Collect log and performance data from your Azure account, and query using the Kusto Query Language (KQL).
|
||||
- **Azure Resource Graph:** Query your Azure resources across subscriptions.
|
||||
- **Azure Monitor Application Insights:** Collect trace logging data and other application performance metrics.
|
||||
|
||||
The Azure Monitor data source supports the following Azure services:
|
||||
## Configure the data source
|
||||
|
||||
| Service | Description |
|
||||
| ------------------------------- | --------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Azure Monitor Metrics** | Collect numeric data from resources in your Azure account. Supports dimensions, aggregations, and time grain configuration. |
|
||||
| **Azure Monitor Logs** | Collect log and performance data from your Azure account using the Kusto Query Language (KQL). |
|
||||
| **Azure Resource Graph** | Query your Azure resources across subscriptions using KQL. Useful for inventory, compliance, and resource management. |
|
||||
| **Application Insights Traces** | Collect distributed trace data and correlate requests across your application components. |
|
||||
**To access the data source configuration page:**
|
||||
|
||||
## Get started
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Under Your connections, click **Data sources**.
|
||||
1. Enter `Azure Monitor` in the search bar.
|
||||
1. Click **Azure Monitor**.
|
||||
|
||||
The following documents will help you get started with the Azure Monitor data source:
|
||||
The **Settings** tab of the data source is displayed.
|
||||
|
||||
- [Configure the Azure Monitor data source](ref:configure-azure-monitor) - Set up authentication and connect to Azure
|
||||
- [Azure Monitor query editor](ref:query-editor-azure-monitor) - Create and edit queries for Metrics, Logs, Traces, and Resource Graph
|
||||
- [Template variables](ref:template-variables-azure-monitor) - Create dynamic dashboards with Azure Monitor variables
|
||||
- [Alerting](ref:alerting-azure-monitor) - Create alert rules using Azure Monitor data
|
||||
- [Troubleshooting](ref:troubleshooting-azure-monitor) - Solve common configuration and query errors
|
||||
### Configure Azure Active Directory (AD) authentication
|
||||
|
||||
## Additional features
|
||||
You must create an app registration and service principal in Azure AD to authenticate the data source.
|
||||
For configuration details, refer to the [Azure documentation for service principals](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in).
|
||||
|
||||
After you have configured the Azure Monitor data source, you can:
|
||||
The app registration you create must have the `Reader` role assigned on the subscription.
|
||||
For more information, refer to [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
|
||||
|
||||
- Add [Annotations](ref:annotations-azure-monitor) to overlay Azure log events on your graphs.
|
||||
- Configure and use [Template variables](ref:template-variables-azure-monitor) for dynamic dashboards.
|
||||
- Add [Transformations](ref:transform-data) to manipulate query results.
|
||||
- Set up [Alerting](ref:alerting-azure-monitor) and recording rules using Metrics, Logs, Traces, and Resource Graph queries.
|
||||
- Use [Explore](ref:explore) to investigate your Azure data without building a dashboard.
|
||||
If you host Grafana in Azure, such as in App Service or Azure Virtual Machines, you can configure the Azure Monitor data source to use Managed Identity for secure authentication without entering credentials into Grafana.
|
||||
For details, refer to [Configuring using Managed Identity](#configuring-using-managed-identity).
|
||||
|
||||
## Pre-built dashboards
|
||||
You can configure the Azure Monitor data source to use Workload Identity for secure authentication without entering credentials into Grafana if you host Grafana in a Kubernetes environment, such as AKS, and require access to Azure resources.
|
||||
For details, refer to [Configuring using Workload Identity](#configuring-using-workload-identity).
|
||||
|
||||
The Azure Monitor plugin includes the following pre-built dashboards:
|
||||
| Name | Description |
|
||||
| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Enables Managed Identity. Selecting Managed Identity hides many of the other fields. For details, see [Configuring using Managed Identity](#configuring-using-managed-identity). |
|
||||
| **Azure Cloud** | Sets the national cloud for your Azure account. For most users, this is the default "Azure". For details, see the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/develop/authentication-national-cloud). |
|
||||
| **Directory (tenant) ID** | Sets the directory/tenant ID for the Azure AD app registration to use for authentication. For details, see the [Azure tenant and app ID docs](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in). |
|
||||
| **Application (client) ID** | Sets the application/client ID for the Azure AD app registration to use for authentication. |
|
||||
| **Client secret** | Sets the application client secret for the Azure AD app registration to use for authentication. For details, see the [Azure application secret docs](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret). |
|
||||
| **Default subscription** | _(Optional)_ Sets a default subscription for template variables to use. |
|
||||
| **Enable Basic Logs** | Allows this data source to execute queries against [Basic Logs tables](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/basic-logs-query?tabs=portal-1) in supported Log Analytics Workspaces. These queries may incur additional costs. |
|
||||
|
||||
- **Azure Monitor Overview** - Displays key metrics across your Azure subscriptions and resources.
|
||||
- **Azure Storage Account** - Shows storage account metrics including availability, latency, and transactions.
|
||||
### Provision the data source
|
||||
|
||||
To import a pre-built dashboard:
|
||||
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
|
||||
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-data-sources).
|
||||
|
||||
1. Go to **Connections** > **Data sources**.
|
||||
1. Select your Azure Monitor data source.
|
||||
1. Click the **Dashboards** tab.
|
||||
1. Click **Import** next to the dashboard you want to use.
|
||||
#### Provisioning examples
|
||||
|
||||
## Related resources
|
||||
**Azure AD App Registration (client secret):**
|
||||
|
||||
- [Azure Monitor documentation](https://docs.microsoft.com/en-us/azure/azure-monitor/)
|
||||
- [Kusto Query Language (KQL) reference](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/)
|
||||
- [Grafana community forum](https://community.grafana.com/)
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: clientsecret
|
||||
cloudName: azuremonitor # See table below
|
||||
tenantId: <tenant-id>
|
||||
clientId: <client-id>
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
secureJsonData:
|
||||
clientSecret: <client-secret>
|
||||
version: 1
|
||||
```
|
||||
|
||||
**Managed Identity:**
|
||||
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: msi
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
**Workload Identity:**
|
||||
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: workloadidentity
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
**Current User:**
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The `oauthPassThru` property is required for current user authentication to function.
|
||||
Additionally, `disableGrafanaCache` is necessary to prevent the data source from returning cached responses for resources users don't have access to.
|
||||
{{< /admonition >}}
|
||||
|
||||
```yaml
|
||||
apiVersion: 1 # config file version
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: currentuser
|
||||
oauthPassThru: true
|
||||
disableGrafanaCache: true
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
#### Supported cloud names
|
||||
|
||||
| Azure Cloud | `cloudName` Value |
|
||||
| ------------------------------------ | -------------------------- |
|
||||
| **Microsoft Azure public cloud** | `azuremonitor` (_Default_) |
|
||||
| **Microsoft Chinese national cloud** | `chinaazuremonitor` |
|
||||
| **US Government cloud** | `govazuremonitor` |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Cloud names for current user authentication differ from the `cloudName` values in the preceding table.
|
||||
The public cloud name is `AzureCloud`, the Chinese national cloud name is `AzureChinaCloud`, and the US Government cloud name is `AzureUSGovernment`.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Configure Managed Identity
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Managed Identity is available only in [Azure Managed Grafana](https://azure.microsoft.com/en-us/products/managed-grafana) or Grafana OSS/Enterprise when deployed in Azure. It is not available in Grafana Cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
You can use managed identity to configure Azure Monitor in Grafana if you host Grafana in Azure (such as an App Service or with Azure Virtual Machines) and have managed identity enabled on your VM.
|
||||
This lets you securely authenticate data sources without manually configuring credentials via Azure AD App Registrations.
|
||||
For details on Azure managed identities, refer to the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
|
||||
|
||||
**To enable managed identity for Grafana:**
|
||||
|
||||
1. Set the `managed_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
```
|
||||
|
||||
2. In the Azure Monitor data source configuration, set **Authentication** to **Managed Identity**.
|
||||
|
||||
This hides the directory ID, application ID, and client secret fields, and the data source uses managed identity to authenticate to Azure Monitor Metrics and Logs, and Azure Resource Graph.
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-managed-identity-2.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Managed Identity authentication" >}}
|
||||
|
||||
3. You can set the `managed_identity_client_id` field in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure) to allow a user-assigned managed identity to be used instead of the default system-assigned identity.
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
managed_identity_client_id = USER_ASSIGNED_IDENTITY_CLIENT_ID
|
||||
```
|
||||
|
||||
### Configure Workload Identity
|
||||
|
||||
You can use workload identity to configure Azure Monitor in Grafana if you host Grafana in a Kubernetes environment, such as AKS, in conjunction with managed identities.
|
||||
This lets you securely authenticate data sources without manually configuring credentials via Azure AD App Registrations.
|
||||
For details on workload identity, refer to the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/).
|
||||
|
||||
**To enable workload identity for Grafana:**
|
||||
|
||||
1. Set the `workload_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
```
|
||||
|
||||
2. In the Azure Monitor data source configuration, set **Authentication** to **Workload Identity**.
|
||||
|
||||
This hides the directory ID, application ID, and client secret fields, and the data source uses workload identity to authenticate to Azure Monitor Metrics and Logs, and Azure Resource Graph.
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-workload-identity.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Workload Identity authentication" >}}
|
||||
|
||||
3. There are additional configuration variables that can control the authentication method. `workload_identity_tenant_id` represents the Azure AD tenant that contains the managed identity, `workload_identity_client_id` represents the client ID of the managed identity if it differs from the default client ID, and `workload_identity_token_file` represents the path to the token file. Refer to the [documentation](https://azure.github.io/azure-workload-identity/docs/) for more information on what values these variables should use, if any.
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
workload_identity_tenant_id = IDENTITY_TENANT_ID
|
||||
workload_identity_client_id = IDENTITY_CLIENT_ID
|
||||
workload_identity_token_file = TOKEN_FILE_PATH
|
||||
```
|
||||
|
||||
### Configure Current User authentication
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Current user authentication is an [experimental feature](/docs/release-life-cycle). Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Contact Grafana Support to enable this feature in Grafana Cloud. Aspects of Grafana may not work as expected when using this authentication method.
|
||||
{{< /admonition >}}
|
||||
|
||||
If your Grafana instance is configured with Azure Entra (formerly Active Directory) authentication for login, this authentication method can be used to forward the currently logged-in user's credentials to the data source. The user's credentials are then used when requesting data from the data source. For details on how to configure your Grafana instance using Azure Entra, refer to the [documentation](ref:configure-grafana-azure-auth).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Additional configuration is required to ensure that the App Registration used to log in a user via Azure provides an access token with the permissions required by the data source.
|
||||
|
||||
The App Registration must be configured to issue both **Access Tokens** and **ID Tokens**.
|
||||
|
||||
1. In the Azure Portal, open the App Registration that requires configuration.
|
||||
2. Select **Authentication** in the side menu.
|
||||
3. Under **Implicit grant and hybrid flows** check both the **Access tokens** and **ID tokens** boxes.
|
||||
4. Save the changes to ensure the App Registration is updated.
|
||||
|
||||
The App Registration must also be configured with additional **API Permissions** to provide authenticated users with access to the APIs used by the data source.
|
||||
|
||||
1. In the Azure Portal, open the App Registration that requires configuration.
|
||||
1. Select **API Permissions** in the side menu.
|
||||
1. Ensure the `openid`, `profile`, `email`, and `offline_access` permissions are present under the **Microsoft Graph** section. If not, they must be added.
|
||||
1. Select **Add a permission** and choose the following permissions. They must be added individually. Refer to the [Azure documentation](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-configure-app-access-web-apis) for more information.
|
||||
- Select **Azure Service Management** > **Delegated permissions** > `user_impersonation` > **Add permissions**
|
||||
- Select **APIs my organization uses** > Search for **Log Analytics API** and select it > **Delegated permissions** > `Data.Read` > **Add permissions**
|
||||
|
||||
Once all permissions have been added, the Azure authentication section in Grafana must be updated. The `scopes` section must be updated to include the `.default` scope to ensure that Grafana requests a token with access to all APIs declared on the App Registration. Once updated, the scopes value should equal: `.default openid email profile`.
|
||||
{{< /admonition >}}
|
||||
|
||||
This method of authentication doesn't inherently support all backend functionality as a user's credentials won't be in scope.
|
||||
Affected functionality includes alerting, reporting, and recorded queries.
|
||||
In order to support backend queries when using a data source configured with current user authentication, you can configure service credentials.
|
||||
Also, note that query and resource caching is disabled by default for data sources using current user authentication.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
To configure fallback service credentials the [feature toggle](ref:configure-grafana-feature-toggles) `idForwarding` must be set to `true` and `user_identity_fallback_credentials_enabled` must be enabled in the [Azure configuration section](ref:configure-grafana-azure) (enabled by default when `user_identity_enabled` is set to `true`).
|
||||
{{< /admonition >}}
|
||||
|
||||
Permissions for fallback credentials may need to be broad to appropriately support backend functionality.
|
||||
For example, an alerting query created by a user is dependent on their permissions.
|
||||
If a user tries to create an alert for a resource that the fallback credentials can't access, the alert will fail.
|
||||
|
||||
**To enable current user authentication for Grafana:**
|
||||
|
||||
1. Set the `user_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
|
||||
By default this will also enable fallback service credentials.
|
||||
If you want to disable service credentials at the instance level set `user_identity_fallback_credentials_enabled` to false.
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
user_identity_enabled = true
|
||||
```
|
||||
|
||||
1. In the Azure Monitor data source configuration, set **Authentication** to **Current User**.
|
||||
If fallback service credentials are enabled at the instance level, an additional configuration section is visible that you can use to enable or disable using service credentials for this data source.
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-current-user.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Current User authentication" >}}
|
||||
|
||||
1. If you want backend functionality to work with this data source, enable service credentials and configure the data source using the most applicable credentials for your circumstances.
|
||||
|
||||
## Query the data source
|
||||
|
||||
The Azure Monitor data source can query data from Azure Monitor Metrics and Logs, the Azure Resource Graph, and Application Insights Traces. Each source has its own specialized query editor.
|
||||
|
||||
For details, see the [query editor documentation](query-editor/).
|
||||
|
||||
## Use template variables
|
||||
|
||||
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
|
||||
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
|
||||
Grafana refers to such variables as template variables.
|
||||
|
||||
For details, see the [template variables documentation](template-variables/).
|
||||
|
||||
## Application Insights and Insights Analytics (removed)
|
||||
|
||||
Until Grafana v8.0, you could query the same Azure Application Insights data using Application Insights and Insights Analytics.
|
||||
|
||||
These queries were deprecated in Grafana v7.5. In Grafana v8.0, Application Insights and Insights Analytics were made read-only in favor of querying this data through Metrics and Logs. These query methods were completely removed in Grafana v9.0.
|
||||
|
||||
If you're upgrading from a Grafana version prior to v9.0 and relied on Application Insights and Analytics queries, refer to the [Grafana v9.0 documentation](/docs/grafana/v9.0/datasources/azuremonitor/deprecated-application-insights/) for help migrating these queries to Metrics and Logs queries.
|
||||
|
||||
@@ -1,262 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/alerting/
|
||||
description: Set up alerts using Azure Monitor data in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- azure
|
||||
- monitor
|
||||
- alerting
|
||||
- alerts
|
||||
- metrics
|
||||
- logs
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Alerting
|
||||
title: Azure Monitor alerting
|
||||
weight: 500
|
||||
refs:
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
alerting-fundamentals:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/
|
||||
create-alert-rule:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
grafana-managed-recording-rules:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
troubleshoot:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
---
|
||||
|
||||
# Azure Monitor alerting
|
||||
|
||||
The Azure Monitor data source supports [Grafana Alerting](ref:alerting) and [Grafana-managed recording rules](ref:grafana-managed-recording-rules), allowing you to create alert rules based on Azure metrics, logs, traces, and resource data. You can monitor your Azure environment and receive notifications when specific conditions are met.
|
||||
|
||||
## Before you begin
|
||||
|
||||
- Ensure you have the appropriate permissions to create alert rules in Grafana.
|
||||
- Verify your Azure Monitor data source is configured and working correctly.
|
||||
- Familiarize yourself with [Grafana Alerting concepts](ref:alerting-fundamentals).
|
||||
- **Important**: Verify your data source uses a supported authentication method. Refer to [Authentication requirements](#authentication-requirements).
|
||||
|
||||
## Supported query types for alerting
|
||||
|
||||
All Azure Monitor query types support alerting and recording rules:
|
||||
|
||||
| Query type | Use case | Notes |
|
||||
| -------------------- | -------------------------------------------------- | -------------------------------------------------------- |
|
||||
| Metrics | Threshold-based alerts on Azure resource metrics | Best suited for alerting; returns time-series data |
|
||||
| Logs | Alert on log patterns, error counts, or thresholds | Use KQL to aggregate data into numeric values |
|
||||
| Azure Resource Graph | Alert on resource state or configuration changes | Use count aggregations to return numeric data |
|
||||
| Traces | Alert on trace data and application performance | Use aggregations to return numeric values for evaluation |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Alert queries must return numeric data that Grafana can evaluate against a threshold. Queries that return only text or non-numeric data cannot be used directly for alerting.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Authentication requirements
|
||||
|
||||
Alerting and recording rules run as background processes without a user context. This means they require service-level authentication and don't work with all authentication methods.
|
||||
|
||||
| Authentication method | Supported |
|
||||
| -------------------------------- | ------------------------------------- |
|
||||
| App Registration (client secret) | ✓ |
|
||||
| Managed Identity | ✓ |
|
||||
| Workload Identity | ✓ |
|
||||
| Current User | ✓ (with fallback service credentials) |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
If you use **Current User** authentication, you must configure **fallback service credentials** for alerting and recording rules to function. User credentials aren't available for background operations, so Grafana uses the fallback credentials instead. Refer to [configure the data source](ref:configure-azure-monitor) for details on setting up fallback credentials.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Create an alert rule
|
||||
|
||||
To create an alert rule using Azure Monitor data:
|
||||
|
||||
1. Go to **Alerting** > **Alert rules**.
|
||||
1. Click **New alert rule**.
|
||||
1. Enter a name for your alert rule.
|
||||
1. In the **Define query and alert condition** section:
|
||||
- Select your Azure Monitor data source.
|
||||
- Configure your query (for example, a Metrics query for CPU usage or a Logs query using KQL).
|
||||
- Add a **Reduce** expression if your query returns multiple series.
|
||||
- Add a **Threshold** expression to define the alert condition.
|
||||
1. In the **Set evaluation behavior** section:
|
||||
- Select or create a folder and evaluation group.
|
||||
- Set the evaluation interval (how often the alert is checked).
|
||||
- Set the pending period (how long the condition must be true before firing).
|
||||
1. Add labels and annotations to provide context for notifications.
|
||||
1. Click **Save rule**.
|
||||
|
||||
For detailed instructions, refer to [Create a Grafana-managed alert rule](ref:create-alert-rule).
|
||||
|
||||
## Example: VM CPU usage alert
|
||||
|
||||
This example creates an alert that fires when virtual machine CPU usage exceeds 80%:
|
||||
|
||||
1. Create a new alert rule.
|
||||
1. Configure the query:
|
||||
- **Service**: Metrics
|
||||
- **Resource**: Select your virtual machine
|
||||
- **Metric namespace**: `Microsoft.Compute/virtualMachines`
|
||||
- **Metric**: `Percentage CPU`
|
||||
- **Aggregation**: `Average`
|
||||
1. Add expressions:
|
||||
- **Reduce**: Last (to get the most recent data point)
|
||||
- **Threshold**: Is above 80
|
||||
1. Set evaluation to run every 1 minute with a 5-minute pending period.
|
||||
1. Save the rule.
|
||||
|
||||
## Example: Error log count alert
|
||||
|
||||
This example alerts when error logs exceed a threshold using a KQL query:
|
||||
|
||||
1. Create a new alert rule.
|
||||
1. Configure the query:
|
||||
- **Service**: Logs
|
||||
- **Resource**: Select your Log Analytics workspace
|
||||
- **Query**:
|
||||
```kusto
|
||||
AppExceptions
|
||||
| where TimeGenerated > ago(5m)
|
||||
| summarize ErrorCount = count() by bin(TimeGenerated, 1m)
|
||||
```
|
||||
1. Add expressions:
|
||||
- **Reduce**: Max (to get the highest count in the period)
|
||||
- **Threshold**: Is above 10
|
||||
1. Set evaluation to run every 5 minutes.
|
||||
1. Save the rule.
|
||||
|
||||
## Example: Resource count alert
|
||||
|
||||
This example alerts when the number of running virtual machines drops below a threshold using Azure Resource Graph:
|
||||
|
||||
1. Create a new alert rule.
|
||||
1. Configure the query:
|
||||
- **Service**: Azure Resource Graph
|
||||
- **Subscriptions**: Select your subscriptions
|
||||
- **Query**:
|
||||
|
||||
```kusto
|
||||
resources
|
||||
| where type == "microsoft.compute/virtualmachines"
|
||||
| where properties.extended.instanceView.powerState.displayStatus == "VM running"
|
||||
| summarize RunningVMs = count()
|
||||
```
|
||||
|
||||
1. Add expressions:
|
||||
- **Reduce**: Last
|
||||
- **Threshold**: Is below 3
|
||||
1. Set evaluation to run every 5 minutes.
|
||||
1. Save the rule.
|
||||
|
||||
## Best practices
|
||||
|
||||
Follow these recommendations to create reliable and efficient alerts with Azure Monitor data.
|
||||
|
||||
### Use appropriate query intervals
|
||||
|
||||
- Set the alert evaluation interval to be greater than or equal to the minimum data resolution from Azure Monitor.
|
||||
- Azure Monitor Metrics typically have 1-minute granularity at minimum.
|
||||
- Avoid very short intervals (less than 1 minute) as they may cause evaluation timeouts or miss data points.
|
||||
|
||||
### Reduce multiple series
|
||||
|
||||
When your Azure Monitor query returns multiple time series (for example, CPU usage across multiple VMs), use the **Reduce** expression to aggregate them:
|
||||
|
||||
- **Last**: Use the most recent value
|
||||
- **Mean**: Average across all series
|
||||
- **Max/Min**: Use the highest or lowest value
|
||||
- **Sum**: Total across all series
|
||||
|
||||
### Optimize Log Analytics queries
|
||||
|
||||
For Logs queries used in alerting:
|
||||
|
||||
- Use `summarize` to aggregate data into numeric values.
|
||||
- Include appropriate time filters using `ago()` or `TimeGenerated`.
|
||||
- Avoid returning large result sets; aggregate data in the query.
|
||||
- Test queries in Explore before using them in alert rules.
|
||||
|
||||
### Handle no data conditions
|
||||
|
||||
Configure what happens when no data is returned:
|
||||
|
||||
1. In the alert rule, find **Configure no data and error handling**.
|
||||
1. Choose an appropriate action:
|
||||
- **No Data**: Keep the alert in its current state
|
||||
- **Alerting**: Treat no data as an alert condition
|
||||
- **OK**: Treat no data as a healthy state
|
||||
|
||||
### Test queries before alerting
|
||||
|
||||
Always verify your query returns expected data before creating an alert:
|
||||
|
||||
1. Go to **Explore**.
|
||||
1. Select your Azure Monitor data source.
|
||||
1. Run the query you plan to use for alerting.
|
||||
1. Confirm the data format and values are correct.
|
||||
1. Verify the query returns numeric data suitable for threshold evaluation.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If your Azure Monitor alerts aren't working as expected, use the following sections to diagnose and resolve common issues.
|
||||
|
||||
### Alerts not firing
|
||||
|
||||
- Verify the data source uses a supported authentication method. If using Current User authentication, ensure fallback service credentials are configured.
|
||||
- Check that the query returns numeric data in Explore.
|
||||
- Ensure the evaluation interval allows enough time for data to be available.
|
||||
- Review the alert rule's health and any error messages in the Alerting UI.
|
||||
|
||||
### Authentication errors in alert evaluation
|
||||
|
||||
If you see authentication errors when alerts evaluate:
|
||||
|
||||
- Confirm the data source is configured with App Registration, Managed Identity, Workload Identity, or Current User with fallback service credentials.
|
||||
- If using App Registration, verify the client secret hasn't expired.
|
||||
- If using Current User, verify that fallback service credentials are configured and valid.
|
||||
- Check that the service principal has appropriate permissions on Azure resources.
|
||||
|
||||
### Query timeout errors
|
||||
|
||||
- Simplify complex KQL queries.
|
||||
- Reduce the time range in Log Analytics queries.
|
||||
- Add more specific filters to narrow result sets.
|
||||
|
||||
For additional troubleshooting help, refer to [Troubleshoot Azure Monitor](ref:troubleshoot).
|
||||
|
||||
## Additional resources
|
||||
|
||||
- [Grafana Alerting documentation](ref:alerting)
|
||||
- [Create alert rules](ref:create-alert-rule)
|
||||
- [Azure Monitor query editor](ref:query-editor)
|
||||
- [Grafana-managed recording rules](ref:grafana-managed-recording-rules)
|
||||
@@ -1,218 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/annotations/
|
||||
description: Use annotations with the Azure Monitor data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- azure
|
||||
- monitor
|
||||
- annotations
|
||||
- events
|
||||
- logs
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Annotations
|
||||
title: Azure Monitor annotations
|
||||
weight: 450
|
||||
refs:
|
||||
annotate-visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
---
|
||||
|
||||
# Azure Monitor annotations
|
||||
|
||||
[Annotations](ref:annotate-visualizations) overlay rich event information on top of graphs. You can use Azure Monitor Log Analytics queries to create annotations that mark important events, deployments, alerts, or other significant occurrences on your dashboards.
|
||||
|
||||
## Before you begin
|
||||
|
||||
- Ensure you have configured the Azure Monitor data source.
|
||||
- You need access to a Log Analytics workspace containing the data you want to use for annotations.
|
||||
- Annotations use Log Analytics (KQL) queries only. Metrics, Traces, and Azure Resource Graph queries aren't supported.
|
||||
|
||||
## Create an annotation query
|
||||
|
||||
To add an Azure Monitor annotation to a dashboard:
|
||||
|
||||
1. Open the dashboard where you want to add annotations.
|
||||
1. Click **Dashboard settings** (gear icon) in the top navigation.
|
||||
1. Select **Annotations** in the left menu.
|
||||
1. Click **Add annotation query**.
|
||||
1. Enter a **Name** for the annotation (for example, "Azure Activity" or "Deployments").
|
||||
1. Select your **Azure Monitor** data source.
|
||||
1. Choose the **Logs** service.
|
||||
1. Select a **Resource** (Log Analytics workspace or Application Insights resource).
|
||||
1. Write a KQL query that returns the annotation data.
|
||||
1. Click **Apply** to save.
|
||||
|
||||
## Query requirements
|
||||
|
||||
Your KQL query should return columns that Grafana can use to create annotations:
|
||||
|
||||
| Column | Required | Description |
|
||||
| ------------------ | ----------- | ------------------------------------------------------------------------------------------------ |
|
||||
| `TimeGenerated` | Yes | The timestamp for the annotation. Grafana uses this to position the annotation on the time axis. |
|
||||
| `Text` | Recommended | The annotation text displayed when you hover over or click the annotation. |
|
||||
| Additional columns | Optional | Any other columns returned become annotation tags. |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Always include a time filter in your query to limit results to the dashboard's time range. Use the `$__timeFilter()` macro.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Annotation query examples
|
||||
|
||||
The following examples demonstrate common annotation use cases.
|
||||
|
||||
### Azure Activity Log events
|
||||
|
||||
Display Azure Activity Log events such as resource modifications, deployments, and administrative actions:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where Level == "Error" or Level == "Warning" or CategoryValue == "Administrative"
|
||||
| project TimeGenerated, Text=OperationNameValue, Level, ResourceGroup, Caller
|
||||
| order by TimeGenerated desc
|
||||
| take 100
|
||||
```
|
||||
|
||||
### Deployment events
|
||||
|
||||
Show deployment-related activity:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where OperationNameValue contains "deployments"
|
||||
| project TimeGenerated, Text=strcat("Deployment: ", OperationNameValue), Status=ActivityStatusValue, ResourceGroup
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### Application Insights exceptions
|
||||
|
||||
Mark application exceptions as annotations:
|
||||
|
||||
```kusto
|
||||
AppExceptions
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| project TimeGenerated, Text=strcat(ProblemId, ": ", OuterMessage), SeverityLevel, AppRoleName
|
||||
| order by TimeGenerated desc
|
||||
| take 50
|
||||
```
|
||||
|
||||
### Custom events from Application Insights
|
||||
|
||||
Display custom events logged by your application:
|
||||
|
||||
```kusto
|
||||
AppEvents
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where Name == "DeploymentStarted" or Name == "DeploymentCompleted"
|
||||
| project TimeGenerated, Text=Name, AppRoleName
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### Security alerts
|
||||
|
||||
Show security-related alerts:
|
||||
|
||||
```kusto
|
||||
SecurityAlert
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| project TimeGenerated, Text=AlertName, Severity=AlertSeverity, Description
|
||||
| order by TimeGenerated desc
|
||||
| take 50
|
||||
```
|
||||
|
||||
### Resource health events
|
||||
|
||||
Display resource health status changes:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where CategoryValue == "ResourceHealth"
|
||||
| project TimeGenerated, Text=OperationNameValue, Status=ActivityStatusValue, ResourceId
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### VM start and stop events
|
||||
|
||||
Mark virtual machine state changes:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where OperationNameValue has_any ("start", "deallocate", "restart")
|
||||
| where ResourceProviderValue == "MICROSOFT.COMPUTE"
|
||||
| project TimeGenerated, Text=OperationNameValue, VM=Resource, Status=ActivityStatusValue
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
### Autoscale events
|
||||
|
||||
Show autoscale operations:
|
||||
|
||||
```kusto
|
||||
AzureActivity
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where OperationNameValue contains "autoscale"
|
||||
| project TimeGenerated, Text=strcat("Autoscale: ", OperationNameValue), Status=ActivityStatusValue, ResourceGroup
|
||||
| order by TimeGenerated desc
|
||||
```
|
||||
|
||||
## Customize annotation appearance
|
||||
|
||||
After creating an annotation query, you can customize its appearance:
|
||||
|
||||
| Setting | Description |
|
||||
| ------------- | -------------------------------------------------------------------------------------------------------- |
|
||||
| **Color** | Choose a color for the annotation markers. Use different colors to distinguish between annotation types. |
|
||||
| **Show in** | Select which panels display the annotations. |
|
||||
| **Filter by** | Add filters to limit when annotations appear. |
|
||||
|
||||
## Best practices
|
||||
|
||||
Follow these recommendations when creating annotations:
|
||||
|
||||
1. **Limit results**: Always use `take` or `limit` to restrict the number of annotations. Too many annotations can clutter your dashboard and impact performance.
|
||||
|
||||
2. **Use time filters**: Include `$__timeFilter()` to ensure queries only return data within the dashboard's time range.
|
||||
|
||||
3. **Create meaningful text**: Use `strcat()` or `project` to create descriptive annotation text that provides context at a glance.
|
||||
|
||||
4. **Add relevant tags**: Include columns like `ResourceGroup`, `Severity`, or `Status` that become clickable tags for filtering.
|
||||
|
||||
5. **Use descriptive names**: Name your annotations clearly (for example, "Production Deployments" or "Critical Alerts") so dashboard users understand what they represent.
|
||||
|
||||
## Troubleshoot annotations
|
||||
|
||||
If annotations aren't appearing as expected, try the following solutions.
|
||||
|
||||
### Annotations don't appear
|
||||
|
||||
- Verify the query returns data in the selected time range.
|
||||
- Check that the query includes a `TimeGenerated` column.
|
||||
- Test the query in the Azure Portal Log Analytics query editor.
|
||||
- Ensure the annotation is enabled (toggle is on).
|
||||
|
||||
### Too many annotations
|
||||
|
||||
- Add more specific filters to your query.
|
||||
- Use `take` to limit results.
|
||||
- Narrow the time range.
|
||||
|
||||
### Annotations appear at wrong times
|
||||
|
||||
- Verify the `TimeGenerated` column contains the correct timestamp.
|
||||
- Check your dashboard's timezone settings.
|
||||
@@ -1,605 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/configure/
|
||||
description: Guide for configuring the Azure Monitor data source in Grafana.
|
||||
keywords:
|
||||
- grafana
|
||||
- microsoft
|
||||
- azure
|
||||
- monitor
|
||||
- application
|
||||
- insights
|
||||
- log
|
||||
- analytics
|
||||
- guide
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Configure
|
||||
title: Configure the Azure Monitor data source
|
||||
weight: 200
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
configure-grafana-feature-toggles:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#feature_toggles
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#feature_toggles
|
||||
provisioning-data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
configure-grafana-azure-auth:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/
|
||||
build-dashboards:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
configure-grafana-azure:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
|
||||
data-source-management:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
configure-grafana-azure-auth-scopes:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
|
||||
data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
configure-pdc:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
---
|
||||
|
||||
# Configure the Azure Monitor data source
|
||||
|
||||
This document explains how to configure the Azure Monitor data source and the available configuration options.
|
||||
For general information about data sources, refer to [Grafana data sources](ref:data-sources) and [Data source management](ref:data-source-management).
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before configuring the Azure Monitor data source, ensure you have the following:
|
||||
|
||||
- **Grafana permissions:** You must have the `Organization administrator` role to configure data sources.
|
||||
Organization administrators can also [configure the data source via YAML](#provision-the-data-source) with the Grafana provisioning system or [using Terraform](#configure-with-terraform).
|
||||
|
||||
- **Azure prerequisites:** Depending on your chosen authentication method, you may need:
|
||||
- A Microsoft Entra ID (formerly Azure AD) app registration with a service principal (for App Registration authentication)
|
||||
- A Managed Identity enabled on your Azure VM or App Service (for Managed Identity authentication)
|
||||
- Workload identity configured in your Kubernetes cluster (for Workload Identity authentication)
|
||||
- Microsoft Entra ID authentication configured for Grafana login (for Current User authentication)
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
**Grafana Cloud users:** Managed Identity and Workload Identity authentication methods are not available in Grafana Cloud because they require Grafana to run on your Azure infrastructure. Use **App Registration** authentication instead.
|
||||
{{< /admonition >}}
|
||||
|
||||
- **Azure RBAC permissions:** The identity used to authenticate must have the `Reader` role on the Azure subscription containing the resources you want to monitor.
|
||||
For Log Analytics queries, the identity also needs appropriate permissions on the Log Analytics workspaces to be queried.
|
||||
Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The Azure Monitor data source plugin is built into Grafana. No additional installation is required.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Add the data source
|
||||
|
||||
To add the Azure Monitor data source:
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Click **Add new connection**.
|
||||
1. Type `Azure Monitor` in the search bar.
|
||||
1. Select **Azure Monitor**.
|
||||
1. Click **Add new data source** in the upper right.
|
||||
|
||||
You're taken to the **Settings** tab where you can configure the data source.
|
||||
|
||||
## Choose an authentication method
|
||||
|
||||
The Azure Monitor data source supports four authentication methods. Choose based on where Grafana is hosted and your security requirements:
|
||||
|
||||
| Authentication method | Best for | Requirements |
|
||||
| --------------------- | ------------------------------------------ | -------------------------------------------------------------- |
|
||||
| **App Registration** | Any Grafana deployment | Microsoft Entra ID app registration with client secret |
|
||||
| **Managed Identity** | Grafana hosted in Azure (VMs, App Service) | Managed identity enabled on the Azure resource |
|
||||
| **Workload Identity** | Grafana in Kubernetes (AKS) | Workload identity federation configured |
|
||||
| **Current User** | User-level access control | Microsoft Entra ID authentication configured for Grafana login |
|
||||
|
||||
## Configure authentication
|
||||
|
||||
Select one of the following authentication methods and complete the configuration.
|
||||
|
||||
### App Registration
|
||||
|
||||
Use a Microsoft Entra ID app registration (service principal) to authenticate. This method works with any Grafana deployment.
|
||||
|
||||
#### App Registration prerequisites
|
||||
|
||||
1. Create an app registration in Microsoft Entra ID.
|
||||
Refer to the [Azure documentation for creating a service principal](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in).
|
||||
|
||||
1. Create a client secret for the app registration.
|
||||
Refer to the [Azure documentation for creating a client secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).
|
||||
|
||||
1. Assign the `Reader` role to the app registration on the subscription or resources you want to monitor.
|
||||
Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
|
||||
|
||||
#### App Registration UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| **Authentication** | Select **App Registration**. |
|
||||
| **Azure Cloud** | The Azure environment to connect to. Select **Azure** for the public cloud, or choose Azure Government or Azure China for national clouds. |
|
||||
| **Directory (tenant) ID** | The GUID that identifies your Microsoft Entra ID tenant. |
|
||||
| **Application (client) ID** | The GUID for the app registration you created. |
|
||||
| **Client secret** | The secret key for the app registration. Keep this secure and rotate periodically. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
|
||||
#### Provision App Registration with YAML
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: clientsecret
|
||||
cloudName: azuremonitor # See supported cloud names below
|
||||
tenantId: <tenant-id>
|
||||
clientId: <client-id>
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
secureJsonData:
|
||||
clientSecret: <client-secret>
|
||||
version: 1
|
||||
```
|
||||
|
||||
### Managed Identity
|
||||
|
||||
Use Azure Managed Identity for secure, credential-free authentication when Grafana is hosted in Azure.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Managed Identity is available in [Azure Managed Grafana](https://azure.microsoft.com/en-us/products/managed-grafana) or self-hosted Grafana deployed in Azure. It is not available in Grafana Cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Managed Identity prerequisites
|
||||
|
||||
- Grafana must be hosted in Azure (App Service, Azure VMs, or Azure Managed Grafana).
|
||||
- Managed identity must be enabled on the Azure resource hosting Grafana.
|
||||
- The managed identity must have the `Reader` role on the subscription or resources you want to monitor.
|
||||
|
||||
For details on Azure managed identities, refer to the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
|
||||
|
||||
#### Managed Identity Grafana server configuration
|
||||
|
||||
Enable managed identity in the Grafana server configuration:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
```
|
||||
|
||||
To use a user-assigned managed identity instead of the system-assigned identity, also set:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
managed_identity_enabled = true
|
||||
managed_identity_client_id = <USER_ASSIGNED_IDENTITY_CLIENT_ID>
|
||||
```
|
||||
|
||||
Refer to [Grafana Azure configuration](ref:configure-grafana-azure) for more details.
|
||||
|
||||
#### Managed Identity UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------------ | --------------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Select **Managed Identity**. The directory ID, application ID, and client secret fields are hidden. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-managed-identity-2.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Managed Identity" >}}
|
||||
|
||||
#### Provision Managed Identity with YAML
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: msi
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
### Workload Identity
|
||||
|
||||
Use Azure Workload Identity for secure authentication in Kubernetes environments like AKS.
|
||||
|
||||
#### Workload Identity prerequisites
|
||||
|
||||
- Grafana must be running in a Kubernetes environment with workload identity federation configured.
|
||||
- The workload identity must have the `Reader` role on the subscription or resources you want to monitor.
|
||||
|
||||
For details, refer to the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/).
|
||||
|
||||
#### Workload Identity Grafana server configuration
|
||||
|
||||
Enable workload identity in the Grafana server configuration:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
```
|
||||
|
||||
Optional configuration variables:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
workload_identity_enabled = true
|
||||
workload_identity_tenant_id = <IDENTITY_TENANT_ID> # Microsoft Entra ID tenant containing the managed identity
|
||||
workload_identity_client_id = <IDENTITY_CLIENT_ID> # Client ID if different from default
|
||||
workload_identity_token_file = <TOKEN_FILE_PATH> # Path to the token file
|
||||
```
|
||||
|
||||
Refer to [Grafana Azure configuration](ref:configure-grafana-azure) and the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/) for more details.
|
||||
|
||||
#### Workload Identity UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------------ | ---------------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Select **Workload Identity**. The directory ID, application ID, and client secret fields are hidden. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-workload-identity.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Workload Identity" >}}
|
||||
|
||||
#### Provision Workload Identity with YAML
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: workloadidentity
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
### Current User
|
||||
|
||||
Forward the logged-in Grafana user's Azure credentials to the data source for user-level access control.
|
||||
|
||||
{{< admonition type="warning" >}}
|
||||
Current User authentication is an [experimental feature](/docs/release-life-cycle/). Engineering and on-call support is not available. Documentation is limited. No SLA is provided. Contact Grafana Support to enable this feature in Grafana Cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Current User prerequisites
|
||||
|
||||
Your Grafana instance must be configured with Microsoft Entra ID authentication. Refer to the [Microsoft Entra ID authentication documentation](ref:configure-grafana-azure-auth).
|
||||
|
||||
#### Configure your Azure App Registration
|
||||
|
||||
The App Registration used for Grafana login requires additional configuration:
|
||||
|
||||
**Enable token issuance:**
|
||||
|
||||
1. In the Azure Portal, open your App Registration.
|
||||
1. Select **Authentication** in the side menu.
|
||||
1. Under **Implicit grant and hybrid flows**, check both **Access tokens** and **ID tokens**.
|
||||
1. Save your changes.
|
||||
|
||||
**Add API permissions:**
|
||||
|
||||
1. In the Azure Portal, open your App Registration.
|
||||
1. Select **API Permissions** in the side menu.
|
||||
1. Ensure these permissions are present under **Microsoft Graph**: `openid`, `profile`, `email`, and `offline_access`.
|
||||
1. Add the following permissions:
|
||||
- **Azure Service Management** > **Delegated permissions** > `user_impersonation`
|
||||
- **APIs my organization uses** > Search for **Log Analytics API** > **Delegated permissions** > `Data.Read`
|
||||
|
||||
Refer to the [Azure documentation](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-configure-app-access-web-apis) for more information.
|
||||
|
||||
**Update Grafana scopes:**
|
||||
|
||||
Update the `scopes` section in your Grafana Azure authentication configuration to include the `.default` scope:
|
||||
|
||||
```
|
||||
.default openid email profile
|
||||
```
|
||||
|
||||
#### Current User Grafana server configuration
|
||||
|
||||
Enable current user authentication in the Grafana server configuration:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
user_identity_enabled = true
|
||||
```
|
||||
|
||||
By default, this also enables fallback service credentials. To disable fallback credentials at the instance level:
|
||||
|
||||
```ini
|
||||
[azure]
|
||||
user_identity_enabled = true
|
||||
user_identity_fallback_credentials_enabled = false
|
||||
```
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
To use fallback service credentials, the [feature toggle](ref:configure-grafana-feature-toggles) `idForwarding` must be set to `true`.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Limitations and fallback credentials
|
||||
|
||||
Current User authentication doesn't support backend functionality like alerting, reporting, and recorded queries because user credentials aren't available for background operations.
|
||||
|
||||
To support these features, configure **fallback service credentials**. When enabled, Grafana uses the fallback credentials for backend operations. Note that operations using fallback credentials are limited to the permissions of those credentials, not the user's permissions.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Query and resource caching is disabled by default for data sources using Current User authentication.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Current User UI configuration
|
||||
|
||||
| Setting | Description |
|
||||
| -------------------------------- | ------------------------------------------------------------------------------------------- |
|
||||
| **Authentication** | Select **Current User**. |
|
||||
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
|
||||
| **Fallback Service Credentials** | Enable and configure credentials for backend features like alerting. |
|
||||
|
||||
{{< figure src="/media/docs/grafana/data-sources/screenshot-current-user.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Current User authentication" >}}
|
||||
|
||||
#### Provision Current User with YAML
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The `oauthPassThru` property is required for Current User authentication. The `disableGrafanaCache` property prevents returning cached responses for resources users don't have access to.
|
||||
{{< /admonition >}}
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Azure Monitor
|
||||
type: grafana-azure-monitor-datasource
|
||||
access: proxy
|
||||
jsonData:
|
||||
azureAuthType: currentuser
|
||||
oauthPassThru: true
|
||||
disableGrafanaCache: true
|
||||
subscriptionId: <subscription-id> # Optional, default subscription
|
||||
version: 1
|
||||
```
|
||||
|
||||
## Additional configuration options
|
||||
|
||||
These settings apply to all authentication methods.
|
||||
|
||||
### General settings
|
||||
|
||||
| Setting | Description |
|
||||
| ----------- | ------------------------------------------------------------------------------- |
|
||||
| **Name** | The data source name used in panels and queries. Example: `azure-monitor-prod`. |
|
||||
| **Default** | Toggle to make this the default data source for new panels. |
|
||||
|
||||
### Enable Basic Logs
|
||||
|
||||
Toggle **Enable Basic Logs** to allow queries against [Basic Logs tables](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/basic-logs-query?tabs=portal-1) in supported Log Analytics Workspaces.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Querying Basic Logs tables incurs additional costs on a per-query basis.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Private data source connect (Grafana Cloud only)
|
||||
|
||||
If you're using Grafana Cloud and need to connect to Azure resources in a private network, use Private Data Source Connect (PDC).
|
||||
|
||||
1. Click the **Private data source connect** dropdown to select your PDC configuration.
|
||||
1. Click **Manage private data source connect** to view your PDC connection details.
|
||||
|
||||
For more information, refer to [Private data source connect](ref:private-data-source-connect) and [Configure PDC](ref:configure-pdc).
|
||||
|
||||
## Supported cloud names
|
||||
|
||||
When provisioning the data source, use the following `cloudName` values:
|
||||
|
||||
| Azure Cloud | `cloudName` value |
|
||||
| -------------------------------- | ------------------------ |
|
||||
| Microsoft Azure public cloud | `azuremonitor` (default) |
|
||||
| Microsoft Chinese national cloud | `chinaazuremonitor` |
|
||||
| US Government cloud | `govazuremonitor` |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
For Current User authentication, the cloud names differ: use `AzureCloud` for public cloud, `AzureChinaCloud` for the Chinese national cloud, and `AzureUSGovernment` for the US Government cloud.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Verify the connection
|
||||
|
||||
After configuring the data source, click **Save & test**. A successful connection displays a message confirming that the credentials are valid and have access to the configured default subscription.
|
||||
|
||||
If the test fails, verify:
|
||||
|
||||
- Your credentials are correct (tenant ID, client ID, client secret)
|
||||
- The identity has the required Azure RBAC permissions
|
||||
- For Managed Identity or Workload Identity, that the Grafana server configuration is correct
|
||||
- Network connectivity to Azure endpoints
|
||||
|
||||
## Provision the data source
|
||||
|
||||
You can define and configure the Azure Monitor data source in YAML files as part of the Grafana provisioning system.
|
||||
For more information about provisioning, refer to [Provisioning Grafana](ref:provisioning-data-sources).
|
||||
|
||||
### Provision quick reference
|
||||
|
||||
| Authentication method | `azureAuthType` value | Required fields |
|
||||
| --------------------- | --------------------- | -------------------------------------------------- |
|
||||
| App Registration | `clientsecret` | `tenantId`, `clientId`, `clientSecret` |
|
||||
| Managed Identity | `msi` | None (uses VM identity) |
|
||||
| Workload Identity | `workloadidentity` | None (uses pod identity) |
|
||||
| Current User | `currentuser` | `oauthPassThru: true`, `disableGrafanaCache: true` |
|
||||
|
||||
All methods support the optional `subscriptionId` field to set a default subscription.
|
||||
|
||||
For complete YAML examples, see the [authentication method sections](#configure-authentication) above.
|
||||
|
||||
## Configure with Terraform
|
||||
|
||||
You can configure the Azure Monitor data source using the [Grafana Terraform provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs). This approach enables infrastructure-as-code workflows and version control for your Grafana configuration.
|
||||
|
||||
### Terraform prerequisites
|
||||
|
||||
- [Terraform](https://www.terraform.io/downloads) installed
|
||||
- Grafana Terraform provider configured with appropriate credentials
|
||||
- For Grafana Cloud: A [Cloud Access Policy token](https://grafana.com/docs/grafana-cloud/account-management/authentication-and-permissions/access-policies/) with data source permissions
|
||||
|
||||
### Provider configuration
|
||||
|
||||
Configure the Grafana provider to connect to your Grafana instance:
|
||||
|
||||
```hcl
|
||||
terraform {
|
||||
required_providers {
|
||||
grafana = {
|
||||
source = "grafana/grafana"
|
||||
version = ">= 2.0.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# For Grafana Cloud
|
||||
provider "grafana" {
|
||||
url = "<YOUR_GRAFANA_CLOUD_STACK_URL>"
|
||||
auth = "<YOUR_SERVICE_ACCOUNT_TOKEN>"
|
||||
}
|
||||
|
||||
# For self-hosted Grafana
|
||||
# provider "grafana" {
|
||||
# url = "http://localhost:3000"
|
||||
# auth = "<API_KEY_OR_SERVICE_ACCOUNT_TOKEN>"
|
||||
# }
|
||||
```
|
||||
|
||||
### Terraform examples
|
||||
|
||||
The following examples show how to configure the Azure Monitor data source for each authentication method.
|
||||
|
||||
**App Registration (client secret):**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "clientsecret"
|
||||
cloudName = "azuremonitor"
|
||||
tenantId = "<TENANT_ID>"
|
||||
clientId = "<CLIENT_ID>"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
|
||||
secure_json_data_encoded = jsonencode({
|
||||
clientSecret = "<CLIENT_SECRET>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**Managed Identity:**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "msi"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**Workload Identity:**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "workloadidentity"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**Current User:**
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "currentuser"
|
||||
oauthPassThru = true
|
||||
disableGrafanaCache = true
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
**With Basic Logs enabled:**
|
||||
|
||||
Add `enableBasicLogs = true` to any of the above configurations:
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "azure_monitor" {
|
||||
type = "grafana-azure-monitor-datasource"
|
||||
name = "Azure Monitor"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
azureAuthType = "clientsecret"
|
||||
cloudName = "azuremonitor"
|
||||
tenantId = "<TENANT_ID>"
|
||||
clientId = "<CLIENT_ID>"
|
||||
subscriptionId = "<SUBSCRIPTION_ID>"
|
||||
enableBasicLogs = true
|
||||
})
|
||||
|
||||
secure_json_data_encoded = jsonencode({
|
||||
clientSecret = "<CLIENT_SECRET>"
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
For more information about the Grafana Terraform provider, refer to the [provider documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs) and the [grafana_data_source resource](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/data_source).
|
||||
@@ -21,7 +21,6 @@ labels:
|
||||
menuTitle: Query editor
|
||||
title: Azure Monitor query editor
|
||||
weight: 300
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
query-transform-data-query-options:
|
||||
- pattern: /docs/grafana/
|
||||
@@ -33,85 +32,30 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
troubleshoot-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
|
||||
configure-grafana-feature-toggles:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/
|
||||
template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
alerting-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
|
||||
annotations-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
|
||||
---
|
||||
|
||||
# Azure Monitor query editor
|
||||
|
||||
Grafana provides a query editor for the Azure Monitor data source, which is located on the [Explore page](ref:explore). You can also access the Azure Monitor query editor from a dashboard panel. Click the menu in the upper right of the panel and select **Edit**.
This topic explains querying specific to the Azure Monitor data source.
For general documentation on querying data sources in Grafana, refer to [Query and transform data](ref:query-transform-data).
|
||||
## Choose a query editing mode
|
||||
|
||||
The Azure Monitor data source can query data from Azure Monitor Metrics and Logs, the Azure Resource Graph, and Application Insights Traces. Each source has its own specialized query editor.
|
||||
|
||||
## Before you begin
|
||||
|
||||
- Ensure you have [configured the Azure Monitor data source](ref:configure-azure-monitor).
|
||||
- Verify your credentials have appropriate permissions for the resources you want to query.
|
||||
|
||||
## Key concepts
|
||||
|
||||
If you're new to Azure Monitor, here are some key terms used throughout this documentation:
|
||||
|
||||
| Term | Description |
|
||||
| ------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **KQL (Kusto Query Language)** | The query language used for Azure Monitor Logs and Azure Resource Graph. KQL uses a pipe-based syntax similar to Unix commands and is optimized for read-only data exploration. If you know SQL, the [SQL to Kusto cheat sheet](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/sqlcheatsheet) can help you get started. |
|
||||
| **Log Analytics workspace** | An Azure resource that collects and stores log data from your Azure resources, applications, and services. You query this data using KQL. |
|
||||
| **Application Insights** | Azure's application performance monitoring (APM) service. It collects telemetry data like requests, exceptions, and traces from your applications. |
|
||||
| **Metrics vs. Logs** | **Metrics** are lightweight numeric values collected at regular intervals (e.g., CPU percentage). **Logs** are detailed records of events with varying schemas (e.g., request logs, error messages). Metrics use a visual query builder; Logs require KQL. |
|
||||
|
||||
## Choose a query editor mode
|
||||
|
||||
The Azure Monitor data source's query editor has four modes depending on which Azure service you want to query:
|
||||
|
||||
- **Metrics** for [Azure Monitor Metrics](#query-azure-monitor-metrics)
- **Logs** for [Azure Monitor Logs](#query-azure-monitor-logs)
- **Traces** for [Application Insights Traces](#query-application-insights-traces)
- **Azure Resource Graph** for [Azure Resource Graph](#query-azure-resource-graph)
|
||||
|
||||
## Query Azure Monitor Metrics
|
||||
|
||||
Azure Monitor Metrics collects numeric data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and you can query them to investigate your resources' health and usage and maximize availability and performance.
|
||||
|
||||
Monitor Metrics use a lightweight format that stores only numeric data in a specific structure and supports near real-time scenarios, making it useful for fast detection of issues.
|
||||
In contrast, Azure Monitor Logs can store a variety of data types, each with their own structure.
|
||||
|
||||
{{< figure src="/static/img/docs/azure-monitor/query-editor-metrics.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor Metrics sample query visualizing CPU percentage over time" >}}
|
||||
|
||||
### Create a Metrics query
|
||||
|
||||
@@ -141,7 +85,7 @@ Optionally, you can apply further aggregations or filter by dimensions.
|
||||
|
||||
The available options change depending on what is relevant to the selected metric.
|
||||
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
|
||||
### Format legend aliases
|
||||
|
||||
@@ -165,7 +109,7 @@ For example:
|
||||
| `{{ dimensionname }}` | _(Legacy for backward compatibility)_ Replaced with the name of the first dimension. |
|
||||
| `{{ dimensionvalue }}` | _(Legacy for backward compatibility)_ Replaced with the value of the first dimension. |
|
||||
|
||||
### Filter using dimensions
|
||||
|
||||
Some metrics also have dimensions, which associate additional metadata.
|
||||
Dimensions are represented as key-value pairs assigned to each value of a metric.
|
||||
@@ -177,7 +121,7 @@ For more information on multi-dimensional metrics, refer to the [Azure Monitor d
|
||||
|
||||
## Query Azure Monitor Logs
|
||||
|
||||
Azure Monitor Logs collects and organizes log and performance data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and makes many sources of data available to query together with the [Kusto Query Language (KQL)](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/).
|
||||
|
||||
While Azure Monitor Metrics stores only simplified numerical data, Logs can store different data types, each with their own structure.
|
||||
You can also perform complex analysis of Logs data by using KQL.
|
||||
@@ -186,32 +130,6 @@ The Azure Monitor data source also supports querying of [Basic Logs](https://lea
|
||||
|
||||
{{< figure src="/static/img/docs/azure-monitor/query-editor-logs.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor Logs sample query comparing successful requests to failed requests" >}}
|
||||
|
||||
### Logs query builder (public preview)
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The Logs query builder is a [public preview feature](/docs/release-life-cycle/). It may not be enabled in all Grafana environments.
|
||||
{{< /admonition >}}
|
||||
|
||||
The Logs query builder provides a visual interface for building Azure Monitor Logs queries without writing KQL. This is helpful if you're new to KQL or want to quickly build simple queries.
|
||||
|
||||
**To enable the Logs query builder:**
|
||||
|
||||
1. Enable the `azureMonitorLogsBuilderEditor` [feature toggle](ref:configure-grafana-feature-toggles) in your Grafana configuration.
|
||||
1. Restart Grafana for the change to take effect.
|
||||
|
||||
**To switch between Builder and Code modes:**
|
||||
|
||||
When the feature is enabled, a **Builder / Code** toggle appears in the Logs query editor:
|
||||
|
||||
- **Builder**: Use the visual interface to select tables, columns, filters, and aggregations. The builder generates the KQL query for you.
|
||||
- **Code**: Write KQL queries directly. Use this mode for complex queries that require full KQL capabilities.
|
||||
|
||||
New queries default to Builder mode. Existing queries that were created with raw KQL remain in Code mode.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
You can switch from Builder to Code mode at any time to view or edit the generated KQL. However, switching from Code to Builder mode may not preserve complex queries that can't be represented in the builder interface.
|
||||
{{< /admonition >}}
|
||||
|
||||
### Create a Logs query
|
||||
|
||||
**To create a Logs query:**
|
||||
@@ -222,13 +140,13 @@ You can switch from Builder to Code mode at any time to view or edit the generat
|
||||
|
||||
Alternatively, you can dynamically query all resources under a single resource group or subscription.
|
||||
{{< admonition type="note" >}}
If a time span is specified in the query, the overlap between the query time span and the dashboard time range will be used. See the [API documentation for
details.](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters)
{{< /admonition >}}
|
||||
|
||||
1. Enter your KQL query.
|
||||
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
|
||||
**To create a Basic Logs query:**
|
||||
|
||||
@@ -243,7 +161,7 @@ You can also augment queries by using [template variables](ref:template-variable
|
||||
{{< /admonition >}}
|
||||
1. Enter your KQL query.
|
||||
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
|
||||
### Logs query examples
|
||||
|
||||
@@ -256,28 +174,24 @@ The Azure documentation includes resources to help you learn KQL:
|
||||
- [Tutorial: Use Kusto queries in Azure Monitor](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/tutorial?pivots=azuremonitor)
|
||||
- [SQL to Kusto cheat sheet](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/sqlcheatsheet)
|
||||
|
||||
{{< admonition type="note" >}}
**Time-range:** The time-range used for the query can be modified via the time-range switch:

- Selecting **Query** uses only time-ranges specified within the query.
- Selecting **Dashboard** uses only the Grafana dashboard time-range.
- If no time-range is specified in the query, the default Log Analytics time-range applies.

For more details, refer to the [Azure Monitor Logs API documentation](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters). If you previously used the `Intersection` option, it has been migrated to `Dashboard`.
{{< /admonition >}}
|
||||
|
||||
This example query returns a virtual machine's CPU performance, averaged over 5-minute time grains:
|
||||
|
||||
```kusto
|
||||
Perf
|
||||
// $__timeFilter is a special Grafana macro that filters the results to the time span of the dashboard
|
||||
// $__timeFilter is a special Grafana macro that filters the results to the time span of the dashboard
|
||||
| where $__timeFilter(TimeGenerated)
|
||||
| where CounterName == "% Processor Time"
|
||||
| summarize avg(CounterValue) by bin(TimeGenerated, 5m), Computer
|
||||
| order by TimeGenerated asc
|
||||
```
|
||||
|
||||
Use time series queries for values that change over time, usually for graph visualizations such as the Time series panel.
|
||||
Use time series queries for values that change over time, usually for graph visualizations such as the Time series panel.
|
||||
Each query should return at least a datetime column and numeric value column.
|
||||
The result must also be sorted in ascending order by the datetime column.
|
||||
|
||||
@@ -443,33 +357,21 @@ Application Insights stores trace data in an underlying Log Analytics workspace
|
||||
This query type only supports Application Insights resources.
|
||||
{{< /admonition >}}
|
||||
|
||||
1. (Optional) Specify an **Operation ID** value to filter traces.
|
||||
1. (Optional) Specify **event types** to filter by.
|
||||
1. (Optional) Specify **event properties** to filter by.
|
||||
1. (Optional) Change the **Result format** to switch between tabular format and trace format.
|
||||
Running a query of this kind will return all trace data within the timespan specified by the panel/dashboard.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Selecting the trace format filters events to only the `trace` type. Use this format with the Trace visualization.
|
||||
{{< /admonition >}}
|
||||
Optionally, you can apply further filtering or select a specific Operation ID to query. The result format can also be switched between a tabular format or the trace format which will return the data in a format that can be used with the Trace visualization.
|
||||
|
||||
Running a query returns all trace data within the time span specified by the panel or dashboard time range.
|
||||
{{< admonition type="note" >}}
|
||||
Selecting the trace format will filter events with the `trace` type.
|
||||
{{< /admonition >}}
|
||||
|
||||
You can also augment queries by using [template variables](ref:template-variables).
|
||||
1. Specify an Operation ID value.
|
||||
1. Specify event types to filter by.
|
||||
1. Specify event properties to filter by.
|
||||
|
||||
## Use queries for alerting and recording rules
|
||||
You can also augment queries by using [template variables](../template-variables/).
|
||||
|
||||
All Azure Monitor query types (Metrics, Logs, Azure Resource Graph, and Traces) can be used with Grafana Alerting and recording rules.
|
||||
|
||||
For detailed information about creating alert rules, supported query types, authentication requirements, and examples, refer to [Azure Monitor alerting](ref:alerting-azure-monitor).
|
||||
|
||||
## Work with large Azure resource datasets
|
||||
## Working with large Azure resource data sets
|
||||
|
||||
If a request exceeds the [maximum allowed value of records](https://docs.microsoft.com/en-us/azure/governance/resource-graph/concepts/work-with-data#paging-results), the result is paginated and only the first page of results is returned.
|
||||
You can use filters to reduce the amount of records returned under that value.
|
||||
|
||||
## Next steps
|
||||
|
||||
- [Use template variables](../template-variables/) to create dynamic, reusable dashboards
|
||||
- [Add annotations](ref:annotations-azure-monitor) to overlay events on your graphs
|
||||
- [Set up alerting](ref:alerting-azure-monitor) to create alert rules based on Azure Monitor data
|
||||
- [Troubleshoot](ref:troubleshoot-azure-monitor) common query and configuration issues
|
||||
|
||||
@@ -23,7 +23,6 @@ labels:
|
||||
menuTitle: Template variables
|
||||
title: Azure Monitor template variables
|
||||
weight: 400
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
variables:
|
||||
- pattern: /docs/grafana/
|
||||
@@ -35,11 +34,6 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
---
|
||||
|
||||
# Azure Monitor template variables
|
||||
@@ -48,173 +42,58 @@ Instead of hard-coding details such as resource group or resource name values in
|
||||
This helps you create more interactive, dynamic, and reusable dashboards.
|
||||
Grafana refers to such variables as template variables.
|
||||
|
||||
For an introduction to templating and template variables, refer to the [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables) documentation.
|
||||
For an introduction to templating and template variables, refer to the [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables) documentation.
|
||||
|
||||
## Before you begin
|
||||
## Use query variables
|
||||
|
||||
- Ensure you have [configured the Azure Monitor data source](ref:configure-azure-monitor).
|
||||
- If you want template variables to auto-populate subscriptions, set a **Default Subscription** in the data source configuration.
|
||||
You can specify these Azure Monitor data source queries in the Variable edit view's **Query Type** field.
|
||||
|
||||
## Create a template variable
|
||||
|
||||
To create a template variable for Azure Monitor:
|
||||
|
||||
1. Open the dashboard where you want to add the variable.
|
||||
1. Click **Dashboard settings** (gear icon) in the top navigation.
|
||||
1. Select **Variables** in the left menu.
|
||||
1. Click **Add variable**.
|
||||
1. Enter a **Name** for your variable (e.g., `subscription`, `resourceGroup`, `resource`).
|
||||
1. In the **Type** dropdown, select **Query**.
|
||||
1. In the **Data source** dropdown, select your Azure Monitor data source.
|
||||
1. In the **Query Type** dropdown, select the appropriate query type (see [Available query types](#available-query-types)).
|
||||
1. Configure any additional fields required by the selected query type.
|
||||
1. Click **Run query** to preview the variable values.
|
||||
1. Configure display options such as **Multi-value** or **Include All option** as needed.
|
||||
1. Click **Apply** to save the variable.
|
||||
|
||||
## Available query types
|
||||
|
||||
The Azure Monitor data source provides the following query types for template variables:
|
||||
|
||||
| Query type | Description |
|
||||
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Subscriptions** | Returns a list of Azure subscriptions accessible to the configured credentials. |
|
||||
| **Resource Groups** | Returns resource groups for a specified subscription. Supports multi-value selection. |
|
||||
| **Namespaces** | Returns metric namespaces for the specified subscription. If a resource group is specified, returns only namespaces within that group. |
|
||||
| **Regions** | Returns Azure regions available for the specified subscription. |
|
||||
| **Resource Names** | Returns resource names for a specified subscription, resource group, and namespace. Supports multi-value selection. |
|
||||
| **Metric Names** | Returns available metric names for a specified resource. |
|
||||
| **Workspaces** | Returns Log Analytics workspaces for the specified subscription. |
|
||||
| **Logs** | Executes a KQL query and returns the results as variable values. See [Create a Logs variable](#create-a-logs-variable). |
|
||||
| **Custom Namespaces** | Returns custom metric namespaces for a specified resource. |
|
||||
| **Custom Metric Names** | Returns custom metric names for a specified resource. |
|
||||
| Name | Description |
|
||||
| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Subscriptions** | Returns subscriptions. |
|
||||
| **Resource Groups** | Returns resource groups for a specified subscription. Supports multi-value. |
|
||||
| **Namespaces** | Returns metric namespaces for the specified subscription. If a resource group is provided, only the namespaces within that group are returned. |
|
||||
| **Regions**             | Returns regions for the specified subscription.                                                                                                |
|
||||
| **Resource Names** | Returns a list of resource names for a specified subscription, resource group and namespace. Supports multi-value. |
|
||||
| **Metric Names** | Returns a list of metric names for a resource. |
|
||||
| **Workspaces** | Returns a list of workspaces for the specified subscription. |
|
||||
| **Logs** | Use a KQL query to return values. |
|
||||
| **Custom Namespaces** | Returns metric namespaces for the specified resource. |
|
||||
| **Custom Metric Names** | Returns a list of custom metric names for the specified resource. |
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Custom metrics cannot be emitted against a subscription or resource group. Select specific resources when retrieving custom metric namespaces or custom metric names.
|
||||
Custom metrics cannot be emitted against a subscription or resource group. Select resources only when you need to retrieve custom metric namespaces or custom metric names associated with a specific resource.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Create cascading variables
|
||||
You can use any Log Analytics Kusto Query Language (KQL) query that returns a single list of values in the `Query` field.
|
||||
For example:
|
||||
|
||||
Cascading variables (also called dependent or chained variables) allow you to create dropdown menus that filter based on previous selections. This is useful for drilling down from subscription to resource group to specific resource.
|
||||
| Query | List of values returned |
|
||||
| ----------------------------------------------------------------------------------------- | --------------------------------------- |
|
||||
| `workspace("myWorkspace").Heartbeat \| distinct Computer` | Virtual machines |
|
||||
| `workspace("$workspace").Heartbeat \| distinct Computer` | Virtual machines with template variable |
|
||||
| `workspace("$workspace").Perf \| distinct ObjectName` | Objects from the Perf table |
|
||||
| `workspace("$workspace").Perf \| where ObjectName == "$object"` `\| distinct CounterName` | Metric names from the Perf table |
|
||||
|
||||
### Example: Subscription → Resource Group → Resource Name
|
||||
### Query variable example
|
||||
|
||||
**Step 1: Create a Subscription variable**
|
||||
|
||||
1. Create a variable named `subscription`.
|
||||
1. Set **Query Type** to **Subscriptions**.
|
||||
|
||||
**Step 2: Create a Resource Group variable**
|
||||
|
||||
1. Create a variable named `resourceGroup`.
|
||||
1. Set **Query Type** to **Resource Groups**.
|
||||
1. In the **Subscription** field, select `$subscription`.
|
||||
|
||||
**Step 3: Create a Resource Name variable**
|
||||
|
||||
1. Create a variable named `resource`.
|
||||
1. Set **Query Type** to **Resource Names**.
|
||||
1. In the **Subscription** field, select `$subscription`.
|
||||
1. In the **Resource Group** field, select `$resourceGroup`.
|
||||
1. Select the appropriate **Namespace** for your resources (e.g., `Microsoft.Compute/virtualMachines`).
|
||||
|
||||
Now when you change the subscription, the resource group dropdown updates automatically, and when you change the resource group, the resource name dropdown updates.
|
||||
|
||||
## Create a Logs variable
|
||||
|
||||
The **Logs** query type lets you use a KQL query to populate variable values. The query must return a single column of values.
|
||||
|
||||
**To create a Logs variable:**
|
||||
|
||||
1. Create a new variable with **Query Type** set to **Logs**.
|
||||
1. Select a **Resource** (Log Analytics workspace or Application Insights resource).
|
||||
1. Enter a KQL query that returns a single column.
|
||||
|
||||
### Logs variable query examples
|
||||
|
||||
| Query | Returns |
|
||||
| ----------------------------------------- | ------------------------------------- |
|
||||
| `Heartbeat \| distinct Computer` | List of virtual machine names |
|
||||
| `Perf \| distinct ObjectName` | List of performance object names |
|
||||
| `AzureActivity \| distinct ResourceGroup` | List of resource groups with activity |
|
||||
| `AppRequests \| distinct Name` | List of application request names |
|
||||
|
||||
You can reference other variables in your Logs query:
|
||||
|
||||
```kusto
|
||||
workspace("$workspace").Heartbeat | distinct Computer
|
||||
```
|
||||
|
||||
```kusto
|
||||
workspace("$workspace").Perf
|
||||
| where ObjectName == "$object"
|
||||
| distinct CounterName
|
||||
```
|
||||
|
||||
## Variable refresh options
|
||||
|
||||
Control when your variables refresh by setting the **Refresh** option:
|
||||
|
||||
| Option | Behavior |
|
||||
| ------------------------ | ----------------------------------------------------------------------------------------- |
|
||||
| **On dashboard load** | Variables refresh each time the dashboard loads. Best for data that changes infrequently. |
|
||||
| **On time range change** | Variables refresh when the dashboard time range changes. Use for time-sensitive queries. |
|
||||
|
||||
For dashboards with many variables or complex queries, use **On dashboard load** to improve performance.
|
||||
|
||||
## Use variables in queries
|
||||
|
||||
After you create template variables, you can use them in your Azure Monitor queries by referencing them with the `$` prefix.
|
||||
|
||||
### Metrics query example
|
||||
|
||||
In a Metrics query, select your variables in the resource picker fields:
|
||||
|
||||
- **Subscription**: `$subscription`
|
||||
- **Resource Group**: `$resourceGroup`
|
||||
- **Resource Name**: `$resource`
|
||||
|
||||
### Logs query example
|
||||
|
||||
Reference variables directly in your KQL queries:
|
||||
This time series query uses query variables:
|
||||
|
||||
```kusto
|
||||
Perf
|
||||
| where ObjectName == "$object" and CounterName == "$metric"
|
||||
| where TimeGenerated >= $__timeFrom() and TimeGenerated <= $__timeTo()
|
||||
| where $__contains(Computer, $computer)
|
||||
|
||||
| summarize avg(CounterValue) by bin(TimeGenerated, $__interval), Computer
|
||||
| order by TimeGenerated asc
|
||||
```
|
||||
|
||||
## Multi-value variables
|
||||
### Multi-value variables
|
||||
|
||||
You can enable **Multi-value** selection for **Resource Groups** and **Resource Names** variables. When using multi-value variables in a Metrics query, all selected resources must:
|
||||
It is possible to select multiple values for **Resource Groups** and **Resource Names** and use a single metrics query pointing to those values as long as they:
|
||||
|
||||
- Belong to the same subscription
|
||||
- Be in the same Azure region
|
||||
- Be of the same resource type (namespace)
|
||||
- Belong to the same subscription.
|
||||
- Are in the same region.
|
||||
- Are of the same type (namespace).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
When a multi-value variable is used as a parameter in another variable query (for example, to retrieve metric names), only the first selected value is used. Ensure the first resource group and resource name combination is valid.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Troubleshoot template variables
|
||||
|
||||
If you encounter issues with template variables, try the following solutions.
|
||||
|
||||
### Variable returns no values
|
||||
|
||||
- Verify the Azure Monitor data source is configured correctly and can connect to Azure.
|
||||
- Check that the credentials have appropriate permissions to list the requested resources.
|
||||
- For cascading variables, ensure parent variables have valid selections.
|
||||
|
||||
### Variable values are outdated
|
||||
|
||||
- Check the **Refresh** setting and adjust if needed.
|
||||
- Click the refresh icon next to the variable dropdown to manually refresh.
|
||||
|
||||
### Multi-value selection not working in queries
|
||||
|
||||
- Ensure the resources meet the requirements (same subscription, region, and type).
|
||||
- For Logs queries, use the `$__contains()` macro to handle multi-value variables properly.
|
||||
Also, note that if a template variable pointing to multiple resource groups or names is used in another template variable as a parameter (e.g. to retrieve metric names), only the first value will be used. This means that the combination of the first resource group and name selected should be valid.
|
||||
|
||||
@@ -1,320 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/azure-monitor/troubleshooting/
|
||||
description: Troubleshooting guide for the Azure Monitor data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- azure
|
||||
- monitor
|
||||
- troubleshooting
|
||||
- errors
|
||||
- authentication
|
||||
- query
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Troubleshoot
|
||||
title: Troubleshoot Azure Monitor data source issues
|
||||
weight: 500
|
||||
last_reviewed: 2025-12-04
|
||||
refs:
|
||||
configure-azure-monitor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
|
||||
template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
|
||||
query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
|
||||
---
|
||||
|
||||
# Troubleshoot Azure Monitor data source issues
|
||||
|
||||
This document provides solutions to common issues you may encounter when configuring or using the Azure Monitor data source.
|
||||
|
||||
## Configuration and authentication errors
|
||||
|
||||
These errors typically occur when setting up the data source or when authentication credentials are invalid.
|
||||
|
||||
### "Authorization failed" or "Access denied"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Save & test fails with "Authorization failed"
|
||||
- Queries return "Access denied" errors
|
||||
- Subscriptions don't load when clicking **Load Subscriptions**
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| -------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| App registration doesn't have required permissions | Assign the `Reader` role to the app registration on the subscription or resource group you want to monitor. Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current). |
|
||||
| Incorrect tenant ID, client ID, or client secret | Verify the credentials in the Azure Portal under **App registrations** > your app > **Overview** (for IDs) and **Certificates & secrets** (for secret). |
|
||||
| Client secret has expired | Create a new client secret in Azure and update the data source configuration. |
|
||||
| Managed Identity not enabled on the Azure resource | For VMs, enable managed identity in the Azure Portal under **Identity**. For App Service, enable it under **Identity** in the app settings. |
|
||||
| Managed Identity not assigned the Reader role | Assign the `Reader` role to the managed identity on the target subscription or resources. |
|
||||
|
||||
### "Invalid client secret" or "Client secret not found"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Authentication fails immediately after configuration
|
||||
- Error message references invalid credentials
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Ensure you copied the client secret **value**, not the secret ID. In Azure Portal under **Certificates & secrets**, the secret value is only shown once when created. The secret ID is a different identifier and won't work for authentication.
|
||||
2. Verify the client secret was copied correctly (no extra spaces or truncation).
|
||||
3. Check if the secret has expired in Azure Portal under **App registrations** > your app > **Certificates & secrets**.
|
||||
4. Create a new secret and update the data source configuration.
|
||||
|
||||
### "Tenant not found" or "Invalid tenant ID"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Data source test fails with tenant-related errors
|
||||
- Unable to authenticate
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the Directory (tenant) ID in Azure Portal under **Microsoft Entra ID** > **Overview**.
|
||||
2. Ensure you're using the correct Azure cloud setting (Azure, Azure Government, or Azure China).
|
||||
3. Check that the tenant ID is a valid GUID format.
|
||||
|
||||
### Managed Identity not working
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Managed Identity option is available but authentication fails
|
||||
- Error: "Managed identity authentication is not available"
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify `managed_identity_enabled = true` is set in the Grafana server configuration under `[azure]`.
|
||||
2. Confirm the Azure resource hosting Grafana has managed identity enabled.
|
||||
3. For user-assigned managed identity, ensure `managed_identity_client_id` is set correctly.
|
||||
4. Verify the managed identity has the `Reader` role on the target resources.
|
||||
5. Restart Grafana after changing server configuration.
|
||||
|
||||
### Workload Identity not working
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Workload Identity authentication fails in Kubernetes/AKS environment
|
||||
- Token file errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify `workload_identity_enabled = true` is set in the Grafana server configuration.
|
||||
2. Check that the service account is correctly annotated for workload identity.
|
||||
3. Verify the federated credential is configured in Azure.
|
||||
4. Ensure the token path is accessible to the Grafana pod.
|
||||
5. Check the workload identity webhook is running in the cluster.
|
||||
|
||||
## Query errors
|
||||
|
||||
These errors occur when executing queries against Azure Monitor services.
|
||||
|
||||
### "No data" or empty results
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query executes without error but returns no data
|
||||
- Charts show "No data" message
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Time range doesn't contain data | Expand the dashboard time range or verify data exists in Azure Portal. |
|
||||
| Wrong resource selected | Verify you've selected the correct subscription, resource group, and resource. |
|
||||
| Metric not available for resource | Not all metrics are available for all resources. Check available metrics in Azure Portal under the resource's **Metrics** blade. |
|
||||
| Metric has no values | Some metrics only populate under certain conditions (e.g., error counts when errors occur). |
|
||||
| Permissions issue | Verify the identity has read access to the specific resource. |
|
||||
|
||||
### "Bad request" or "Invalid query"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query fails with 400 error
|
||||
- Error message indicates query syntax issues
|
||||
|
||||
**Solutions for Logs queries:**
|
||||
|
||||
1. Validate your KQL syntax in the Azure Portal Log Analytics query editor.
|
||||
2. Check for typos in table names or column names.
|
||||
3. Ensure referenced tables exist in the selected workspace.
|
||||
4. Verify the time range is valid (not in the future, not too far in the past for data retention).
|
||||
|
||||
**Solutions for Metrics queries:**
|
||||
|
||||
1. Verify the metric name is valid for the selected resource type.
|
||||
2. Check that dimension filters use valid dimension names and values.
|
||||
3. Ensure the aggregation type is supported for the selected metric.
|
||||
|
||||
### "Resource not found"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query fails with 404 error
|
||||
- Resource picker shows resources that can't be queried
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the resource still exists in Azure (it may have been deleted or moved).
|
||||
2. Check that the subscription is correct.
|
||||
3. Refresh the resource picker by re-selecting the subscription.
|
||||
4. Verify the identity has access to the resource's resource group.
|
||||
|
||||
### Logs query timeout
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query runs for a long time then fails
|
||||
- Error mentions timeout or query limits
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Narrow the time range to reduce data volume.
|
||||
2. Add filters to reduce the result set.
|
||||
3. Use `summarize` to aggregate data instead of returning raw rows.
|
||||
4. Consider using Basic Logs for large datasets (if enabled).
|
||||
5. Break complex queries into smaller parts.
|
||||
|
||||
### "Metrics not available" for a resource
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Resource appears in picker but no metrics are listed
|
||||
- Metric dropdown is empty
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the resource type supports Azure Monitor metrics.
|
||||
2. Check if the resource is in a region that supports metrics.
|
||||
3. Some resources require diagnostic settings to emit metrics—configure these in Azure Portal.
|
||||
4. Try selecting a different namespace for the resource.
|
||||
|
||||
## Azure Resource Graph errors
|
||||
|
||||
These errors are specific to Azure Resource Graph (ARG) queries.
|
||||
|
||||
### "Query execution failed"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- ARG query fails with execution errors
|
||||
- Results don't match expected resources
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Validate query syntax in Azure Portal Resource Graph Explorer.
|
||||
2. Check that you have access to the subscriptions being queried.
|
||||
3. Verify table names are correct (e.g., `Resources`, `ResourceContainers`).
|
||||
4. Some ARG features require specific permissions; check the [ARG documentation](https://docs.microsoft.com/en-us/azure/governance/resource-graph/).
|
||||
|
||||
### Query returns incomplete results
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Not all expected resources appear in results
|
||||
- Results seem truncated
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. ARG queries are paginated. The data source handles pagination automatically, but very large result sets may be limited.
|
||||
2. Add filters to reduce result set size.
|
||||
3. Verify you have access to all subscriptions containing the resources.
|
||||
|
||||
## Application Insights Traces errors
|
||||
|
||||
These errors are specific to the Traces query type.
|
||||
|
||||
### "No traces found"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Trace query returns empty results
|
||||
- Operation ID search finds nothing
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the Application Insights resource is collecting trace data.
|
||||
2. Check that the time range includes when the traces were generated.
|
||||
3. Ensure the Operation ID is correct (copy directly from another trace or log).
|
||||
4. Verify the identity has access to the Application Insights resource.
|
||||
|
||||
## Template variable errors
|
||||
|
||||
For detailed troubleshooting of template variables, refer to the [template variables troubleshooting section](ref:template-variables).
|
||||
|
||||
### Variables return no values
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the data source connection is working (test it in the data source settings).
|
||||
2. Check that parent variables (for cascading variables) have valid selections.
|
||||
3. Verify the identity has permissions to list the requested resources.
|
||||
4. For Logs variables, ensure the KQL query returns a single column.
|
||||
|
||||
### Variables are slow to load
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Set variable refresh to **On dashboard load** instead of **On time range change**.
|
||||
2. Reduce the scope of variable queries (e.g., filter by resource group instead of entire subscription).
|
||||
3. For Logs variables, optimize the KQL query to return results faster.
|
||||
|
||||
## Connection and network errors
|
||||
|
||||
These errors indicate problems with network connectivity between Grafana and Azure services.
|
||||
|
||||
### "Connection refused" or timeout errors
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Data source test fails with network errors
|
||||
- Queries timeout without returning results
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify network connectivity from Grafana to Azure endpoints.
|
||||
2. Check firewall rules allow outbound HTTPS (port 443) to Azure services.
|
||||
3. For private networks, ensure Private Link or VPN is configured correctly.
|
||||
4. For Grafana Cloud, configure [Private Data Source Connect](ref:configure-azure-monitor) if accessing private resources.
|
||||
|
||||
### SSL/TLS certificate errors
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Certificate validation failures
|
||||
- SSL handshake errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Ensure the system time is correct (certificate validation fails with incorrect time).
|
||||
2. Verify corporate proxy isn't intercepting HTTPS traffic.
|
||||
3. Check that required CA certificates are installed on the Grafana server.
|
||||
|
||||
## Get additional help
|
||||
|
||||
If you've tried the solutions above and still encounter issues:
|
||||
|
||||
1. Check the [Grafana community forums](https://community.grafana.com/) for similar issues.
|
||||
1. Review the [Azure Monitor data source GitHub issues](https://github.com/grafana/grafana/issues) for known bugs.
|
||||
1. Enable debug logging in Grafana to capture detailed error information.
|
||||
1. Contact Grafana Support if you're an Enterprise, Cloud Pro, or Cloud Contracted user.
|
||||
1. When reporting issues, include:
|
||||
- Grafana version
|
||||
- Error messages (redact sensitive information)
|
||||
- Steps to reproduce
|
||||
- Relevant configuration (redact credentials)
|
||||
@@ -17,6 +17,16 @@ menuTitle: Elasticsearch
|
||||
title: Elasticsearch data source
|
||||
weight: 325
|
||||
refs:
|
||||
configuration:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#sigv4_auth_enabled
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#sigv4_auth_enabled
|
||||
provisioning-grafana:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
@@ -34,61 +44,100 @@ refs:
|
||||
Elasticsearch is a search and analytics engine used for a variety of use cases.
|
||||
You can create many types of queries to visualize logs or metrics stored in Elasticsearch, and annotate graphs with log events stored in Elasticsearch.
|
||||
|
||||
The following resources will help you get started with Elasticsearch and Grafana:
|
||||
|
||||
- [What is Elasticsearch?](https://www.elastic.co/guide/en/elasticsearch/reference/current/elasticsearch-intro.html)
|
||||
- [Configure the Elasticsearch data source](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/configure/)
|
||||
- [Elasticsearch query editor](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/query-editor/)
|
||||
- [Elasticsearch template variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/template-variables/)
|
||||
- [Elasticsearch annotations](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/annotations/)
|
||||
- [Elasticsearch alerting](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/alerting/)
|
||||
- [Troubleshooting issues with the Elasticsearch data source](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/troubleshooting/)
|
||||
|
||||
## Key capabilities
|
||||
|
||||
The Elasticsearch data source supports:
|
||||
|
||||
- **Metrics queries:** Aggregate and visualize numeric data using bucket and metric aggregations.
|
||||
- **Log queries:** Search, filter, and explore log data with Lucene query syntax.
|
||||
- **Annotations:** Overlay Elasticsearch events on your dashboard graphs.
|
||||
- **Alerting:** Create alerts based on Elasticsearch query results.
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before you configure the Elasticsearch data source, you need:
|
||||
|
||||
- An Elasticsearch instance (v7.17+, v8.x, or v9.x)
|
||||
- Network access from Grafana to your Elasticsearch server
|
||||
- Appropriate user credentials or API keys with read access
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
If you use Amazon OpenSearch Service (the successor to Amazon Elasticsearch Service), use the [OpenSearch data source](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/opensearch/) instead.
|
||||
{{< /admonition >}}
|
||||
- [Configure the Elasticsearch data source](/docs/grafana/latest/datasources/elasticsearch/configure-elasticsearch-data-source/)
|
||||
- [Elasticsearch query editor](query-editor/)
|
||||
- [Elasticsearch template variables](template-variables/)
|
||||
|
||||
## Supported Elasticsearch versions
|
||||
|
||||
{{< admonition type="warning" >}}
|
||||
The Elasticsearch data source plugin currently does not support Elastic Cloud Serverless, or any other serverless variant of Elasticsearch.
|
||||
{{< /admonition >}}
|
||||
|
||||
This data source supports these versions of Elasticsearch:
|
||||
|
||||
- v7.17+
|
||||
- v8.x
|
||||
- v9.x
|
||||
|
||||
The Grafana maintenance policy for the Elasticsearch data source aligns with [Elastic Product End of Life Dates](https://www.elastic.co/support/eol). Grafana ensures proper functionality for supported versions only. If you use an EOL version of Elasticsearch, you can still run queries, but the query builder displays a warning. Grafana doesn't guarantee functionality or provide fixes for EOL versions.
|
||||
|
||||
## Additional resources
|
||||
## Provision the data source
|
||||
|
||||
Once you have configured the Elasticsearch data source, you can:
|
||||
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
|
||||
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-grafana).
|
||||
|
||||
- Use [Explore](ref:explore) to run ad-hoc queries against your Elasticsearch data.
|
||||
- Configure and use [template variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/elasticsearch/template-variables/) for dynamic dashboards.
|
||||
- Add [Transformations](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/) to process query results.
|
||||
- [Build dashboards](ref:build-dashboards) to visualize your Elasticsearch data.
|
||||
{{< admonition type="note" >}}
|
||||
The previously used `database` field has now been [deprecated](https://github.com/grafana/grafana/pull/58647).
|
||||
You should now use the `index` field in `jsonData` to store the index name.
|
||||
Refer to the following examples.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Related data sources
|
||||
### Provisioning examples
|
||||
|
||||
- [OpenSearch](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/opensearch/) - For Amazon OpenSearch Service.
|
||||
- [Loki](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/loki/) - Grafana's log aggregation system.
|
||||
**Basic provisioning**
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Elastic
|
||||
type: elasticsearch
|
||||
access: proxy
|
||||
url: http://localhost:9200
|
||||
jsonData:
|
||||
index: '[metrics-]YYYY.MM.DD'
|
||||
interval: Daily
|
||||
timeField: '@timestamp'
|
||||
```
|
||||
|
||||
**Provision for logs**
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: elasticsearch-v7-filebeat
|
||||
type: elasticsearch
|
||||
access: proxy
|
||||
url: http://localhost:9200
|
||||
jsonData:
|
||||
index: '[filebeat-]YYYY.MM.DD'
|
||||
interval: Daily
|
||||
timeField: '@timestamp'
|
||||
logMessageField: message
|
||||
logLevelField: fields.level
|
||||
dataLinks:
|
||||
- datasourceUid: my_jaeger_uid # Target UID needs to be known
|
||||
field: traceID
|
||||
url: '$${__value.raw}' # Careful about the double "$$" because of env var expansion
|
||||
```
|
||||
|
||||
## Configure Amazon Elasticsearch Service
|
||||
|
||||
If you use Amazon Elasticsearch Service, you can use Grafana's Elasticsearch data source to visualize data from it.
|
||||
|
||||
If you use an AWS Identity and Access Management (IAM) policy to control access to your Amazon Elasticsearch Service domain, you must use AWS Signature Version 4 (AWS SigV4) to sign all requests to that domain.
|
||||
|
||||
For details on AWS SigV4, refer to the [AWS documentation](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
|
||||
|
||||
### AWS Signature Version 4 authentication
|
||||
|
||||
To sign requests to your Amazon Elasticsearch Service domain, you can enable SigV4 in Grafana's [configuration](ref:configuration).
|
||||
|
||||
Once AWS SigV4 is enabled, you can configure it on the Elasticsearch data source configuration page.
|
||||
For more information about AWS authentication options, refer to [AWS authentication](../aws-cloudwatch/aws-authentication/).
|
||||
|
||||
{{< figure src="/static/img/docs/v73/elasticsearch-sigv4-config-editor.png" max-width="500px" class="docs-image--no-shadow" caption="SigV4 configuration for AWS Elasticsearch Service" >}}
|
||||
|
||||
## Query the data source
|
||||
|
||||
You can select multiple metrics and group by multiple terms or filters when using the Elasticsearch query editor.
|
||||
|
||||
For details, see the [query editor documentation](query-editor/).
|
||||
|
||||
## Use template variables
|
||||
|
||||
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
|
||||
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
|
||||
Grafana refers to such variables as template variables.
|
||||
|
||||
For details, see the [template variables documentation](template-variables/).
|
||||
|
||||
@@ -1,144 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/elasticsearch/alerting/
|
||||
description: Using Grafana Alerting with the Elasticsearch data source
|
||||
keywords:
|
||||
- grafana
|
||||
- elasticsearch
|
||||
- alerting
|
||||
- alerts
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Alerting
|
||||
title: Elasticsearch alerting
|
||||
weight: 550
|
||||
refs:
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/
|
||||
create-alert-rule:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
---
|
||||
|
||||
# Elasticsearch alerting
|
||||
|
||||
You can use Grafana Alerting with Elasticsearch to create alerts based on your Elasticsearch data. This allows you to monitor metrics, detect anomalies, and receive notifications when specific conditions are met.
|
||||
|
||||
For general information about Grafana Alerting, refer to [Grafana Alerting](ref:alerting).
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before creating alerts with Elasticsearch, ensure you have:
|
||||
|
||||
- An Elasticsearch data source configured in Grafana
|
||||
- Appropriate permissions to create alert rules
|
||||
- Understanding of the metrics you want to monitor
|
||||
|
||||
## Supported query types
|
||||
|
||||
Elasticsearch alerting works best with **metrics queries** that return time series data. To create a valid alert query:
|
||||
|
||||
- Use a **Date histogram** as the last bucket aggregation (under **Group by**)
|
||||
- Select appropriate metric aggregations (Count, Average, Sum, Min, Max, etc.)
|
||||
|
||||
Queries that return time series data allow Grafana to evaluate values over time and trigger alerts when thresholds are crossed.
|
||||
|
||||
### Query types and alerting compatibility
|
||||
|
||||
| Query type | Alerting support | Notes |
|
||||
| ------------------------------ | ---------------- | ----------------------------------------------------------- |
|
||||
| Metrics with Date histogram | ✅ Full support | Recommended for alerting |
|
||||
| Metrics without Date histogram | ⚠️ Limited | May not evaluate correctly over time |
|
||||
| Logs | ❌ Not supported | Use metrics queries instead |
|
||||
| Raw data | ❌ Not supported | Use metrics queries instead |
|
||||
| Raw document (deprecated) | ❌ Not supported | Deprecated since Grafana v10.1. Use metrics queries instead |
|
||||
|
||||
## Create an alert rule
|
||||
|
||||
To create an alert rule using Elasticsearch:
|
||||
|
||||
1. Navigate to **Alerting** > **Alert rules**.
|
||||
1. Click **New alert rule**.
|
||||
1. Enter a name for the alert rule.
|
||||
1. Select your **Elasticsearch** data source.
|
||||
1. Build your query using the query editor:
|
||||
- Add metric aggregations (for example, Average, Count, Sum)
|
||||
- Add a Date histogram under **Group by**
|
||||
- Optionally add filters using Lucene query syntax
|
||||
1. Configure the alert condition (for example, when the average is above a threshold).
|
||||
1. Set the evaluation interval and pending period.
|
||||
1. Configure notifications and labels.
|
||||
1. Click **Save rule**.
|
||||
|
||||
For detailed instructions, refer to [Create a Grafana-managed alert rule](ref:create-alert-rule).
|
||||
|
||||
## Example alert queries
|
||||
|
||||
The following examples show common alerting scenarios with Elasticsearch.
|
||||
|
||||
### Alert on high error count
|
||||
|
||||
Monitor the number of error-level log entries:
|
||||
|
||||
1. **Query:** `level:error`
|
||||
1. **Metric:** Count
|
||||
1. **Group by:** Date histogram (interval: 1m)
|
||||
1. **Condition:** When count is above 100
|
||||
|
||||
### Alert on average response time
|
||||
|
||||
Monitor API response times:
|
||||
|
||||
1. **Query:** `type:api_request`
|
||||
1. **Metric:** Average on field `response_time`
|
||||
1. **Group by:** Date histogram (interval: 5m)
|
||||
1. **Condition:** When average is above 500 (milliseconds)
|
||||
|
||||
### Alert on unique user count drop
|
||||
|
||||
Detect drops in active users:
|
||||
|
||||
1. **Query:** `*` (all documents)
|
||||
1. **Metric:** Unique count on field `user_id`
|
||||
1. **Group by:** Date histogram (interval: 1h)
|
||||
1. **Condition:** When unique count is below 100
|
||||
|
||||
## Limitations
|
||||
|
||||
When using Elasticsearch with Grafana Alerting, be aware of the following limitations:
|
||||
|
||||
### Template variables not supported
|
||||
|
||||
Alert queries cannot contain template variables. Grafana evaluates alert rules on the backend without dashboard context, so variables like `$hostname` or `$environment` won't be resolved.
|
||||
|
||||
If your dashboard query uses template variables, create a separate query for alerting with hard-coded values.
|
||||
|
||||
### Logs queries not supported
|
||||
|
||||
Queries using the **Logs** metric type cannot be used for alerting. Convert your query to use metric aggregations with a Date histogram instead.
|
||||
|
||||
### Query complexity
|
||||
|
||||
Complex queries with many nested aggregations may timeout or fail to evaluate. Simplify queries for alerting by:
|
||||
|
||||
- Reducing the number of bucket aggregations
|
||||
- Using appropriate time intervals
|
||||
- Adding filters to limit the data scanned
|
||||
|
||||
## Best practices
|
||||
|
||||
Follow these best practices when creating Elasticsearch alerts:
|
||||
|
||||
- **Use specific filters:** Add Lucene query filters to focus on relevant data and improve query performance.
|
||||
- **Choose appropriate intervals:** Match the Date histogram interval to your evaluation frequency.
|
||||
- **Test queries first:** Verify your query returns expected results in Explore before creating an alert.
|
||||
- **Set realistic thresholds:** Base alert thresholds on historical data patterns.
|
||||
- **Use meaningful names:** Give alert rules descriptive names that indicate what they monitor.
|
||||
@@ -1,124 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/elasticsearch/annotations/
|
||||
description: Using annotations with Elasticsearch in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- elasticsearch
|
||||
- annotations
|
||||
- events
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Annotations
|
||||
title: Elasticsearch annotations
|
||||
weight: 500
|
||||
refs:
|
||||
annotate-visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
---
|
||||
|
||||
# Elasticsearch annotations
|
||||
|
||||
Annotations overlay event data on your dashboard graphs, helping you correlate log events with metrics.
|
||||
You can use Elasticsearch as a data source for annotations to display events such as deployments, alerts, or other significant occurrences on your visualizations.
|
||||
|
||||
For general information about annotations, refer to [Annotate visualizations](ref:annotate-visualizations).
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before creating Elasticsearch annotations, ensure you have:
|
||||
|
||||
- An Elasticsearch data source configured in Grafana
|
||||
- Documents in Elasticsearch containing event data with timestamp fields
|
||||
- Read access to the Elasticsearch index containing your events
|
||||
|
||||
## Create an annotation query
|
||||
|
||||
To add an Elasticsearch annotation to your dashboard:
|
||||
|
||||
1. Navigate to your dashboard and click **Dashboard settings** (gear icon).
|
||||
1. Select **Annotations** in the left menu.
|
||||
1. Click **Add annotation query**.
|
||||
1. Enter a **Name** for the annotation.
|
||||
1. Select your **Elasticsearch** data source from the **Data source** drop-down.
|
||||
1. Configure the annotation query and field mappings.
|
||||
1. Click **Save dashboard**.
|
||||
|
||||
## Query
|
||||
|
||||
Use the query field to filter which Elasticsearch documents appear as annotations. The query uses [Lucene query syntax](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html#query-string-syntax).
|
||||
|
||||
**Examples:**
|
||||
|
||||
| Query | Description |
|
||||
| ---------------------------------------- | ---------------------------------------------------- |
|
||||
| `*` | Matches all documents. |
|
||||
| `type:deployment` | Shows only deployment events. |
|
||||
| `level:error OR level:critical` | Shows error and critical events. |
|
||||
| `service:api AND environment:production` | Shows events for a specific service and environment. |
|
||||
| `tags:release` | Shows events tagged as releases. |
|
||||
|
||||
You can use template variables in your annotation queries. For example, `service:$service` filters annotations based on the selected service variable.
|
||||
|
||||
## Field mappings
|
||||
|
||||
Field mappings tell Grafana which Elasticsearch fields contain the annotation data.
|
||||
|
||||
### Time
|
||||
|
||||
The **Time** field specifies which field contains the annotation timestamp.
|
||||
|
||||
- **Default:** `@timestamp`
|
||||
- **Format:** The field must contain a date value that Elasticsearch recognizes.
|
||||
|
||||
### Time End
|
||||
|
||||
The **Time End** field specifies a field containing the end time for range annotations. Range annotations display as a shaded region on the graph instead of a single vertical line.
|
||||
|
||||
- **Default:** Empty (single-point annotations)
|
||||
- **Use case:** Display maintenance windows, incidents, or any event with a duration.
|
||||
|
||||
### Text
|
||||
|
||||
The **Text** field specifies which field contains the annotation description displayed when you hover over the annotation.
|
||||
|
||||
- **Default:** `tags`
|
||||
- **Tip:** Use a descriptive field like `message`, `description`, or `summary`.
|
||||
|
||||
### Tags
|
||||
|
||||
The **Tags** field specifies which field contains tags for the annotation. Tags help categorize and filter annotations.
|
||||
|
||||
- **Default:** Empty
|
||||
- **Format:** The field can contain either a comma-separated string or an array of strings.
|
||||
|
||||
## Example: Deployment annotations
|
||||
|
||||
To display deployment events as annotations:
|
||||
|
||||
1. Create an annotation query with the following settings:
|
||||
- **Query:** `type:deployment`
|
||||
- **Time:** `@timestamp`
|
||||
- **Text:** `message`
|
||||
- **Tags:** `environment`
|
||||
|
||||
This configuration displays deployment events with their messages as the annotation text and environments as tags.
|
||||
|
||||
## Example: Range annotations for incidents
|
||||
|
||||
To display incidents with duration:
|
||||
|
||||
1. Create an annotation query with the following settings:
|
||||
- **Query:** `type:incident`
|
||||
- **Time:** `start_time`
|
||||
- **Time End:** `end_time`
|
||||
- **Text:** `description`
|
||||
- **Tags:** `severity`
|
||||
|
||||
This configuration displays incidents as shaded regions from their start time to end time.
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user