Compare commits
39 Commits
cursor/pla
...
docs/loki-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0f1de558e2 | ||
|
|
ba2ef712d8 | ||
|
|
a45400de8d | ||
|
|
0c7936111f | ||
|
|
7c55021b94 | ||
|
|
8b7bd6f646 | ||
|
|
b889d23c29 | ||
|
|
e014d9a000 | ||
|
|
3eab41557a | ||
|
|
db96b6c1e3 | ||
|
|
3f78facce0 | ||
|
|
361fadf3be | ||
|
|
f244c9d483 | ||
|
|
29b04bd2ed | ||
|
|
c0fe27406b | ||
|
|
9409af6f1c | ||
|
|
e1c60e0a83 | ||
|
|
1f88aeb91f | ||
|
|
901360dca4 | ||
|
|
2cf485f6bf | ||
|
|
8ff88036e7 | ||
|
|
5eb0e6f432 | ||
|
|
1465b44d5a | ||
|
|
ee62baea2c | ||
|
|
1f20ca5a3d | ||
|
|
97b241d4ab | ||
|
|
466a27deff | ||
|
|
264131a390 | ||
|
|
7698970f22 | ||
|
|
bbaf91ed9c | ||
|
|
92464b2dc8 | ||
|
|
5fe192a893 | ||
|
|
380154707b | ||
|
|
217427e072 | ||
|
|
585d24dafa | ||
|
|
fccece3ca0 | ||
|
|
d44cab9eaf | ||
|
|
3d3b4dd213 | ||
|
|
2947d41ea8 |
@@ -68,14 +68,14 @@ require (
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/at-wat/mqtt-go v0.19.6 // indirect
|
||||
github.com/aws/aws-sdk-go v1.55.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 // indirect
|
||||
github.com/aws/smithy-go v1.23.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 // indirect
|
||||
github.com/aws/smithy-go v1.23.2 // indirect
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df // indirect
|
||||
github.com/benbjohnson/clock v1.3.5 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
@@ -162,14 +162,14 @@ require (
|
||||
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect
|
||||
github.com/grafana/dataplane/sdata v0.0.9 // indirect
|
||||
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect
|
||||
github.com/grafana/grafana-aws-sdk v1.3.0 // indirect
|
||||
github.com/grafana/grafana-aws-sdk v1.4.2 // indirect
|
||||
github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1 // indirect
|
||||
github.com/grafana/grafana/apps/provisioning v0.0.0 // indirect
|
||||
github.com/grafana/grafana/pkg/apiserver v0.0.0 // indirect
|
||||
github.com/grafana/grafana/pkg/semconv v0.0.0-20250804150913-990f1c69ecc2 // indirect
|
||||
github.com/grafana/otel-profiling-go v0.5.1 // indirect
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect
|
||||
github.com/grafana/sqlds/v4 v4.2.7 // indirect
|
||||
github.com/grafana/sqlds/v5 v5.0.3 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20191002090509-6af20e3a5340 // indirect
|
||||
|
||||
@@ -173,44 +173,44 @@ github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
|
||||
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
|
||||
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 h1:fWZhGAwVRK/fAN2tmt7ilH4PPAE11rDj7HytrmbZ2FE=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 h1:12SpdwU8Djs+YGklkinSSlcrPyj3H4VifVsKf78KbwA=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11/go.mod h1:dd+Lkp6YmMryke+qxW/VnKyhMBDTYP41Q2Bb+6gNZgY=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10 h1:7LllDZAegXU3yk41mwM6KcPu0wmjKGQB1bg99bNdQm4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10/go.mod h1:Ge6gzXPjqu4v0oHvgAwvGzYcK921GU0hQM25WF/Kl+8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14 h1:TxkI7QI+sFkTItN/6cJuMZEIVMFXeu2dI1ZffkXngKI=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14/go.mod h1:12x4Uw/vijC11XkctTjy92TNCQ+UnNJkT7fzX0Yd93E=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8 h1:gLD09eaJUdiszm7vd1btiQUYE0Hj+0I2b8AS+75z9AY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8/go.mod h1:4RW3oMPt1POR74qVOC4SbubxAwdP4pCT0nSw3jycOU4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.17 h1:QFl8lL6RgakNK86vusim14P2k8BFSxjvUkcWLDjgz9Y=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.17/go.mod h1:V8P7ILjp/Uef/aX8TjGk6OHZN6IKPM5YW6S78QnRD5c=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.21 h1:56HGpsgnmD+2/KpG0ikvvR8+3v3COCwaF4r+oWwOeNA=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.21/go.mod h1:3YELwedmQbw7cXNaII2Wywd+YY58AmLPwX4LzARgmmA=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 h1:T1brd5dR3/fzNFAQch/iBKeX07/ffu/cLu+q+RuzEWk=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13/go.mod h1:Peg/GBAQ6JDt+RoBf4meB1wylmAipb7Kg2ZFakZTlwk=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 h1:cTXRdLkpBanlDwISl+5chq5ui1d1YWg4PWMR9c3kXyw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84/go.mod h1:kwSy5X7tfIHN39uucmjQVs2LvDdXEjQucgQQEqCggEo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 h1:6bgAZgRyT4RoFWhxS+aoGMFyE0cD1bSzFnEEi4bFPGI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8/go.mod h1:KcGkXFVU8U28qS4KvLEcPxytPZPBcRawaH2Pf/0jptE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 h1:HhJYoES3zOz34yWEpGENqJvRVPqpmJyR3+AFg9ybhdY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8/go.mod h1:JnA+hPWeYAVbDssp83tv+ysAG8lTfLVXvSsyKg/7xNA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 h1:GMYy2EOWfzdP3wfVAGXBNKY5vK4K8vMET4sYOYltmqs=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36/go.mod h1:gDhdAV6wL3PmPqBhiPbnlS447GoWs8HTTOYef9/9Inw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 h1:nAP2GYbfh8dd2zGZqFRSMlq+/F6cMPBUuCsGAMkN074=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4/go.mod h1:LT10DsiGjLWh4GbjInf9LQejkYEhBgBCjLG5+lvk4EE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 h1:M6JI2aGFEzYxsF6CXIuRBnkge9Wf9a2xU39rNeXgu10=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8/go.mod h1:Fw+MyTwlwjFsSTE31mH211Np+CUslml8mzc0AFEG09s=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 h1:qcLWgdhq45sDM9na4cvXax9dyLitn8EYBRl8Ak4XtG4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17/go.mod h1:M+jkjBFZ2J6DJrjMv2+vkBbuht6kxJYtJiwoVgX4p4U=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.0 h1:0reDqfEN+tB+sozj2r92Bep8MEwBZgtAXTND1Kk9OXg=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.0/go.mod h1:kUklwasNoCn5YpyAqC/97r6dzTA1SRKJfKq16SXeoDU=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4 h1:FTdEN9dtWPB0EOURNtDPmwGp6GGvMqRJCAihkSl/1No=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4/go.mod h1:mYubxV9Ff42fZH4kexj43gFPhgc/LyC7KqvUKt1watc=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0 h1:I7ghctfGXrscr7r1Ga/mDqSJKm7Fkpl5Mwq79Z+rZqU=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0/go.mod h1:Zo9id81XP6jbayIFWNuDpA6lMBWhsVy+3ou2jLa4JnA=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 h1:+LVB0xBqEgjQoqr9bGZbRzvg212B0f17JdflleJRNR4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5/go.mod h1:xoaxeqnnUaZjPjaICgIy5B+MHCSb/ZSOn4MvkFNOUA0=
|
||||
github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M=
|
||||
github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 h1:0JPwLz1J+5lEOfy/g0SURC9cxhbQ1lIMHMa+AHZSzz0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.1/go.mod h1:fKvyjJcz63iL/ftA6RaM8sRCtN4r4zl4tjL3qw5ec7k=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 h1:OWs0/j2UYR5LOGi88sD5/lhN6TDLG6SfA7CqsQO9zF0=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5/go.mod h1:klO+ejMvYsB4QATfEOIXk8WAEwN4N0aBfJpvC+5SZBo=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 h1:mLlUgHn02ue8whiR4BmxxGJLR2gwU6s6ZzJ5wDamBUs=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk=
|
||||
github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
|
||||
github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
|
||||
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df h1:GSoSVRLoBaFpOOds6QyY1L8AX7uoY+Ln3BHc22W40X0=
|
||||
@@ -637,8 +637,8 @@ github.com/grafana/grafana-app-sdk v0.48.7 h1:9mF7nqkqP0QUYYDlznoOt+GIyjzj45wGfU
|
||||
github.com/grafana/grafana-app-sdk v0.48.7/go.mod h1:DWsaaH39ZMHwSOSoUBaeW8paMrRaYsjRYlLwCJYd78k=
|
||||
github.com/grafana/grafana-app-sdk/logging v0.48.7 h1:Oa5qg473gka5+W/WQk61Xbw4YdAv+wV2Z4bJtzeCaQw=
|
||||
github.com/grafana/grafana-app-sdk/logging v0.48.7/go.mod h1:5u3KalezoBAAo2Y3ytDYDAIIPvEqFLLDSxeiK99QxDU=
|
||||
github.com/grafana/grafana-aws-sdk v1.3.0 h1:/bfJzP93rCel1GbWoRSq0oUo424MZXt8jAp2BK9w8tM=
|
||||
github.com/grafana/grafana-aws-sdk v1.3.0/go.mod h1:VGycF0JkCGKND2O5je1ucOqPJ0ZNhZYzV3c2bNBAaGk=
|
||||
github.com/grafana/grafana-aws-sdk v1.4.2 h1:GrUEoLbs46r8rG/GZL4L2b63Bo+rkIYKdtCT7kT5KkM=
|
||||
github.com/grafana/grafana-aws-sdk v1.4.2/go.mod h1:1qnZdYs6gQzxxF0dDodaE7Rn9fiMzuhwvtaAZ7ySnhY=
|
||||
github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1 h1:FFcEA01tW+SmuJIuDbHOdgUBL+d7DPrZ2N4zwzPhfGk=
|
||||
github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1/go.mod h1:Oi4anANlCuTCc66jCyqIzfVbgLXFll8Wja+Y4vfANlc=
|
||||
github.com/grafana/grafana-plugin-sdk-go v0.284.0 h1:1bK7eWsnPBLUWDcWJWe218Ik5ad0a5JpEL4mH9ry7Ws=
|
||||
@@ -655,8 +655,8 @@ github.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasn
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU=
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
|
||||
github.com/grafana/sqlds/v4 v4.2.7 h1:sFQhsS7DBakNMdxa++yOfJ9BVvkZwFJ0B95o57K0/XA=
|
||||
github.com/grafana/sqlds/v4 v4.2.7/go.mod h1:BQRjUG8rOqrBI4NAaeoWrIMuoNgfi8bdhCJ+5cgEfLU=
|
||||
github.com/grafana/sqlds/v5 v5.0.3 h1:+yUMUxfa0WANQsmS9xtTFSRX1Q55Iv1B9EjlrW4VlBU=
|
||||
github.com/grafana/sqlds/v5 v5.0.3/go.mod h1:GKeTTiC+GeR1X0z3f0Iee+hZnNgN62uQpj5XVMx5Uew=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 h1:QGLs/O40yoNK9vmy4rhUGBVyMf1lISBGtXRpsu/Qu/o=
|
||||
|
||||
@@ -106,25 +106,25 @@ require (
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/at-wat/mqtt-go v0.19.6 // indirect
|
||||
github.com/aws/aws-sdk-go v1.55.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 // indirect
|
||||
github.com/aws/smithy-go v1.23.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 // indirect
|
||||
github.com/aws/smithy-go v1.23.2 // indirect
|
||||
github.com/bahlo/generic-list-go v0.2.0 // indirect
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df // indirect
|
||||
github.com/benbjohnson/clock v1.3.5 // indirect
|
||||
@@ -229,7 +229,7 @@ require (
|
||||
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 // indirect
|
||||
github.com/grafana/dataplane/sdata v0.0.9 // indirect
|
||||
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect
|
||||
github.com/grafana/grafana-aws-sdk v1.3.0 // indirect
|
||||
github.com/grafana/grafana-aws-sdk v1.4.2 // indirect
|
||||
github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1 // indirect
|
||||
github.com/grafana/grafana-plugin-sdk-go v0.284.0 // indirect
|
||||
github.com/grafana/grafana/apps/dashboard v0.0.0 // indirect
|
||||
@@ -242,7 +242,7 @@ require (
|
||||
github.com/grafana/otel-profiling-go v0.5.1 // indirect
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
|
||||
github.com/grafana/sqlds/v4 v4.2.7 // indirect
|
||||
github.com/grafana/sqlds/v5 v5.0.3 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 // indirect
|
||||
|
||||
@@ -238,24 +238,24 @@ github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
|
||||
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
|
||||
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 h1:fWZhGAwVRK/fAN2tmt7ilH4PPAE11rDj7HytrmbZ2FE=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 h1:12SpdwU8Djs+YGklkinSSlcrPyj3H4VifVsKf78KbwA=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11/go.mod h1:dd+Lkp6YmMryke+qxW/VnKyhMBDTYP41Q2Bb+6gNZgY=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10 h1:7LllDZAegXU3yk41mwM6KcPu0wmjKGQB1bg99bNdQm4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10/go.mod h1:Ge6gzXPjqu4v0oHvgAwvGzYcK921GU0hQM25WF/Kl+8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14 h1:TxkI7QI+sFkTItN/6cJuMZEIVMFXeu2dI1ZffkXngKI=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14/go.mod h1:12x4Uw/vijC11XkctTjy92TNCQ+UnNJkT7fzX0Yd93E=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8 h1:gLD09eaJUdiszm7vd1btiQUYE0Hj+0I2b8AS+75z9AY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8/go.mod h1:4RW3oMPt1POR74qVOC4SbubxAwdP4pCT0nSw3jycOU4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.17 h1:QFl8lL6RgakNK86vusim14P2k8BFSxjvUkcWLDjgz9Y=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.17/go.mod h1:V8P7ILjp/Uef/aX8TjGk6OHZN6IKPM5YW6S78QnRD5c=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.21 h1:56HGpsgnmD+2/KpG0ikvvR8+3v3COCwaF4r+oWwOeNA=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.21/go.mod h1:3YELwedmQbw7cXNaII2Wywd+YY58AmLPwX4LzARgmmA=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 h1:T1brd5dR3/fzNFAQch/iBKeX07/ffu/cLu+q+RuzEWk=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13/go.mod h1:Peg/GBAQ6JDt+RoBf4meB1wylmAipb7Kg2ZFakZTlwk=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 h1:cTXRdLkpBanlDwISl+5chq5ui1d1YWg4PWMR9c3kXyw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84/go.mod h1:kwSy5X7tfIHN39uucmjQVs2LvDdXEjQucgQQEqCggEo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 h1:6bgAZgRyT4RoFWhxS+aoGMFyE0cD1bSzFnEEi4bFPGI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8/go.mod h1:KcGkXFVU8U28qS4KvLEcPxytPZPBcRawaH2Pf/0jptE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 h1:HhJYoES3zOz34yWEpGENqJvRVPqpmJyR3+AFg9ybhdY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8/go.mod h1:JnA+hPWeYAVbDssp83tv+ysAG8lTfLVXvSsyKg/7xNA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 h1:GMYy2EOWfzdP3wfVAGXBNKY5vK4K8vMET4sYOYltmqs=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36/go.mod h1:gDhdAV6wL3PmPqBhiPbnlS447GoWs8HTTOYef9/9Inw=
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.45.3 h1:Nn3qce+OHZuMj/edx4its32uxedAmquCDxtZkrdeiD4=
|
||||
@@ -264,12 +264,12 @@ github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.51.0 h1:e5cbPZYTIY2nUEFie
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.51.0/go.mod h1:UseIHRfrm7PqeZo6fcTb6FUCXzCnh1KJbQbmOfxArGM=
|
||||
github.com/aws/aws-sdk-go-v2/service/ec2 v1.225.2 h1:IfMb3Ar8xEaWjgH/zeVHYD8izwJdQgRP5mKCTDt4GNk=
|
||||
github.com/aws/aws-sdk-go-v2/service/ec2 v1.225.2/go.mod h1:35jGWx7ECvCwTsApqicFYzZ7JFEnBc6oHUuOQ3xIS54=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 h1:nAP2GYbfh8dd2zGZqFRSMlq+/F6cMPBUuCsGAMkN074=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4/go.mod h1:LT10DsiGjLWh4GbjInf9LQejkYEhBgBCjLG5+lvk4EE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 h1:M6JI2aGFEzYxsF6CXIuRBnkge9Wf9a2xU39rNeXgu10=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8/go.mod h1:Fw+MyTwlwjFsSTE31mH211Np+CUslml8mzc0AFEG09s=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 h1:qcLWgdhq45sDM9na4cvXax9dyLitn8EYBRl8Ak4XtG4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17/go.mod h1:M+jkjBFZ2J6DJrjMv2+vkBbuht6kxJYtJiwoVgX4p4U=
|
||||
github.com/aws/aws-sdk-go-v2/service/kms v1.41.2 h1:zJeUxFP7+XP52u23vrp4zMcVhShTWbNO8dHV6xCSvFo=
|
||||
@@ -280,14 +280,16 @@ github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6 h1:Pwbxovp
|
||||
github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6/go.mod h1:Z4xLt5mXspLKjBV92i165wAJ/3T6TIv4n7RtIS8pWV0=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.0 h1:0reDqfEN+tB+sozj2r92Bep8MEwBZgtAXTND1Kk9OXg=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.0/go.mod h1:kUklwasNoCn5YpyAqC/97r6dzTA1SRKJfKq16SXeoDU=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4 h1:FTdEN9dtWPB0EOURNtDPmwGp6GGvMqRJCAihkSl/1No=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4/go.mod h1:mYubxV9Ff42fZH4kexj43gFPhgc/LyC7KqvUKt1watc=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0 h1:I7ghctfGXrscr7r1Ga/mDqSJKm7Fkpl5Mwq79Z+rZqU=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0/go.mod h1:Zo9id81XP6jbayIFWNuDpA6lMBWhsVy+3ou2jLa4JnA=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 h1:+LVB0xBqEgjQoqr9bGZbRzvg212B0f17JdflleJRNR4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5/go.mod h1:xoaxeqnnUaZjPjaICgIy5B+MHCSb/ZSOn4MvkFNOUA0=
|
||||
github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M=
|
||||
github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.40.1 h1:w6a0H79HrHf3lr+zrw+pSzR5B+caiQFAKiNHlrUcnoc=
|
||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.40.1/go.mod h1:c6Vg0BRiU7v0MVhHupw90RyL120QBwAMLbDCzptGeMk=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 h1:0JPwLz1J+5lEOfy/g0SURC9cxhbQ1lIMHMa+AHZSzz0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.1/go.mod h1:fKvyjJcz63iL/ftA6RaM8sRCtN4r4zl4tjL3qw5ec7k=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 h1:OWs0/j2UYR5LOGi88sD5/lhN6TDLG6SfA7CqsQO9zF0=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5/go.mod h1:klO+ejMvYsB4QATfEOIXk8WAEwN4N0aBfJpvC+5SZBo=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 h1:mLlUgHn02ue8whiR4BmxxGJLR2gwU6s6ZzJ5wDamBUs=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk=
|
||||
github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
|
||||
github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27 h1:60m4tnanN1ctzIu4V3bfCNJ39BiOPSm1gHFlFjTkRE0=
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c=
|
||||
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
|
||||
@@ -851,8 +853,8 @@ github.com/grafana/grafana-app-sdk v0.48.7 h1:9mF7nqkqP0QUYYDlznoOt+GIyjzj45wGfU
|
||||
github.com/grafana/grafana-app-sdk v0.48.7/go.mod h1:DWsaaH39ZMHwSOSoUBaeW8paMrRaYsjRYlLwCJYd78k=
|
||||
github.com/grafana/grafana-app-sdk/logging v0.48.7 h1:Oa5qg473gka5+W/WQk61Xbw4YdAv+wV2Z4bJtzeCaQw=
|
||||
github.com/grafana/grafana-app-sdk/logging v0.48.7/go.mod h1:5u3KalezoBAAo2Y3ytDYDAIIPvEqFLLDSxeiK99QxDU=
|
||||
github.com/grafana/grafana-aws-sdk v1.3.0 h1:/bfJzP93rCel1GbWoRSq0oUo424MZXt8jAp2BK9w8tM=
|
||||
github.com/grafana/grafana-aws-sdk v1.3.0/go.mod h1:VGycF0JkCGKND2O5je1ucOqPJ0ZNhZYzV3c2bNBAaGk=
|
||||
github.com/grafana/grafana-aws-sdk v1.4.2 h1:GrUEoLbs46r8rG/GZL4L2b63Bo+rkIYKdtCT7kT5KkM=
|
||||
github.com/grafana/grafana-aws-sdk v1.4.2/go.mod h1:1qnZdYs6gQzxxF0dDodaE7Rn9fiMzuhwvtaAZ7ySnhY=
|
||||
github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1 h1:FFcEA01tW+SmuJIuDbHOdgUBL+d7DPrZ2N4zwzPhfGk=
|
||||
github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1/go.mod h1:Oi4anANlCuTCc66jCyqIzfVbgLXFll8Wja+Y4vfANlc=
|
||||
github.com/grafana/grafana-cloud-migration-snapshot v1.9.0 h1:JOzchPgptwJdruYoed7x28lFDwhzs7kssResYsnC0iI=
|
||||
@@ -889,8 +891,8 @@ github.com/grafana/pyroscope/api v1.2.1-0.20250415190842-3ff7247547ae h1:35W3Wjp
|
||||
github.com/grafana/pyroscope/api v1.2.1-0.20250415190842-3ff7247547ae/go.mod h1:6CJ1uXmLZ13ufpO9xE4pST+DyaBt0uszzrV0YnoaVLQ=
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
|
||||
github.com/grafana/sqlds/v4 v4.2.7 h1:sFQhsS7DBakNMdxa++yOfJ9BVvkZwFJ0B95o57K0/XA=
|
||||
github.com/grafana/sqlds/v4 v4.2.7/go.mod h1:BQRjUG8rOqrBI4NAaeoWrIMuoNgfi8bdhCJ+5cgEfLU=
|
||||
github.com/grafana/sqlds/v5 v5.0.3 h1:+yUMUxfa0WANQsmS9xtTFSRX1Q55Iv1B9EjlrW4VlBU=
|
||||
github.com/grafana/sqlds/v5 v5.0.3/go.mod h1:GKeTTiC+GeR1X0z3f0Iee+hZnNgN62uQpj5XVMx5Uew=
|
||||
github.com/grafana/tempo v1.5.1-0.20250529124718-87c2dc380cec h1:wnzJov9RhSHGaTYGzTygL4qq986fLen8xSqnQgaMd28=
|
||||
github.com/grafana/tempo v1.5.1-0.20250529124718-87c2dc380cec/go.mod h1:j1IY7J2rUz7TcTjFVVx6HCpyTlYOJPtXuGRZ7sI+vSo=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
|
||||
@@ -2321,6 +2323,8 @@ modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk=
|
||||
pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
|
||||
@@ -30,14 +30,14 @@ require (
|
||||
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
|
||||
github.com/apache/arrow-go/v18 v18.4.1 // indirect
|
||||
github.com/armon/go-metrics v0.4.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 // indirect
|
||||
github.com/aws/smithy-go v1.23.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 // indirect
|
||||
github.com/aws/smithy-go v1.23.2 // indirect
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver v3.5.1+incompatible // indirect
|
||||
@@ -97,14 +97,14 @@ require (
|
||||
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 // indirect
|
||||
github.com/grafana/dataplane/sdata v0.0.9 // indirect
|
||||
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect
|
||||
github.com/grafana/grafana-aws-sdk v1.3.0 // indirect
|
||||
github.com/grafana/grafana-aws-sdk v1.4.2 // indirect
|
||||
github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1 // indirect
|
||||
github.com/grafana/grafana-plugin-sdk-go v0.284.0 // indirect
|
||||
github.com/grafana/grafana/pkg/apiserver v0.0.0 // indirect
|
||||
github.com/grafana/grafana/pkg/semconv v0.0.0-20250804150913-990f1c69ecc2 // indirect
|
||||
github.com/grafana/otel-profiling-go v0.5.1 // indirect
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect
|
||||
github.com/grafana/sqlds/v4 v4.2.7 // indirect
|
||||
github.com/grafana/sqlds/v5 v5.0.3 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20191002090509-6af20e3a5340 // indirect
|
||||
|
||||
@@ -28,22 +28,22 @@ github.com/apache/thrift v0.22.0 h1:r7mTJdj51TMDe6RtcmNdQxgn9XcyfGDOzegMDRg47uc=
|
||||
github.com/apache/thrift v0.22.0/go.mod h1:1e7J/O1Ae6ZQMTYdy9xa3w9k+XHWPfRvdPyJeynQ+/g=
|
||||
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
|
||||
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 h1:fWZhGAwVRK/fAN2tmt7ilH4PPAE11rDj7HytrmbZ2FE=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14 h1:TxkI7QI+sFkTItN/6cJuMZEIVMFXeu2dI1ZffkXngKI=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14/go.mod h1:12x4Uw/vijC11XkctTjy92TNCQ+UnNJkT7fzX0Yd93E=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 h1:6bgAZgRyT4RoFWhxS+aoGMFyE0cD1bSzFnEEi4bFPGI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8/go.mod h1:KcGkXFVU8U28qS4KvLEcPxytPZPBcRawaH2Pf/0jptE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 h1:HhJYoES3zOz34yWEpGENqJvRVPqpmJyR3+AFg9ybhdY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8/go.mod h1:JnA+hPWeYAVbDssp83tv+ysAG8lTfLVXvSsyKg/7xNA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 h1:M6JI2aGFEzYxsF6CXIuRBnkge9Wf9a2xU39rNeXgu10=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8/go.mod h1:Fw+MyTwlwjFsSTE31mH211Np+CUslml8mzc0AFEG09s=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 h1:+LVB0xBqEgjQoqr9bGZbRzvg212B0f17JdflleJRNR4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5/go.mod h1:xoaxeqnnUaZjPjaICgIy5B+MHCSb/ZSOn4MvkFNOUA0=
|
||||
github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M=
|
||||
github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.21 h1:56HGpsgnmD+2/KpG0ikvvR8+3v3COCwaF4r+oWwOeNA=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.21/go.mod h1:3YELwedmQbw7cXNaII2Wywd+YY58AmLPwX4LzARgmmA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 h1:mLlUgHn02ue8whiR4BmxxGJLR2gwU6s6ZzJ5wDamBUs=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk=
|
||||
github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
|
||||
github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df h1:GSoSVRLoBaFpOOds6QyY1L8AX7uoY+Ln3BHc22W40X0=
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df/go.mod h1:hiVxq5OP2bUGBRNS3Z/bt/reCLFNbdcST6gISi1fiOM=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
@@ -229,8 +229,8 @@ github.com/grafana/grafana-app-sdk v0.48.7 h1:9mF7nqkqP0QUYYDlznoOt+GIyjzj45wGfU
|
||||
github.com/grafana/grafana-app-sdk v0.48.7/go.mod h1:DWsaaH39ZMHwSOSoUBaeW8paMrRaYsjRYlLwCJYd78k=
|
||||
github.com/grafana/grafana-app-sdk/logging v0.48.7 h1:Oa5qg473gka5+W/WQk61Xbw4YdAv+wV2Z4bJtzeCaQw=
|
||||
github.com/grafana/grafana-app-sdk/logging v0.48.7/go.mod h1:5u3KalezoBAAo2Y3ytDYDAIIPvEqFLLDSxeiK99QxDU=
|
||||
github.com/grafana/grafana-aws-sdk v1.3.0 h1:/bfJzP93rCel1GbWoRSq0oUo424MZXt8jAp2BK9w8tM=
|
||||
github.com/grafana/grafana-aws-sdk v1.3.0/go.mod h1:VGycF0JkCGKND2O5je1ucOqPJ0ZNhZYzV3c2bNBAaGk=
|
||||
github.com/grafana/grafana-aws-sdk v1.4.2 h1:GrUEoLbs46r8rG/GZL4L2b63Bo+rkIYKdtCT7kT5KkM=
|
||||
github.com/grafana/grafana-aws-sdk v1.4.2/go.mod h1:1qnZdYs6gQzxxF0dDodaE7Rn9fiMzuhwvtaAZ7ySnhY=
|
||||
github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1 h1:FFcEA01tW+SmuJIuDbHOdgUBL+d7DPrZ2N4zwzPhfGk=
|
||||
github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1/go.mod h1:Oi4anANlCuTCc66jCyqIzfVbgLXFll8Wja+Y4vfANlc=
|
||||
github.com/grafana/grafana-plugin-sdk-go v0.284.0 h1:1bK7eWsnPBLUWDcWJWe218Ik5ad0a5JpEL4mH9ry7Ws=
|
||||
@@ -243,8 +243,8 @@ github.com/grafana/prometheus-alertmanager v0.25.1-0.20250911094103-5456b6e45604
|
||||
github.com/grafana/prometheus-alertmanager v0.25.1-0.20250911094103-5456b6e45604/go.mod h1:O/QP1BCm0HHIzbKvgMzqb5sSyH88rzkFk84F4TfJjBU=
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og=
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU=
|
||||
github.com/grafana/sqlds/v4 v4.2.7 h1:sFQhsS7DBakNMdxa++yOfJ9BVvkZwFJ0B95o57K0/XA=
|
||||
github.com/grafana/sqlds/v4 v4.2.7/go.mod h1:BQRjUG8rOqrBI4NAaeoWrIMuoNgfi8bdhCJ+5cgEfLU=
|
||||
github.com/grafana/sqlds/v5 v5.0.3 h1:+yUMUxfa0WANQsmS9xtTFSRX1Q55Iv1B9EjlrW4VlBU=
|
||||
github.com/grafana/sqlds/v5 v5.0.3/go.mod h1:GKeTTiC+GeR1X0z3f0Iee+hZnNgN62uQpj5XVMx5Uew=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 h1:QGLs/O40yoNK9vmy4rhUGBVyMf1lISBGtXRpsu/Qu/o=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0/go.mod h1:hM2alZsMUni80N33RBe6J0e423LB+odMj7d3EMP9l20=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 h1:B+8ClL/kCQkRiU82d9xajRPKYMrB7E0MbtzWVi1K4ns=
|
||||
|
||||
@@ -30,11 +30,22 @@ KeeperSpec: {
|
||||
}
|
||||
|
||||
#AWSConfig: {
|
||||
accessKeyID: #CredentialValue
|
||||
secretAccessKey: #CredentialValue
|
||||
region: string
|
||||
accessKey?: #AWSAccessKey
|
||||
assumeRole?: #AWSAssumeRole
|
||||
kmsKeyID?: string
|
||||
}
|
||||
|
||||
#AWSAccessKey: {
|
||||
accessKeyID: #CredentialValue
|
||||
secretAccessKey: #CredentialValue
|
||||
}
|
||||
|
||||
#AWSAssumeRole: {
|
||||
assumeRoleArn: string
|
||||
externalID: string
|
||||
}
|
||||
|
||||
#AzureConfig: {
|
||||
keyVaultName: string
|
||||
tenantID: string
|
||||
|
||||
@@ -4,14 +4,26 @@ package v1beta1
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type KeeperAWSConfig struct {
|
||||
AccessKeyID KeeperCredentialValue `json:"accessKeyID"`
|
||||
SecretAccessKey KeeperCredentialValue `json:"secretAccessKey"`
|
||||
KmsKeyID *string `json:"kmsKeyID,omitempty"`
|
||||
Region string `json:"region"`
|
||||
AccessKey *KeeperAWSAccessKey `json:"accessKey,omitempty"`
|
||||
AssumeRole *KeeperAWSAssumeRole `json:"assumeRole,omitempty"`
|
||||
KmsKeyID *string `json:"kmsKeyID,omitempty"`
|
||||
}
|
||||
|
||||
// NewKeeperAWSConfig creates a new KeeperAWSConfig object.
|
||||
func NewKeeperAWSConfig() *KeeperAWSConfig {
|
||||
return &KeeperAWSConfig{
|
||||
return &KeeperAWSConfig{}
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type KeeperAWSAccessKey struct {
|
||||
AccessKeyID KeeperCredentialValue `json:"accessKeyID"`
|
||||
SecretAccessKey KeeperCredentialValue `json:"secretAccessKey"`
|
||||
}
|
||||
|
||||
// NewKeeperAWSAccessKey creates a new KeeperAWSAccessKey object.
|
||||
func NewKeeperAWSAccessKey() *KeeperAWSAccessKey {
|
||||
return &KeeperAWSAccessKey{
|
||||
AccessKeyID: *NewKeeperCredentialValue(),
|
||||
SecretAccessKey: *NewKeeperCredentialValue(),
|
||||
}
|
||||
@@ -36,6 +48,17 @@ func NewKeeperCredentialValue() *KeeperCredentialValue {
|
||||
return &KeeperCredentialValue{}
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type KeeperAWSAssumeRole struct {
|
||||
AssumeRoleArn string `json:"assumeRoleArn"`
|
||||
ExternalID string `json:"externalID"`
|
||||
}
|
||||
|
||||
// NewKeeperAWSAssumeRole creates a new KeeperAWSAssumeRole object.
|
||||
func NewKeeperAWSAssumeRole() *KeeperAWSAssumeRole {
|
||||
return &KeeperAWSAssumeRole{}
|
||||
}
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
type KeeperAzureConfig struct {
|
||||
KeyVaultName string `json:"keyVaultName"`
|
||||
|
||||
@@ -12,6 +12,7 @@ const (
|
||||
AzureKeeperType KeeperType = "azure"
|
||||
GCPKeeperType KeeperType = "gcp"
|
||||
HashiCorpKeeperType KeeperType = "hashicorp"
|
||||
SystemKeeperType KeeperType = "system"
|
||||
)
|
||||
|
||||
func (kt KeeperType) String() string {
|
||||
@@ -20,9 +21,31 @@ func (kt KeeperType) String() string {
|
||||
|
||||
// KeeperConfig is an interface that all keeper config types must implement.
|
||||
type KeeperConfig interface {
|
||||
// Returns the name of the keeper
|
||||
GetName() string
|
||||
Type() KeeperType
|
||||
}
|
||||
|
||||
type NamedKeeperConfig[T interface {
|
||||
Type() KeeperType
|
||||
}] struct {
|
||||
Name string
|
||||
Cfg T
|
||||
}
|
||||
|
||||
func NewNamedKeeperConfig[T interface {
|
||||
Type() KeeperType
|
||||
}](keeperName string, cfg T) *NamedKeeperConfig[T] {
|
||||
return &NamedKeeperConfig[T]{Name: keeperName, Cfg: cfg}
|
||||
}
|
||||
|
||||
func (c *NamedKeeperConfig[T]) GetName() string {
|
||||
return c.Name
|
||||
}
|
||||
func (c *NamedKeeperConfig[T]) Type() KeeperType {
|
||||
return c.Cfg.Type()
|
||||
}
|
||||
|
||||
func (s *KeeperSpec) GetType() KeeperType {
|
||||
if s.Aws != nil {
|
||||
return AWSKeeperType
|
||||
@@ -43,7 +66,7 @@ func (s *KeeperSpec) GetType() KeeperType {
|
||||
type SystemKeeperConfig struct{}
|
||||
|
||||
func (*SystemKeeperConfig) Type() KeeperType {
|
||||
return "system"
|
||||
return SystemKeeperType
|
||||
}
|
||||
|
||||
func (s *KeeperAWSConfig) Type() KeeperType {
|
||||
|
||||
@@ -14,6 +14,8 @@ import (
|
||||
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
|
||||
return map[string]common.OpenAPIDefinition{
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.Keeper": schema_pkg_apis_secret_v1beta1_Keeper(ref),
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAWSAccessKey": schema_pkg_apis_secret_v1beta1_KeeperAWSAccessKey(ref),
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAWSAssumeRole": schema_pkg_apis_secret_v1beta1_KeeperAWSAssumeRole(ref),
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAWSConfig": schema_pkg_apis_secret_v1beta1_KeeperAWSConfig(ref),
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAzureConfig": schema_pkg_apis_secret_v1beta1_KeeperAzureConfig(ref),
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperCredentialValue": schema_pkg_apis_secret_v1beta1_KeeperCredentialValue(ref),
|
||||
@@ -79,7 +81,7 @@ func schema_pkg_apis_secret_v1beta1_Keeper(ref common.ReferenceCallback) common.
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_secret_v1beta1_KeeperAWSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
func schema_pkg_apis_secret_v1beta1_KeeperAWSAccessKey(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
@@ -97,6 +99,65 @@ func schema_pkg_apis_secret_v1beta1_KeeperAWSConfig(ref common.ReferenceCallback
|
||||
Ref: ref("github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperCredentialValue"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"accessKeyID", "secretAccessKey"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperCredentialValue"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_secret_v1beta1_KeeperAWSAssumeRole(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"assumeRoleArn": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"externalID": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"assumeRoleArn", "externalID"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_pkg_apis_secret_v1beta1_KeeperAWSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"region": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"accessKey": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Ref: ref("github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAWSAccessKey"),
|
||||
},
|
||||
},
|
||||
"assumeRole": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Ref: ref("github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAWSAssumeRole"),
|
||||
},
|
||||
},
|
||||
"kmsKeyID": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Type: []string{"string"},
|
||||
@@ -104,11 +165,11 @@ func schema_pkg_apis_secret_v1beta1_KeeperAWSConfig(ref common.ReferenceCallback
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"accessKeyID", "secretAccessKey"},
|
||||
Required: []string{"region"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperCredentialValue"},
|
||||
"github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAWSAccessKey", "github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1.KeeperAWSAssumeRole"},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -428,12 +428,25 @@ Or using a Kubernetes format, for example `kubernetes-dashboard.json`:
|
||||
|
||||
You _must_ use the Kubernetes resource format to provision dashboards v2 / dynamic dashboards.
|
||||
|
||||
It later polls that path every `updateIntervalSeconds` for updates to the dashboard files and updates its database.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Grafana installs dashboards at the root level if you don't set the `folder` field.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Detect updates to provisioned dashboards files
|
||||
|
||||
After Grafana provisions your dashboards, it checks the filesystem for changes and updates dashboards as needed.
|
||||
|
||||
The mechanism Grafana uses to do this depends on your `updateIntervalSeconds` value:
|
||||
|
||||
- **More than 10 seconds**: Grafana polls the path at that interval.
|
||||
- **10 seconds or less**: Grafana watches the filesystem for changes and updates dashboards when it detects them.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
When `updateIntervalSeconds` is 10 or less, Grafana relies on filesystem watch events to detect changes.
|
||||
Depending on your filesystem and how you mount or sync dashboard files (for example, Docker bind mounts or some network filesystems), those events might not reach Grafana.
|
||||
To work around this, set `updateIntervalSeconds` to more than 10 to force polling, or update your setup so filesystem watch events are propagated.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Make changes to a provisioned dashboard
|
||||
|
||||
You can make changes to a provisioned dashboard in the Grafana UI but its not possible to automatically save the changes back to the provisioning source.
|
||||
|
||||
@@ -105,6 +105,11 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/panels-visualizations/visualizations/
|
||||
cloudwatch-troubleshooting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/troubleshooting/
|
||||
---
|
||||
|
||||
# Amazon CloudWatch data source
|
||||
@@ -119,6 +124,7 @@ The following documents will help you get started working with the CloudWatch da
|
||||
- [CloudWatch query editor](ref:cloudwatch-query-editor)
|
||||
- [Templates and variables](ref:cloudwatch-template-variables)
|
||||
- [Configure AWS authentication](ref:cloudwatch-aws-authentication)
|
||||
- [Troubleshoot CloudWatch issues](ref:cloudwatch-troubleshooting)
|
||||
|
||||
## Import pre-configured dashboards
|
||||
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
---
|
||||
aliases:
|
||||
- ../data-sources/aws-CloudWatch/
|
||||
- ../data-sources/aws-CloudWatch/preconfig-CloudWatch-dashboards/
|
||||
- ../data-sources/aws-CloudWatch/provision-CloudWatch/
|
||||
- CloudWatch/
|
||||
- preconfig-CloudWatch-dashboards/
|
||||
- provision-CloudWatch/
|
||||
- ../../data-sources/aws-cloudwatch/configure/
|
||||
- ../../data-sources/aws-cloudwatch/
|
||||
- ../../data-sources/aws-cloudwatch/preconfig-cloudwatch-dashboards/
|
||||
- ../../data-sources/aws-cloudwatch/provision-cloudwatch/
|
||||
- ../cloudwatch/
|
||||
- ../preconfig-cloudwatch-dashboards/
|
||||
- ../provision-cloudwatch/
|
||||
description: This document provides configuration instructions for the CloudWatch data source.
|
||||
keywords:
|
||||
- grafana
|
||||
@@ -25,11 +26,6 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/logs/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/logs/
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
provisioning-data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
@@ -40,16 +36,6 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#aws
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#aws
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/
|
||||
build-dashboards:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
data-source-management:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
@@ -153,7 +139,7 @@ You must use both an access key ID and a secret access key to authenticate.
|
||||
|
||||
Grafana automatically creates a link to a trace in X-Ray data source if logs contain the `@xrayTraceId` field. To use this feature, you must already have an X-Ray data source configured. For details, see the [X-Ray data source docs](/grafana/plugins/grafana-X-Ray-datasource/). To view the X-Ray link, select the log row in either the Explore view or dashboard [Logs panel](ref:logs) to view the log details section.
|
||||
|
||||
To log the `@xrayTraceId`, refer to the [AWS X-Ray documentation](https://docs.amazonaws.cn/en_us/xray/latest/devguide/xray-services.html). To provide the field to Grafana, your log queries must also contain the `@xrayTraceId` field, for example by using the query `fields @message, @xrayTraceId`.
|
||||
To log the `@xrayTraceId`, refer to the [AWS X-Ray documentation](https://docs.aws.amazon.com/xray/latest/devguide/xray-services.html). To provide the field to Grafana, your log queries must also contain the `@xrayTraceId` field, for example by using the query `fields @message, @xrayTraceId`.
|
||||
|
||||
**Private data source connect** - _Only for Grafana Cloud users._
|
||||
|
||||
|
||||
@@ -34,11 +34,6 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/#navigate-the-query-tab
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/#navigate-the-query-tab
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
@@ -183,7 +178,7 @@ If you use the expression field to reference another query, such as `queryA * 2`
|
||||
When you select `Builder` mode within the Metric search editor, a new Account field is displayed. Use the `Account` field to specify which of the linked monitoring accounts to target for the given query. By default, the `All` option is specified, which will target all linked accounts.
|
||||
|
||||
While in `Code` mode, you can specify any math expression. If the Monitoring account badge displays in the query editor header, all `SEARCH` expressions entered in this field will be cross-account by default and can query metrics from linked accounts. Note that while queries run cross-account, the autocomplete feature currently doesn't fetch cross-account resources, so you'll need to manually specify resource names when writing cross-account queries.
|
||||
You can limit the search to one or a set of accounts, as documented in the [AWS documentation](http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
|
||||
You can limit the search to one or a set of accounts, as documented in the [AWS documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html).
|
||||
|
||||
### Period macro
|
||||
|
||||
@@ -198,7 +193,7 @@ The link provided is valid for any account but displays the expected metrics onl
|
||||
|
||||
{{< figure src="/media/docs/cloudwatch/cloudwatch-deep-link-v12.1.png" caption="CloudWatch deep linking" >}}
|
||||
|
||||
This feature is not available for metrics based on [metric math expressions](#metric-math-expressions).
|
||||
This feature is not available for metrics based on [metric math expressions](#use-metric-math-expressions).
|
||||
|
||||
### Use Metric Insights syntax
|
||||
|
||||
@@ -319,9 +314,9 @@ The CloudWatch plugin monitors and troubleshoots applications that span multiple
|
||||
|
||||
To enable cross-account observability, complete the following steps:
|
||||
|
||||
1. Go to the [Amazon CloudWatch documentation](http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html) and follow the instructions for enabling cross-account observability.
|
||||
1. Go to the [Amazon CloudWatch documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Unified-Cross-Account.html) and follow the instructions for enabling cross-account observability.
|
||||
|
||||
1. Add [two API actions](https://grafana.com//docs/grafana/latest/datasources/aws-cloudwatch/configure/#cross-account-observability-permissions) to the IAM policy attached to the role/user running the plugin.
|
||||
1. Add [two API actions](https://grafana.com/docs/grafana/latest/datasources/aws-cloudwatch/configure/#cross-account-observability-permissions) to the IAM policy attached to the role/user running the plugin.
|
||||
|
||||
Cross-account querying is available in the plugin through the **Logs**, **Metric search**, and **Metric Insights** modes.
|
||||
After you have configured it, you'll see a **Monitoring account** badge in the query editor header.
|
||||
|
||||
519
docs/sources/datasources/aws-cloudwatch/troubleshooting/index.md
Normal file
519
docs/sources/datasources/aws-cloudwatch/troubleshooting/index.md
Normal file
@@ -0,0 +1,519 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/aws-cloudwatch/troubleshooting/
|
||||
description: Troubleshooting guide for the Amazon CloudWatch data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- cloudwatch
|
||||
- aws
|
||||
- troubleshooting
|
||||
- errors
|
||||
- authentication
|
||||
- query
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Troubleshooting
|
||||
title: Troubleshoot Amazon CloudWatch data source issues
|
||||
weight: 500
|
||||
refs:
|
||||
configure-cloudwatch:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/configure/
|
||||
cloudwatch-aws-authentication:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/aws-authentication/
|
||||
cloudwatch-template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/template-variables/
|
||||
cloudwatch-query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/aws-cloudwatch/query-editor/
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
---
|
||||
|
||||
# Troubleshoot Amazon CloudWatch data source issues
|
||||
|
||||
This document provides solutions to common issues you may encounter when configuring or using the Amazon CloudWatch data source. For configuration instructions, refer to [Configure CloudWatch](ref:configure-cloudwatch).
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The data source health check validates both metrics and logs permissions. If your IAM policy only grants access to one of these (for example, metrics-only or logs-only), the health check displays a red status. However, the service you have permissions for is still usable—you can query metrics or logs based on whichever permissions are configured.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Authentication errors
|
||||
|
||||
These errors occur when AWS credentials are invalid, missing, or don't have the required permissions.
|
||||
|
||||
### "Access Denied" or "Not authorized to perform this operation"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Save & test fails with "Access Denied"
|
||||
- Queries return authorization errors
|
||||
- Namespaces, metrics, or dimensions don't load
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| --------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| IAM policy missing required permissions | Attach the appropriate IAM policy to your user or role. For metrics, you need `cloudwatch:ListMetrics`, `cloudwatch:GetMetricData`, and related permissions. For logs, you need `logs:DescribeLogGroups`, `logs:StartQuery`, `logs:GetQueryResults`, and related permissions. Refer to [Configure CloudWatch](ref:configure-cloudwatch) for complete policy examples. |
|
||||
| Incorrect access key or secret key | Verify the credentials in the AWS Console under **IAM** > **Users** > your user > **Security credentials**. Generate new credentials if necessary. |
|
||||
| Credentials have expired | For temporary credentials, generate new ones. For access keys, verify they haven't been deactivated or deleted. |
|
||||
| Wrong AWS region | Verify the default region in the data source configuration matches where your resources are located. |
|
||||
| Assume Role ARN is incorrect | Verify the role ARN format: `arn:aws:iam::<account-id>:role/<role-name>`. Check that the role exists in the AWS Console. |
|
||||
|
||||
### "Unable to assume role"
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Authentication fails when using Assume Role ARN
|
||||
- Error message references STS or AssumeRole
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the trust relationship on the IAM role allows the Grafana credentials to assume it.
|
||||
1. Check the trust policy includes the correct principal (the user or role running Grafana).
|
||||
1. If using an external ID, ensure it matches exactly in both the role's trust policy and the Grafana data source configuration.
|
||||
1. Verify the base credentials have the `sts:AssumeRole` permission.
|
||||
1. Check that the role ARN is correct and the role exists.
|
||||
|
||||
**Example trust policy:**
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"AWS": "arn:aws:iam::<your-account-id>:user/<grafana-user>"
|
||||
},
|
||||
"Action": "sts:AssumeRole",
|
||||
"Condition": {
|
||||
"StringEquals": {
|
||||
"sts:ExternalId": "<your-external-id>"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### AWS SDK Default authentication not working
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Data source test fails when using AWS SDK Default
|
||||
- Works locally but fails in production
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify AWS credentials are configured in the environment where Grafana runs.
|
||||
1. Check for credentials in the default locations:
|
||||
- Environment variables (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`)
|
||||
- Shared credentials file (`~/.aws/credentials`)
|
||||
- EC2 instance metadata (if running on EC2)
|
||||
- ECS task role (if running in ECS)
|
||||
- EKS service account (if running in EKS)
|
||||
1. Ensure the Grafana process has permission to read the credentials file.
|
||||
1. For EKS with IRSA, set the pod's security context to allow user 472 (grafana) to access the projected token. Refer to [AWS authentication](ref:cloudwatch-aws-authentication) for details.
|
||||
|
||||
### Credentials file not found
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Error indicates credentials file cannot be read
|
||||
- Authentication fails with "Credentials file" option
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Create the credentials file at `~/.aws/credentials` for the user running the `grafana-server` service.
|
||||
1. Verify the file has correct permissions (`0644`).
|
||||
1. If the file exists but isn't working, move it to `/usr/share/grafana/` and set permissions to `0644`.
|
||||
1. Ensure the profile name in the data source configuration matches a profile in the credentials file.
|
||||
|
||||
## Connection errors
|
||||
|
||||
These errors occur when Grafana cannot reach AWS CloudWatch endpoints.
|
||||
|
||||
### "Request timed out" or connection failures
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Data source test times out
|
||||
- Queries fail with timeout errors
|
||||
- Intermittent connection issues
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify network connectivity from the Grafana server to AWS endpoints.
|
||||
1. Check firewall rules allow outbound HTTPS (port 443) to AWS services.
|
||||
1. If using a VPC, ensure proper NAT gateway or VPC endpoint configuration.
|
||||
1. For Grafana Cloud connecting to private resources, configure [Private data source connect](ref:private-data-source-connect).
|
||||
1. Check if the default region is correct—incorrect regions may cause longer timeouts.
|
||||
1. Increase the timeout settings if queries involve large data volumes.
|
||||
|
||||
### Custom endpoint configuration issues
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Connection fails when using a custom endpoint
|
||||
- Endpoint URL rejected
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the endpoint URL format is correct.
|
||||
1. Ensure the endpoint is accessible from the Grafana server.
|
||||
1. Check that the endpoint supports the required AWS APIs.
|
||||
1. For VPC endpoints, verify the endpoint policy allows the required actions.
|
||||
|
||||
## CloudWatch Metrics query errors
|
||||
|
||||
These errors occur when querying CloudWatch Metrics.
|
||||
|
||||
### "No data" or empty results
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query executes without error but returns no data
|
||||
- Charts show "No data" message
|
||||
|
||||
**Possible causes and solutions:**
|
||||
|
||||
| Cause | Solution |
|
||||
| ------------------------------- | ---------------------------------------------------------------------------------------------------------- |
|
||||
| Time range doesn't contain data | Expand the dashboard time range. CloudWatch metrics have different retention periods based on resolution. |
|
||||
| Wrong namespace or metric name | Verify the namespace (for example, `AWS/EC2`) and metric name (for example, `CPUUtilization`) are correct. |
|
||||
| Incorrect dimensions | Ensure dimension names and values match your AWS resources exactly. |
|
||||
| Match Exact enabled incorrectly | When Match Exact is enabled, all dimensions must be specified. Try disabling it to see if metrics appear. |
|
||||
| Period too large | Reduce the period setting or set it to "auto" to ensure data points are returned for your time range. |
|
||||
| Custom metrics not configured | Add custom metric namespaces in the data source configuration under **Namespaces of Custom Metrics**. |
|
||||
|
||||
### "Metric not found" or metrics don't appear in drop-down
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Expected metrics don't appear in the query editor
|
||||
- Metric drop-down is empty for a namespace
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the metric exists in the selected region.
|
||||
1. For custom metrics, add the namespace to **Namespaces of Custom Metrics** in the data source configuration.
|
||||
1. Check that the IAM policy includes `cloudwatch:ListMetrics` permission.
|
||||
1. CloudWatch limits `ListMetrics` to 500 results per page. To retrieve more metrics, increase the `list_metrics_page_limit` setting in the [Grafana configuration file](https://grafana.com/docs/grafana/latest/datasources/aws-cloudwatch/configure/#configure-the-data-source-with-grafanaini).
|
||||
1. Use the Query Inspector to verify the API request and response.
|
||||
|
||||
### Dimension values not loading
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Dimension value drop-down doesn't populate
|
||||
- Wildcard searches return no results
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the IAM policy includes `cloudwatch:ListMetrics` permission.
|
||||
1. Check that the namespace and metric are selected before dimension values can load.
|
||||
1. For EC2 dimensions, ensure `ec2:DescribeTags` and `ec2:DescribeInstances` permissions are granted.
|
||||
1. Dimension values require existing metrics—if no metrics match, no values appear.
|
||||
|
||||
### "Too many data points" or API throttling
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Queries fail with throttling errors
|
||||
- Performance degrades with multiple panels
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Increase the period setting to reduce the number of data points.
|
||||
1. Reduce the time range of your queries.
|
||||
1. Use fewer dimensions or wildcard queries per panel.
|
||||
1. Request a quota increase for `GetMetricData` requests per second in the [AWS Service Quotas console](https://console.aws.amazon.com/servicequotas/).
|
||||
1. Enable query caching in Grafana to reduce API calls.
|
||||
|
||||
### Metric math expression errors
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Expression returns errors
|
||||
- Referenced metrics not found
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify each referenced metric has a unique ID set.
|
||||
1. Check that metric IDs start with a lowercase letter and contain only letters, numbers, and underscores.
|
||||
1. Ensure all referenced metrics are in the same query.
|
||||
1. Verify the expression syntax follows [AWS Metric Math](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/using-metric-math.html) documentation.
|
||||
1. Metric math expressions can't be used with Grafana alerting if they reference other query rows.
|
||||
|
||||
## CloudWatch Logs query errors
|
||||
|
||||
These errors occur when querying CloudWatch Logs.
|
||||
|
||||
### "Query failed" or logs don't appear
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Log queries return errors
|
||||
- No log data is displayed
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify log group names are correct and exist in the selected region.
|
||||
1. Check the IAM policy includes `logs:StartQuery`, `logs:GetQueryResults`, and `logs:DescribeLogGroups` permissions.
|
||||
1. Ensure the time range contains log data.
|
||||
1. Verify the query syntax is valid. For CloudWatch Logs Insights QL, test the query in the AWS Console.
|
||||
1. Select the correct query language (Logs Insights QL, OpenSearch PPL, or OpenSearch SQL) based on your query syntax.
|
||||
|
||||
### Log query timeout
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Query runs for a long time then fails
|
||||
- Error mentions timeout
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Increase the **Query timeout** setting in the data source configuration (default is 30 minutes).
|
||||
1. Narrow the time range to reduce the amount of data scanned.
|
||||
1. Add filters to your query to limit results.
|
||||
1. Break complex queries into smaller, more focused queries.
|
||||
1. For alerting, the timeout defined in the [Grafana configuration file](https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#unified_alerting) takes precedence.
|
||||
|
||||
### Log groups not appearing in selector
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Log group selector is empty
|
||||
- Can't find expected log groups
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the IAM policy includes `logs:DescribeLogGroups` permission.
|
||||
1. Check that log groups exist in the selected region.
|
||||
1. For cross-account observability, ensure proper IAM permissions for `oam:ListSinks` and `oam:ListAttachedLinks`.
|
||||
1. Use prefix search to filter log groups if you have many groups.
|
||||
1. Verify the selected account (for cross-account) contains the expected log groups.
|
||||
|
||||
### OpenSearch SQL query errors
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- OpenSearch SQL queries fail
|
||||
- Syntax errors with SQL queries
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Specify the log group identifier or ARN in the `FROM` clause:
|
||||
|
||||
```sql
|
||||
SELECT * FROM `log_group_name` WHERE `@message` LIKE '%error%'
|
||||
```
|
||||
|
||||
1. For multiple log groups, use the `logGroups` function:
|
||||
|
||||
```sql
|
||||
SELECT * FROM `logGroups(logGroupIdentifier: ['LogGroup1', 'LogGroup2'])`
|
||||
```
|
||||
|
||||
1. Amazon CloudWatch supports only a subset of OpenSearch SQL commands. Refer to the [CloudWatch Logs documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_AnalyzeLogData_Languages.html) for supported syntax.
|
||||
|
||||
## Template variable errors
|
||||
|
||||
These errors occur when using template variables with the CloudWatch data source.
|
||||
|
||||
### Variables return no values
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Variable drop-down is empty
|
||||
- Dashboard fails to load with variable errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the data source connection is working.
|
||||
1. Check that the IAM policy includes permissions for the variable query type:
|
||||
- **Regions:** No additional permissions needed.
|
||||
- **Namespaces:** No additional permissions needed.
|
||||
- **Metrics:** Requires `cloudwatch:ListMetrics`.
|
||||
- **Dimension Values:** Requires `cloudwatch:ListMetrics`.
|
||||
- **EC2 Instance Attributes:** Requires `ec2:DescribeInstances`.
|
||||
- **EBS Volume IDs:** Requires `ec2:DescribeVolumes`.
|
||||
- **Resource ARNs:** Requires `tag:GetResources`.
|
||||
- **Log Groups:** Requires `logs:DescribeLogGroups`.
|
||||
1. For dependent variables, ensure parent variables have valid selections.
|
||||
1. Verify the region is set correctly (use "default" for the data source's default region).
|
||||
|
||||
For more information on template variables, refer to [CloudWatch template variables](ref:cloudwatch-template-variables).
|
||||
|
||||
### Multi-value template variables cause query failures
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Queries fail when selecting multiple dimension values
|
||||
- Error about search expression limits
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Search expressions are limited to 1,024 characters. Reduce the number of selected values.
|
||||
1. Use the asterisk (`*`) wildcard instead of selecting "All" to query all metrics for a dimension.
|
||||
1. Multi-value template variables are only supported for dimension values—not for Region, Namespace, or Metric Name.
|
||||
|
||||
## Cross-account observability errors
|
||||
|
||||
These errors occur when using CloudWatch cross-account observability features.
|
||||
|
||||
### Cross-account queries fail
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Can't query metrics or logs from linked accounts
|
||||
- Monitoring account badge doesn't appear
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify cross-account observability is configured in the AWS CloudWatch console.
|
||||
1. Add the required IAM permissions:
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": ["oam:ListSinks", "oam:ListAttachedLinks"],
|
||||
"Effect": "Allow",
|
||||
"Resource": "*"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
1. Check that the monitoring account and source accounts are properly linked in AWS.
|
||||
1. Cross-account observability works within a single region—verify all accounts are in the same region.
|
||||
1. EC2 Instance Attributes can't be queried across accounts because they use the EC2 API, not the CloudWatch API.
|
||||
|
||||
## Quota and pricing issues
|
||||
|
||||
These issues relate to AWS service quotas and cost management.
|
||||
|
||||
### API throttling errors
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- "Rate exceeded" errors
|
||||
- Dashboard panels intermittently fail to load
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Reduce the frequency of dashboard refreshes.
|
||||
1. Increase the period setting to reduce `GetMetricData` requests.
|
||||
1. Enable query caching in Grafana (available in Grafana Enterprise and Grafana Cloud).
|
||||
1. Request a quota increase in the [AWS Service Quotas console](https://console.aws.amazon.com/servicequotas/).
|
||||
1. Consider consolidating similar queries using metric math.
|
||||
|
||||
### Unexpectedly high CloudWatch costs
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- AWS CloudWatch costs are higher than expected
|
||||
- Frequent API calls from Grafana
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. The `GetMetricData` API doesn't qualify for the CloudWatch API free tier.
|
||||
1. Reduce dashboard auto-refresh frequency.
|
||||
1. Increase the period setting to reduce data points returned.
|
||||
1. Use query caching to reduce repeated API calls.
|
||||
1. Review variable query settings—set variable refresh to "On dashboard load" instead of "On time range change."
|
||||
1. Avoid using wildcards in dimensions when possible, as they generate search expressions with multiple API calls.
|
||||
|
||||
## Other common issues
|
||||
|
||||
These issues don't produce specific error messages but are commonly encountered.
|
||||
|
||||
### Custom metrics don't appear
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Custom metrics from applications or agents don't show in the namespace drop-down
|
||||
- Only standard AWS namespaces are visible
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Add your custom metric namespace to the **Namespaces of Custom Metrics** field in the data source configuration.
|
||||
1. Separate multiple namespaces with commas (for example, `CWAgent,CustomNamespace`).
|
||||
1. Verify custom metrics have been published to CloudWatch in the selected region.
|
||||
|
||||
### Pre-configured dashboards not working
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Imported dashboards show no data
|
||||
- Dashboard variables don't load
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify the data source name in the dashboard matches your CloudWatch data source.
|
||||
1. Check that the dashboard's AWS region setting matches where your resources are located.
|
||||
1. Ensure the IAM policy grants access to the required services (for example, EC2, Lambda, and RDS).
|
||||
1. Verify resources exist and are emitting metrics in the selected region.
|
||||
|
||||
### X-Ray trace links not appearing
|
||||
|
||||
**Symptoms:**
|
||||
|
||||
- Log entries don't show X-Ray trace links
|
||||
- `@xrayTraceId` field not appearing
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. Verify an X-Ray data source is configured and linked in the CloudWatch data source settings.
|
||||
1. Ensure your logs contain the `@xrayTraceId` field.
|
||||
1. Update log queries to include `@xrayTraceId` in the fields, for example: `fields @message, @xrayTraceId`.
|
||||
1. Configure your application to log X-Ray trace IDs. Refer to the [AWS X-Ray documentation](https://docs.aws.amazon.com/xray/latest/devguide/xray-services.html).
|
||||
|
||||
## Enable debug logging
|
||||
|
||||
To capture detailed error information for troubleshooting:
|
||||
|
||||
1. Set the Grafana log level to `debug` in the configuration file:
|
||||
|
||||
```ini
|
||||
[log]
|
||||
level = debug
|
||||
```
|
||||
|
||||
1. Review logs in `/var/log/grafana/grafana.log` (or your configured log location).
|
||||
1. Look for CloudWatch-specific entries that include request and response details.
|
||||
1. Reset the log level to `info` after troubleshooting to avoid excessive log volume.
|
||||
|
||||
## Get additional help
|
||||
|
||||
If you've tried the solutions above and still encounter issues:
|
||||
|
||||
1. Check the [Grafana community forums](https://community.grafana.com/) for similar issues.
|
||||
1. Review the [CloudWatch plugin GitHub issues](https://github.com/grafana/grafana/issues) for known bugs.
|
||||
1. Consult the [AWS CloudWatch documentation](https://docs.aws.amazon.com/cloudwatch/) for service-specific guidance.
|
||||
1. Contact Grafana Support if you're an Enterprise, Cloud Pro, or Cloud Contracted user.
|
||||
1. When reporting issues, include:
|
||||
- Grafana version
|
||||
- AWS region
|
||||
- Error messages (redact sensitive information)
|
||||
- Steps to reproduce
|
||||
- Query configuration (redact credentials and account IDs)
|
||||
@@ -14,147 +14,140 @@ labels:
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Loki
|
||||
title: Configure the Loki data source
|
||||
title: Loki data source
|
||||
weight: 800
|
||||
refs:
|
||||
data-source-management:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
build-dashboards:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
|
||||
provisioning-data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
explore:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
logs-integration-labels-and-detected-fields:
|
||||
visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#labels-and-detected-fields
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#labels-and-detected-fields
|
||||
destination: /docs/grafana-cloud/visualizations/panels-visualizations/visualizations/
|
||||
variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/dashboards/variables/
|
||||
transformations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/panels-visualizations/query-transform-data/transform-data/
|
||||
loki-alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/alerting/
|
||||
loki-annotations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/annotations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/annotations/
|
||||
import-dashboard:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/import-dashboards/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/visualizations/dashboards/build-dashboards/import-dashboards/
|
||||
loki-troubleshooting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/troubleshooting/
|
||||
configure-loki:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
loki-query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/query-editor/
|
||||
loki-template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/template-variables/
|
||||
configure-loki-derived-fields:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/#derived-fields
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/#derived-fields
|
||||
---
|
||||
|
||||
# Loki data source
|
||||
|
||||
Grafana Loki is a set of components that can be combined into a fully featured logging stack.
|
||||
Unlike other logging systems, Loki is built around the idea of only indexing metadata about your logs: labels (just like Prometheus labels). Log data itself is then compressed and stored in chunks in object stores such as S3 or GCS, or even locally on a filesystem.
|
||||
Grafana Loki is a log aggregation system that stores and queries logs from your applications and infrastructure. Unlike traditional logging systems, Loki indexes only metadata (labels) about your logs rather than the full text. Log data is compressed and stored in object stores such as Amazon S3 or Google Cloud Storage, or locally on a filesystem.
|
||||
|
||||
The following guides will help you get started with Loki:
|
||||
|
||||
- [Getting started with Loki](/docs/loki/latest/get-started/)
|
||||
- [Install Loki](/docs/loki/latest/installation/)
|
||||
- [Loki best practices](/docs/loki/latest/best-practices/#best-practices)
|
||||
- [Configure the Loki data source](/docs/grafana/latest/datasources/loki/configure-loki-data-source/)
|
||||
- [LogQL](/docs/loki/latest/logql/)
|
||||
- [Loki query editor](query-editor/)
|
||||
You can use this data source to query, visualize, and alert on log data stored in Loki.
|
||||
|
||||
## Supported Loki versions
|
||||
|
||||
This data source supports these versions of Loki:
|
||||
This data source supports Loki v2.9 and later.
|
||||
|
||||
- v2.9+
|
||||
## Key capabilities
|
||||
|
||||
## Adding a data source
|
||||
The Loki data source provides the following capabilities:
|
||||
|
||||
For instructions on how to add a data source to Grafana, refer to the [administration documentation](ref:data-source-management)
|
||||
Only users with the organization administrator role can add data sources.
|
||||
Administrators can also [configure the data source via YAML](#provision-the-data-source) with Grafana's provisioning system.
|
||||
- **Log queries:** Query and filter logs using [LogQL](https://grafana.com/docs/loki/latest/logql/), Loki's query language inspired by PromQL.
|
||||
- **Metric queries:** Extract metrics from log data using LogQL metric queries, enabling you to count log events, calculate rates, and aggregate values.
|
||||
- **Live tailing:** Stream logs in real time as they're ingested into Loki.
|
||||
- **Derived fields:** Create links from log lines to external systems such as tracing backends, allowing you to jump directly from a log entry to a related trace.
|
||||
- **Annotations:** Overlay log events on time series graphs to correlate logs with metrics.
|
||||
- **Alerting:** Create alert rules based on log queries to notify you when specific patterns or thresholds are detected.
|
||||
|
||||
Once you've added the Loki data source, you can [configure it](#configure-the-data-source) so that your Grafana instance's users can create queries in its [query editor](query-editor/) when they [build dashboards](ref:build-dashboards), use [Explore](ref:explore), and [annotate visualizations](query-editor/#apply-annotations).
|
||||
## Get started
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
To troubleshoot configuration and other issues, check the log file located at `/var/log/grafana/grafana.log` on Unix systems, or in `<grafana_install_dir>/data/log` on other platforms and manual installations.
|
||||
{{< /admonition >}}
|
||||
The following documentation helps you get started with the Loki data source:
|
||||
|
||||
## Provision the data source
|
||||
- [Configure the Loki data source](ref:configure-loki)
|
||||
- [Loki query editor](ref:loki-query-editor)
|
||||
- [Loki template variables](ref:loki-template-variables)
|
||||
- [Troubleshoot the Loki data source](ref:loki-troubleshooting)
|
||||
|
||||
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
|
||||
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-data-sources).
|
||||
For more information about Loki itself, refer to the [Loki documentation](https://grafana.com/docs/loki/latest/):
|
||||
|
||||
### Provisioning examples
|
||||
- [Get started with Loki](https://grafana.com/docs/loki/latest/get-started/)
|
||||
- [Install Loki](https://grafana.com/docs/loki/latest/installation/)
|
||||
- [Loki best practices](https://grafana.com/docs/loki/latest/best-practices/#best-practices)
|
||||
- [LogQL query language](https://grafana.com/docs/loki/latest/logql/)
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
## Additional features
|
||||
|
||||
datasources:
|
||||
- name: Loki
|
||||
type: loki
|
||||
access: proxy
|
||||
url: http://localhost:3100
|
||||
jsonData:
|
||||
timeout: 60
|
||||
maxLines: 1000
|
||||
```
|
||||
After you configure the Loki data source, you can:
|
||||
|
||||
**Using basic authorization and a derived field:**
|
||||
- Create [visualizations](ref:visualizations) to display your log data
|
||||
- Configure and use [templates and variables](ref:variables) for dynamic dashboards
|
||||
- Add [transformations](ref:transformations) to process query results
|
||||
- Add [annotations](ref:loki-annotations) to overlay log events on graphs
|
||||
- Set up [alerting](ref:loki-alerting) to monitor your log data
|
||||
- Use [Explore](ref:explore) for ad-hoc log queries and analysis
|
||||
- Configure [derived fields](ref:configure-loki-derived-fields) to link logs to traces or other data sources
|
||||
|
||||
You must escape the dollar (`$`) character in YAML values because it can be used to interpolate environment variables:
|
||||
If you encounter issues, refer to [Troubleshoot issues with the Loki data source](ref:loki-troubleshooting).
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
## Community dashboards
|
||||
|
||||
datasources:
|
||||
- name: Loki
|
||||
type: loki
|
||||
access: proxy
|
||||
url: http://localhost:3100
|
||||
basicAuth: true
|
||||
basicAuthUser: my_user
|
||||
jsonData:
|
||||
maxLines: 1000
|
||||
derivedFields:
|
||||
# Field with internal link pointing to data source in Grafana.
|
||||
# datasourceUid value can be anything, but it should be unique across all defined data source uids.
|
||||
- datasourceUid: my_jaeger_uid
|
||||
matcherRegex: "traceID=(\\w+)"
|
||||
name: TraceID
|
||||
# url will be interpreted as query for the datasource
|
||||
url: '$${__value.raw}'
|
||||
# optional for URL Label to set a custom display label for the link.
|
||||
          urlDisplayLabel: 'View Trace' # Optional. Sets a custom display label for the link.
|
||||
Grafana doesn't ship pre-configured dashboards with the Loki data source, but you can find community-contributed dashboards on [Grafana Dashboards](https://grafana.com/grafana/dashboards/?dataSource=loki). These dashboards provide ready-made visualizations for common Loki use cases.
|
||||
|
||||
# Field with external link.
|
||||
- matcherRegex: "traceID=(\\w+)"
|
||||
name: TraceID
|
||||
url: 'http://localhost:16686/trace/$${__value.raw}'
|
||||
secureJsonData:
|
||||
basicAuthPassword: test_password
|
||||
```
|
||||
To import a community dashboard:
|
||||
|
||||
**Using a Jaeger data source:**
|
||||
1. Find a dashboard on [grafana.com/grafana/dashboards](https://grafana.com/grafana/dashboards/?dataSource=loki).
|
||||
1. Copy the dashboard ID.
|
||||
1. In Grafana, go to **Dashboards** > **New** > **Import**.
|
||||
1. Paste the dashboard ID and click **Load**.
|
||||
|
||||
In this example, the Jaeger data source's `uid` value should match the Loki data source's `datasourceUid` value.
|
||||
For more information, refer to [Import a dashboard](ref:import-dashboard).
|
||||
|
||||
```yaml
|
||||
datasources:
|
||||
- name: Jaeger
|
||||
type: jaeger
|
||||
url: http://jaeger-tracing-query:16686/
|
||||
access: proxy
|
||||
# UID should match the datasourceUid in derivedFields.
|
||||
uid: my_jaeger_uid
|
||||
```
|
||||
## Related data sources
|
||||
|
||||
## Query the data source
|
||||
Loki integrates with other Grafana data sources to provide full observability across logs, metrics, and traces:
|
||||
|
||||
The Loki data source's query editor helps you create log and metric queries that use Loki's query language, [LogQL](/docs/loki/latest/logql/).
|
||||
- **Tempo:** Use [derived fields](ref:configure-loki-derived-fields) to create links from log lines to traces in Tempo, enabling seamless navigation from logs to distributed traces.
|
||||
- **Prometheus and Mimir:** Display logs alongside metrics on the same dashboard to correlate application behavior with performance data.
|
||||
|
||||
For details, refer to the [query editor documentation](query-editor/).
|
||||
|
||||
## Use template variables
|
||||
|
||||
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
|
||||
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
|
||||
Grafana refers to such variables as template variables.
|
||||
|
||||
For details, see the [template variables documentation](template-variables/).
|
||||
For more information about building observability workflows, refer to the [Grafana Tempo documentation](https://grafana.com/docs/tempo/latest/) and [Grafana Mimir documentation](https://grafana.com/docs/mimir/latest/).
|
||||
|
||||
226
docs/sources/datasources/loki/alerting/index.md
Normal file
226
docs/sources/datasources/loki/alerting/index.md
Normal file
@@ -0,0 +1,226 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/loki/alerting/
|
||||
description: Use Grafana Alerting with the Loki data source
|
||||
keywords:
|
||||
- grafana
|
||||
- loki
|
||||
- alerting
|
||||
- alerts
|
||||
- logs
|
||||
- recording rules
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Alerting
|
||||
title: Loki alerting
|
||||
weight: 450
|
||||
refs:
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/
|
||||
create-alert-rule:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-grafana-managed-rule/
|
||||
configure-loki:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
recording-rules:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-recording-rules/
|
||||
grafana-managed-recording-rules:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
|
||||
data-source-managed-recording-rules:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-data-source-managed-recording-rules/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/alerting-rules/create-recording-rules/create-data-source-managed-recording-rules/
|
||||
---
|
||||
|
||||
# Loki alerting
|
||||
|
||||
You can use Grafana Alerting with Loki to create alerts based on your log data. This allows you to monitor error rates, detect patterns, and receive notifications when specific conditions are met in your logs.
|
||||
|
||||
For general information about Grafana Alerting, refer to [Grafana Alerting](ref:alerting).
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before creating alerts with Loki, ensure you have:
|
||||
|
||||
- A [Loki data source configured](ref:configure-loki) in Grafana.
|
||||
- Appropriate permissions to create alert rules.
|
||||
- Understanding of the log patterns you want to monitor.
|
||||
- The **Manage alert rules in Alerting UI** toggle enabled in the Loki data source settings.
|
||||
|
||||
## Supported query types
|
||||
|
||||
Loki alerting requires **metric queries** that return numeric time series data. You must use LogQL metric queries that wrap log stream selectors with aggregation functions.
|
||||
|
||||
### Query types and alerting compatibility
|
||||
|
||||
| Query type | Alerting support | Notes |
|
||||
| ------------- | ---------------- | ----------------------------------------------- |
|
||||
| Metric query | ✅ Full support | Use range aggregation functions like `rate()` |
|
||||
| Log query | ❌ Not supported | Convert to metric query using aggregations |
|
||||
| Instant query | ⚠️ Limited | Range queries recommended for time-based alerts |
|
||||
|
||||
### Common metric functions for alerting
|
||||
|
||||
Use these LogQL functions to convert log queries into metric queries suitable for alerting:
|
||||
|
||||
| Function | Description | Example |
|
||||
| -------------------- | ---------------------------------------------- | --------------------------------------------------- |
|
||||
| `rate()` | Rate of log entries per second | `rate({job="app"}[5m])` |
|
||||
| `count_over_time()` | Count of log entries in the specified interval | `count_over_time({job="app"}[5m])` |
|
||||
| `sum_over_time()` | Sum of extracted numeric values | `sum_over_time({job="app"} \| unwrap latency [5m])` |
|
||||
| `avg_over_time()` | Average of extracted numeric values | `avg_over_time({job="app"} \| unwrap latency [5m])` |
|
||||
| `max_over_time()` | Maximum extracted value in the interval | `max_over_time({job="app"} \| unwrap latency [5m])` |
|
||||
| `bytes_rate()` | Rate of bytes per second | `bytes_rate({job="app"}[5m])` |
|
||||
| `absent_over_time()` | Returns 1 if no logs exist in the interval | `absent_over_time({job="app"}[5m])` |
|
||||
|
||||
## Create an alert rule
|
||||
|
||||
To create an alert rule using Loki:
|
||||
|
||||
1. Navigate to **Alerting** > **Alert rules**.
|
||||
1. Click **New alert rule**.
|
||||
1. Enter a name for the alert rule.
|
||||
1. Select your **Loki** data source.
|
||||
1. Build your metric query:
|
||||
- Start with a log stream selector (for example, `{job="app"}`)
|
||||
- Add filters if needed (for example, `|= "error"`)
|
||||
- Wrap with a metric function (for example, `rate(...[5m])`)
|
||||
1. Configure the alert condition (for example, when the rate is above a threshold).
|
||||
1. Set the evaluation interval and pending period.
|
||||
1. Configure notifications and labels.
|
||||
1. Click **Save rule**.
|
||||
|
||||
For detailed instructions, refer to [Create a Grafana-managed alert rule](ref:create-alert-rule).
|
||||
|
||||
## Example alert queries
|
||||
|
||||
The following examples show common alerting scenarios with Loki.
|
||||
|
||||
### Alert on high error rate
|
||||
|
||||
Monitor the rate of error logs:
|
||||
|
||||
```logql
|
||||
rate({job="app"} |= "error" [5m]) > 0.1
|
||||
```
|
||||
|
||||
This query calculates the rate of log lines containing "error" per second over the last 5 minutes and alerts when it exceeds 0.1 errors per second.
|
||||
|
||||
### Alert on error count threshold
|
||||
|
||||
Monitor the count of errors in a time window:
|
||||
|
||||
```logql
|
||||
sum(count_over_time({job="app", level="error"}[15m])) > 100
|
||||
```
|
||||
|
||||
This query counts error-level logs over 15 minutes and alerts when the count exceeds 100.
|
||||
|
||||
### Alert on high latency
|
||||
|
||||
Monitor request latency extracted from logs:
|
||||
|
||||
```logql
|
||||
avg_over_time({job="api"} | logfmt | unwrap duration [5m]) > 500
|
||||
```
|
||||
|
||||
This query extracts the `duration` field from logfmt-formatted logs and alerts when the average exceeds 500 milliseconds.
|
||||
|
||||
### Alert on missing logs
|
||||
|
||||
Detect when a service stops sending logs:
|
||||
|
||||
```logql
|
||||
absent_over_time({job="critical-service"}[10m])
|
||||
```
|
||||
|
||||
This query alerts when no logs are received from the critical service for 10 minutes.
|
||||
|
||||
### Alert by label grouping
|
||||
|
||||
Monitor errors grouped by service:
|
||||
|
||||
```logql
|
||||
sum by (service) (rate({namespace="production"} |= "error" [5m])) > 0.05
|
||||
```
|
||||
|
||||
This query calculates error rates per service and alerts when any service exceeds the threshold.
|
||||
|
||||
## Recording rules
|
||||
|
||||
Recording rules pre-compute frequently used or expensive LogQL queries and save the results as new time series metrics. This improves query performance and reduces load on your Loki instance.
|
||||
|
||||
For detailed information about recording rules, refer to [Create recording rules](ref:recording-rules).
|
||||
|
||||
### Use cases for Loki recording rules
|
||||
|
||||
Recording rules are useful when you need to:
|
||||
|
||||
- **Pre-aggregate expensive queries:** Convert complex log aggregations into simple metric queries.
|
||||
- **Track trends over time:** Create metrics from log data that would otherwise be too expensive to query repeatedly.
|
||||
- **Reuse queries across dashboards:** Compute a metric once and reference it in multiple dashboards and alerts.
|
||||
- **Reduce query latency:** Query precomputed results instead of scanning logs in real time.
|
||||
|
||||
### Types of recording rules
|
||||
|
||||
Loki supports two types of recording rules:
|
||||
|
||||
- **Grafana-managed recording rules:** Query Loki using LogQL and store results in a Prometheus-compatible data source. This is the recommended option. Refer to [Create Grafana-managed recording rules](ref:grafana-managed-recording-rules).
|
||||
- **Data source-managed recording rules:** Define recording rules directly in Loki using the Loki ruler. Refer to [Create data source-managed recording rules](ref:data-source-managed-recording-rules).
|
||||
|
||||
### Example recording rule
|
||||
|
||||
The following example creates a metric that tracks the error rate per service:
|
||||
|
||||
```logql
|
||||
sum by (service) (rate({namespace="production"} |= "error" [5m]))
|
||||
```
|
||||
|
||||
This query runs on a schedule (for example, every minute) and stores the result as a new metric. You can then query this precomputed metric in dashboards and alert rules instead of running the full LogQL query each time.
|
||||
|
||||
## Limitations
|
||||
|
||||
When using Loki with Grafana Alerting, be aware of the following limitations:
|
||||
|
||||
### Template variables not supported
|
||||
|
||||
Alert queries cannot contain template variables. Grafana evaluates alert rules on the backend without dashboard context, so variables like `$job` or `$namespace` are not resolved.
|
||||
|
||||
If your dashboard query uses template variables, create a separate query for alerting with hard-coded values.
|
||||
|
||||
### Log queries not supported
|
||||
|
||||
Queries that return log lines cannot be used for alerting. You must convert log queries to metric queries using aggregation functions like `rate()` or `count_over_time()`.
|
||||
|
||||
### Query time range
|
||||
|
||||
Alert queries use the evaluation interval to determine the time range, not the dashboard time picker. Ensure your metric function intervals (for example, `[5m]`) align with your alert evaluation frequency.
|
||||
|
||||
## Best practices
|
||||
|
||||
Follow these best practices when creating Loki alerts:
|
||||
|
||||
- **Use metric queries:** Always wrap log stream selectors with metric functions for alerting.
|
||||
- **Match intervals:** Align the LogQL time interval (for example, `[5m]`) with your alert evaluation interval.
|
||||
- **Be specific with selectors:** Use precise label selectors to reduce the amount of data scanned.
|
||||
- **Test queries first:** Verify your query returns expected numeric results in Explore before creating an alert.
|
||||
- **Use meaningful thresholds:** Base alert thresholds on historical patterns in your log data.
|
||||
- **Add context with labels:** Include relevant labels in your alert to help with triage.
|
||||
143
docs/sources/datasources/loki/annotations/index.md
Normal file
143
docs/sources/datasources/loki/annotations/index.md
Normal file
@@ -0,0 +1,143 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/loki/annotations/
|
||||
description: Use Loki log events as annotations in Grafana dashboards
|
||||
keywords:
|
||||
- grafana
|
||||
- loki
|
||||
- annotations
|
||||
- events
|
||||
- logs
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Annotations
|
||||
title: Loki annotations
|
||||
weight: 400
|
||||
refs:
|
||||
annotate-visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
configure-loki:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
---
|
||||
|
||||
# Loki annotations
|
||||
|
||||
Annotations overlay event data on your dashboard graphs, helping you correlate log events with metrics. You can use Loki as a data source for annotations to display events such as deployments, errors, or other significant occurrences on your visualizations.
|
||||
|
||||
For general information about annotations, refer to [Annotate visualizations](ref:annotate-visualizations).
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before creating Loki annotations, ensure you have:
|
||||
|
||||
- A [Loki data source configured](ref:configure-loki) in Grafana.
|
||||
- Logs in Loki containing the events you want to display as annotations.
|
||||
- Read access to the Loki logs you want to query.
|
||||
|
||||
## Create an annotation query
|
||||
|
||||
To add a Loki annotation to your dashboard:
|
||||
|
||||
1. Navigate to your dashboard and click **Dashboard settings** (gear icon).
|
||||
1. Select **Annotations** in the left menu.
|
||||
1. Click **Add annotation query**.
|
||||
1. Enter a **Name** for the annotation.
|
||||
1. Select your **Loki** data source from the **Data source** dropdown.
|
||||
1. Enter a LogQL query in the query field.
|
||||
1. Configure the optional formatting fields (Title, Tags, Text).
|
||||
1. Click **Save dashboard**.
|
||||
|
||||
## Query
|
||||
|
||||
Use the query field to enter a LogQL expression that filters the log events to display as annotations. Only log queries are supported for annotations; metric queries are not supported.
|
||||
|
||||
**Examples:**
|
||||
|
||||
| Query | Description |
|
||||
| ----------------------------------------- | ------------------------------------------------- |
|
||||
| `{job="app"}` | Shows all logs from the "app" job. |
|
||||
| `{job="app"} \|= "error"` | Shows logs containing "error" from the "app" job. |
|
||||
| `{namespace="production"} \|= "deployed"` | Shows deployment events in production. |
|
||||
| `{job="app"} \| logfmt \| level="error"` | Shows error-level logs using logfmt parsing. |
|
||||
| `{job="$job"}` | Uses a template variable to filter by job. |
|
||||
|
||||
You can use template variables in your annotation queries to make them dynamic based on dashboard selections.
|
||||
|
||||
## Formatting options
|
||||
|
||||
Loki annotations support optional formatting fields to customize how annotations are displayed.
|
||||
|
||||
### Title
|
||||
|
||||
The **Title** field specifies a pattern for the annotation title. You can use label values by wrapping the label name in double curly braces.
|
||||
|
||||
- **Default:** Empty (uses the log line as the title)
|
||||
- **Pattern example:** `{{instance}}` displays the value of the `instance` label
|
||||
- **Pattern example:** `{{job}} - {{level}}` combines multiple labels
|
||||
|
||||
### Tags
|
||||
|
||||
The **Tags** field specifies which labels to use as annotation tags. Enter label names as a comma-separated list.
|
||||
|
||||
- **Default:** All labels are used as tags
|
||||
- **Example:** `job,instance,level` uses only these three labels as tags
|
||||
|
||||
Tags help categorize and filter annotations in the dashboard.
|
||||
|
||||
### Text
|
||||
|
||||
The **Text** field specifies a pattern for the annotation text displayed when you hover over the annotation. You can use label values by wrapping the label name in double curly braces.
|
||||
|
||||
- **Default:** The log line content
|
||||
- **Pattern example:** `{{message}}` displays the value of a parsed `message` label
|
||||
- **Pattern example:** `Error on {{instance}}: {{error}}` creates a descriptive message
|
||||
|
||||
### Line limit
|
||||
|
||||
The **Line limit** field controls the maximum number of log lines returned for annotations. This helps prevent performance issues when querying logs with many results.
|
||||
|
||||
- **Default:** Uses the data source's configured maximum lines setting
|
||||
|
||||
## Example: Deployment annotations
|
||||
|
||||
To display deployment events as annotations:
|
||||
|
||||
1. Create an annotation query with the following settings:
|
||||
- **Query:** `{job="deploy-service"} |= "deployed"`
|
||||
- **Title:** `Deployment: {{app}}`
|
||||
- **Tags:** `app,environment`
|
||||
- **Text:** `{{message}}`
|
||||
|
||||
This configuration displays deployment logs with the application name in the title and environment as a tag.
|
||||
|
||||
## Example: Error annotations
|
||||
|
||||
To overlay error events on your metrics graphs:
|
||||
|
||||
1. Create an annotation query with the following settings:
|
||||
- **Query:** `{namespace="production"} | logfmt | level="error"`
|
||||
- **Title:** `{{job}} error`
|
||||
- **Tags:** `job,instance`
|
||||
|
||||
This configuration displays error logs from production, grouped by job and instance.
|
||||
|
||||
## Example: Filter annotations with template variables
|
||||
|
||||
To create dynamic annotations that respond to dashboard variable selections:
|
||||
|
||||
1. Create a template variable named `job` that queries Loki label values.
|
||||
1. Create an annotation query with the following settings:
|
||||
- **Query:** `{job="$job"} |= "alert"`
|
||||
- **Title:** `Alert: {{alertname}}`
|
||||
- **Tags:** `severity`
|
||||
|
||||
This configuration displays only alerts for the selected job, making the annotations relevant to the current dashboard context.
|
||||
@@ -1,146 +0,0 @@
|
||||
---
|
||||
aliases:
|
||||
- ../data-sources/loki/
|
||||
- ../features/datasources/loki/
|
||||
description: Configure the Loki data source
|
||||
keywords:
|
||||
- grafana
|
||||
- loki
|
||||
- logging
|
||||
- guide
|
||||
- data source
|
||||
menuTitle: Configure Loki
|
||||
title: Configure the Loki data source
|
||||
weight: 200
|
||||
refs:
|
||||
log-details:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#labels-and-detected-fields
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#labels-and-detected-fields
|
||||
---
|
||||
|
||||
# Loki data source
|
||||
|
||||
Grafana ships with built-in support for [Loki](/docs/loki/latest/), an open-source log aggregation system by Grafana Labs. If you are new to Loki, the following documentation will help you get started:
|
||||
|
||||
- [Getting started](/docs/loki/latest/get-started/)
|
||||
- [Best practices](/docs/loki/latest/best-practices/#best-practices)
|
||||
|
||||
## Configure the Loki data source
|
||||
|
||||
To add the Loki data source, complete the following steps:
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Under **Connections**, click **Add new connection**.
|
||||
1. Enter `Loki` in the search bar.
|
||||
1. Select **Loki data source**.
|
||||
1. Click **Create a Loki data source** in the upper right.
|
||||
|
||||
You will be taken to the **Settings** tab where you will set up your Loki configuration.
|
||||
|
||||
## Configuration options
|
||||
|
||||
The following is a list of configuration options for Loki.
|
||||
|
||||
The first option to configure is the name of your connection:
|
||||
|
||||
- **Name** - The data source name. This is how you refer to the data source in panels and queries. Examples: loki-1, loki_logs.
|
||||
|
||||
- **Default** - Toggle to set this data source as the default. When you go to a dashboard panel, this will be the default selected data source.
|
||||
|
||||
### HTTP section
|
||||
|
||||
- **URL** - The URL of your Loki server. Loki uses port 3100. If your Loki server is local, use `http://localhost:3100`. If it is on a server within a network, this is the URL with port where you are running Loki. Example: `http://loki.example.orgname:3100`.
|
||||
|
||||
- **Allowed cookies** - Specify cookies by name that should be forwarded to the data source. The Grafana proxy deletes all forwarded cookies by default.
|
||||
|
||||
- **Timeout** - The HTTP request timeout, specified in seconds. There is no default value.
|
||||
|
||||
### Auth section
|
||||
|
||||
There are several authentication methods you can choose in the Authentication section.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
Use TLS (Transport Layer Security) for an additional layer of security when working with Loki. For information on setting up TLS encryption with Loki see [Grafana Loki configuration parameters](/docs/loki/latest/configuration/).
|
||||
{{< /admonition >}}
|
||||
|
||||
- **Basic authentication** - The most common authentication method. Use your data source user name and password to connect.
|
||||
|
||||
- **With credentials** - Toggle on to enable credentials such as cookies or auth headers to be sent with cross-site requests.
|
||||
|
||||
- **TLS client authentication** - Toggle on to use client authentication. When enabled, add the `Server name`, `Client cert` and `Client key`. The client provides a certificate that is validated by the server to establish the client's trusted identity. The client key encrypts the data between client and server.
|
||||
|
||||
- **With CA cert** - Authenticate with a CA certificate. Follow the instructions of the CA (Certificate Authority) to download the certificate file.
|
||||
|
||||
- **Skip TLS verify** - Toggle on to bypass TLS certificate validation.
|
||||
|
||||
- **Forward OAuth identity** - Forward the OAuth access token (and also the OIDC ID token if available) of the user querying the data source.
|
||||
|
||||
### Custom HTTP headers
|
||||
|
||||
- **Header** - Add a custom header. This allows custom headers to be passed based on the needs of your Loki instance.
|
||||
|
||||
- **Value** - The value of the header.
|
||||
|
||||
### Alerting
|
||||
|
||||
- **Manage alert rules in Alerting UI** - Toggle on to manage alert rules for the Loki data source. To manage other alerting resources add an `Alertmanager` data source.
|
||||
|
||||
### Queries
|
||||
|
||||
- **Maximum lines** - Sets the maximum number of log lines returned by Loki. Increase the limit to have a bigger results set for ad-hoc analysis. Decrease the limit if your browser is sluggish when displaying log results. The default is `1000`.
|
||||
|
||||
<!-- {{< admonition type="note" >}}
|
||||
To troubleshoot configuration and other issues, check the log file located at `/var/log/grafana/grafana.log` on Unix systems, or in `<grafana_install_dir>/data/log` on other platforms and manual installations.
|
||||
{{< /admonition >}} -->
|
||||
|
||||
### Derived fields
|
||||
|
||||
Derived Fields are used to extract new fields from your logs and create a link from the value of the field.
|
||||
|
||||
For example, you can link to your tracing backend directly from your logs, or link to a user profile page if the log line contains a corresponding `userId`.
|
||||
These links appear in the [log details](ref:log-details).
|
||||
|
||||
You can add multiple derived fields.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
If you use Grafana Cloud, you can request modifications to this feature by clicking **Open a Support Ticket** from the Grafana Cloud Portal.
|
||||
{{< /admonition >}}
|
||||
|
||||
Each derived field consists of the following:
|
||||
|
||||
- **Name** - Sets the field name. Displayed as a label in the log details.
|
||||
|
||||
- **Type** - Defines the type of the derived field. It can be either:
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
Using complex regular expressions in either type can impact browser performance when processing large volumes of logs. Consider using simpler patterns when possible.
|
||||
{{< /admonition >}}
|
||||
|
||||
- **Regex**: A regular expression to parse a part of the log message and capture it as the value of the new field. Can contain only one capture group.
|
||||
|
||||
- **Label**: A label from the selected log line. This can be any type of label - indexed, parsed or structured metadata. When using this type, the input will match as a regular expression against label keys, allowing you to match variations like `traceid` and `trace_id` with a single regex pattern like `trace[_]?id`. The value of the matched label will be used as the value of the derived field.
|
||||
|
||||
- **URL/query** - Sets the full link URL if the link is external, or a query for the target data source if the link is internal. You can interpolate the value from the field with the `${__value.raw}` macro.
|
||||
|
||||
- **URL Label** - Sets a custom display label for the link. This setting overrides the link label, which defaults to the full external URL or name of the linked internal data source.
|
||||
|
||||
- **Internal link** - Toggle on to define an internal link. For internal links, you can select the target data source from a selector. This supports only tracing data sources.
|
||||
|
||||
- **Open in new tab** - Toggle on to open the link in a new tab or window.
|
||||
|
||||
- **Show example log message** - Click to paste an example log line to test the regular expression of your derived fields.
|
||||
|
||||
Click **Save & test** to test your connection.
|
||||
|
||||
#### Troubleshoot interpolation
|
||||
|
||||
You can use a debug section to see what your fields extract and how the URL is interpolated.
|
||||
Select **Show example log message** to display a text area where you can enter a log message.
|
||||
|
||||
{{< figure src="/static/img/docs/v75/loki_derived_fields_settings.png" class="docs-image--no-shadow" max-width="800px" caption="Screenshot of the derived fields debugging" >}}
|
||||
|
||||
The new field with the link shown in log details:
|
||||
|
||||
{{< figure src="/static/img/docs/explore/data-link-9-4.png" max-width="800px" caption="Data link in Explore" >}}
|
||||
363
docs/sources/datasources/loki/configure/index.md
Normal file
363
docs/sources/datasources/loki/configure/index.md
Normal file
@@ -0,0 +1,363 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/loki/configure/
|
||||
description: Configure the Loki data source
|
||||
keywords:
|
||||
- grafana
|
||||
- loki
|
||||
- logging
|
||||
- guide
|
||||
- data source
|
||||
menuTitle: Configure
|
||||
title: Configure the Loki data source
|
||||
weight: 200
|
||||
refs:
|
||||
log-details:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#labels-and-detected-fields
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/logs-integration/#labels-and-detected-fields
|
||||
alerting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/alerting-and-irm/alerting/
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
configure-pdc:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
|
||||
provisioning-data-sources:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
|
||||
data-source-management:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
|
||||
loki-query-editor:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/query-editor/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/query-editor/
|
||||
loki-template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/template-variables/
|
||||
---
|
||||
|
||||
# Configure the Loki data source
|
||||
|
||||
This document provides instructions for configuring the Loki data source and explains available configuration options. For general information about data sources, refer to [Data source management](ref:data-source-management).
|
||||
|
||||
Grafana ships with built-in support for [Loki](https://grafana.com/docs/loki/latest/), an open-source log aggregation system by Grafana Labs. If you are new to Loki, the following documentation will help you get started:
|
||||
|
||||
- [Getting started](https://grafana.com/docs/loki/latest/get-started/)
|
||||
- [Best practices](https://grafana.com/docs/loki/latest/best-practices/#best-practices)
|
||||
|
||||
## Before you begin
|
||||
|
||||
Before configuring the Loki data source, ensure you have the following:
|
||||
|
||||
- **Grafana permissions:** You must have the `Organization administrator` role to configure data sources. Organization administrators can also [configure the data source via YAML](#provision-the-data-source) with the Grafana provisioning system or [using Terraform](#provision-the-data-source-using-terraform).
|
||||
|
||||
- **Loki instance:** You need a running Loki instance and its URL. If you don't have one, refer to the [Loki installation documentation](https://grafana.com/docs/loki/latest/setup/install/).
|
||||
|
||||
- **Authentication details (if applicable):** If your Loki instance requires authentication, gather the necessary credentials such as username and password for basic authentication, or any required certificates for TLS authentication.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
The Loki data source plugin is built into Grafana. No additional installation is required.
|
||||
{{< /admonition >}}
|
||||
|
||||
## Add the Loki data source
|
||||
|
||||
To add the Loki data source, complete the following steps:
|
||||
|
||||
1. Click **Connections** in the left-side menu.
|
||||
1. Under **Connections**, click **Add new connection**.
|
||||
1. Enter `Loki` in the search bar.
|
||||
1. Select **Loki data source**.
|
||||
1. Click **Create a Loki data source** in the upper right.
|
||||
|
||||
You are taken to the **Settings** tab where you will set up your Loki configuration.
|
||||
|
||||
## Configure Loki using the UI
|
||||
|
||||
The following are the configuration options for Loki.
|
||||
|
||||
| Name | Description |
|
||||
| ----------- | ---------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Name** | The data source name. This is how you refer to the data source in panels and queries. Examples: `loki-1`, `loki_logs`. |
|
||||
| **Default** | Toggle to set this data source as the default. When enabled, new panels automatically use this data source. |
|
||||
|
||||
### Connection section
|
||||
|
||||
| Name | Description |
|
||||
| ------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **URL** | The URL of your Loki server, including the port. The default Loki port is `3100`. Examples: `http://localhost:3100`, `http://loki.example.org:3100`. |
|
||||
|
||||
### Authentication section
|
||||
|
||||
Select an authentication method from the **Authentication** dropdown.
|
||||
|
||||
| Setting | Description |
|
||||
| -------------------------- | ---------------------------------------------------------------------------------------------------------- |
|
||||
| **No authentication** | No authentication is required to access the data source. |
|
||||
| **Basic authentication** | Authenticate using a username and password. Enter the credentials in the **User** and **Password** fields. |
|
||||
| **Forward OAuth identity** | Forward the OAuth access token (and the OIDC ID token if available) of the user querying the data source. |
|
||||
|
||||
### TLS settings
|
||||
|
||||
Use TLS (Transport Layer Security) for an additional layer of security when working with Loki. For more information on setting up TLS encryption with Loki, refer to [Grafana Loki configuration parameters](https://grafana.com/docs/loki/latest/configuration/).
|
||||
|
||||
| Setting | Description |
|
||||
| ----------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Add self-signed certificate** | Enable to add a self-signed CA certificate. When enabled, enter the certificate in the **CA Certificate** field. The certificate must begin with `-----BEGIN CERTIFICATE-----`. |
|
||||
| **TLS Client Authentication** | Enable to use client certificate authentication. When enabled, enter the **ServerName** (for example, `domain.example.com`), **Client Certificate** (begins with `-----BEGIN CERTIFICATE-----`), and **Client Key** (begins with `-----BEGIN RSA PRIVATE KEY-----`). |
|
||||
| **Skip TLS certificate validation** | Enable to bypass TLS certificate validation. Use this option only for testing or when connecting to Loki instances with self-signed certificates. |
|
||||
|
||||
### HTTP headers
|
||||
|
||||
Use HTTP headers to pass along additional context and metadata about the request/response.
|
||||
|
||||
| Setting | Description |
|
||||
| ---------- | -------------------------------------------------------------- |
|
||||
| **Header** | The name of the custom header. For example, `X-Custom-Header`. |
|
||||
| **Value** | The value of the custom header. For example, `Header value`. |
|
||||
|
||||
Click **+ Add another header** to add additional headers.
|
||||
|
||||
## Additional settings
|
||||
|
||||
Additional settings are optional settings that you can configure for more control over your data source.
|
||||
|
||||
### Advanced HTTP settings
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Allowed cookies** | Specify cookies by name that should be forwarded to the data source. The Grafana proxy deletes all forwarded cookies by default. |
|
||||
| **Timeout** | The HTTP request timeout in seconds. If not set, the default Grafana timeout is used. |
|
||||
|
||||
### Alerting
|
||||
|
||||
Manage alert rules for the Loki data source. For more information, refer to [Alerting](ref:alerting).
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------------------------- | ---------------------------------------------------------------------------------- |
|
||||
| **Manage alert rules in Alerting UI** | Toggle to manage alert rules for this Loki data source in the Grafana Alerting UI. |
|
||||
|
||||
### Queries
|
||||
|
||||
Configure options to customize your querying experience.
|
||||
|
||||
| Setting | Description |
|
||||
| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Maximum lines** | The maximum number of log lines returned by Loki. The default is `1000`. Increase for larger result sets during ad-hoc analysis. Decrease if your browser is sluggish when displaying log results. |
|
||||
|
||||
### Derived fields
|
||||
|
||||
Derived fields can be used to extract new fields from a log message and create a link from its value. For example, you can link to your tracing backend directly from your logs. These links appear in the [log details](ref:log-details).
|
||||
|
||||
Click **+ Add** to add a derived field. Each derived field has the following settings:
|
||||
|
||||
| Setting | Description |
|
||||
| ------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Name** | The field name. Displayed as a label in the log details. |
|
||||
| **Type** | The type of derived field. Select **Regex in log line** to extract values using a regular expression, or **Label** to use an existing label value. |
|
||||
| **Regex** | A regular expression to parse a part of the log message and capture it as the value of the new field. Can contain only one capture group. |
|
||||
| **URL** | The full link URL if the link is external, or a query for the target data source if the link is internal. You can interpolate the value from the field with the `${__value.raw}` macro. For example, `http://example.com/${__value.raw}`. |
|
||||
| **URL Label** | A custom display label for the link. This setting overrides the link label, which defaults to the full external URL or name of the linked internal data source. |
|
||||
| **Internal link** | Toggle to define an internal link. When enabled, you can select the target data source from a selector. This supports only tracing data sources. |
|
||||
| **Open in new tab** | Toggle to open the link in a new browser tab or window. |
|
||||
|
||||
{{< admonition type="caution" >}}
|
||||
Using complex regular expressions can impact browser performance when processing large volumes of logs. Consider using simpler patterns when possible.
|
||||
{{< /admonition >}}
|
||||
|
||||
#### Test derived fields
|
||||
|
||||
To test your derived field configuration:
|
||||
|
||||
1. Click **Show example log message** to display the debug section.
|
||||
1. In the **Debug log message** field, paste an example log line to test the regular expressions of your derived fields.
|
||||
1. Verify that the field extracts the expected value and the URL is interpolated correctly.
|
||||
|
||||
### Private data source connect
|
||||
|
||||
_Only for Grafana Cloud users._
|
||||
|
||||
Private data source connect, or PDC, allows you to establish a private, secured connection between a Grafana Cloud instance, or stack, and data sources secured within a private network. Click the drop-down to locate the URL for PDC. For more information regarding Grafana PDC, refer to [Private data source connect (PDC)](ref:private-data-source-connect) and [Configure Grafana private data source connect (PDC)](ref:configure-pdc) for instructions on setting up a PDC connection.
|
||||
|
||||
Click **Manage private data source connect** to open your PDC connection page and view your configuration details.
|
||||
|
||||
## Verify the connection
|
||||
|
||||
After configuring the data source, click **Save & test** to save your settings and verify the connection. A successful connection displays the following message:
|
||||
|
||||
**Data source successfully connected.**
|
||||
|
||||
If the test fails, verify:
|
||||
|
||||
- The Loki URL is correct and accessible from the Grafana server.
|
||||
- Any required authentication credentials are correct.
|
||||
- Network connectivity and firewall rules allow the connection.
|
||||
- TLS certificates are valid (if using HTTPS).
|
||||
|
||||
## Provision the data source
|
||||
|
||||
You can define and configure the data source in YAML files as part of the Grafana provisioning system.
|
||||
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-data-sources).
|
||||
|
||||
### Provisioning examples
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Loki
|
||||
type: loki
|
||||
access: proxy
|
||||
url: http://localhost:3100
|
||||
jsonData:
|
||||
timeout: 60
|
||||
maxLines: 1000
|
||||
```
|
||||
|
||||
**Using basic authorization and a derived field:**
|
||||
|
||||
You must escape the dollar (`$`) character in YAML values because it can be used to interpolate environment variables:
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Loki
|
||||
type: loki
|
||||
access: proxy
|
||||
url: http://localhost:3100
|
||||
basicAuth: true
|
||||
basicAuthUser: my_user
|
||||
jsonData:
|
||||
maxLines: 1000
|
||||
derivedFields:
|
||||
# Field with internal link pointing to data source in Grafana.
|
||||
# datasourceUid value can be anything, but it should be unique across all defined data source uids.
|
||||
- datasourceUid: my_jaeger_uid
|
||||
matcherRegex: "traceID=(\\w+)"
|
||||
name: TraceID
|
||||
# url will be interpreted as query for the datasource
|
||||
url: '$${__value.raw}'
|
||||
# optional for URL Label to set a custom display label for the link.
|
||||
urlDisplayLabel: 'View Trace'
|
||||
|
||||
# Field with external link.
|
||||
- matcherRegex: "traceID=(\\w+)"
|
||||
name: TraceID
|
||||
url: 'http://localhost:16686/trace/$${__value.raw}'
|
||||
secureJsonData:
|
||||
basicAuthPassword: test_password
|
||||
```
|
||||
|
||||
**Using a Jaeger data source:**
|
||||
|
||||
In this example, the Jaeger data source's `uid` value should match the Loki data source's `datasourceUid` value.
|
||||
|
||||
```yaml
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Jaeger
|
||||
type: jaeger
|
||||
url: http://jaeger-tracing-query:16686/
|
||||
access: proxy
|
||||
# UID should match the datasourceUid in derivedFields.
|
||||
uid: my_jaeger_uid
|
||||
```
|
||||
|
||||
## Provision the data source using Terraform
|
||||
|
||||
You can provision the Loki data source using [Terraform](https://www.terraform.io/) with the [Grafana Terraform provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs).
|
||||
|
||||
For more information about provisioning resources with Terraform, refer to the [Grafana as code using Terraform](https://grafana.com/docs/grafana-cloud/developer-resources/infrastructure-as-code/terraform/) documentation.
|
||||
|
||||
### Basic Terraform example
|
||||
|
||||
The following example creates a basic Loki data source:
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "loki" {
|
||||
name = "Loki"
|
||||
type = "loki"
|
||||
url = "http://localhost:3100"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
maxLines = 1000
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
### Terraform example with derived fields
|
||||
|
||||
The following example creates a Loki data source with a derived field that links to a Jaeger data source for trace correlation:
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "loki_with_tracing" {
|
||||
name = "Loki"
|
||||
type = "loki"
|
||||
url = "http://localhost:3100"
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
maxLines = 1000
|
||||
derivedFields = [
|
||||
{
|
||||
datasourceUid = grafana_data_source.jaeger.uid
|
||||
matcherRegex = "traceID=(\\w+)"
|
||||
name = "TraceID"
|
||||
url = "$${__value.raw}"
|
||||
urlDisplayLabel = "View Trace"
|
||||
}
|
||||
]
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
### Terraform example with basic authentication
|
||||
|
||||
The following example includes basic authentication:
|
||||
|
||||
```hcl
|
||||
resource "grafana_data_source" "loki_auth" {
|
||||
name = "Loki"
|
||||
type = "loki"
|
||||
url = "http://localhost:3100"
|
||||
|
||||
basic_auth_enabled = true
|
||||
basic_auth_username = "loki_user"
|
||||
|
||||
secure_json_data_encoded = jsonencode({
|
||||
basicAuthPassword = var.loki_password
|
||||
})
|
||||
|
||||
json_data_encoded = jsonencode({
|
||||
maxLines = 1000
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
For all available configuration options, refer to the [Grafana provider data source resource documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/data_source).
|
||||
|
||||
## Next steps
|
||||
|
||||
After configuring your Loki data source, explore these resources:
|
||||
|
||||
- [Query the Loki data source](ref:loki-query-editor) to learn how to build LogQL queries in Grafana
|
||||
- [Use template variables](ref:loki-template-variables) to create dynamic, reusable dashboards
|
||||
- [LogQL documentation](https://grafana.com/docs/loki/latest/query/) to learn more about the Loki query language
|
||||
@@ -16,11 +16,6 @@ menuTitle: Query editor
|
||||
title: Loki query editor
|
||||
weight: 300
|
||||
refs:
|
||||
annotate-visualizations:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
|
||||
logs:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/visualizations/logs/
|
||||
@@ -36,231 +31,247 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
|
||||
template-variables:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/template-variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/template-variables/
|
||||
configure-loki:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
loki-troubleshooting:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/troubleshooting/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/troubleshooting/
|
||||
---
|
||||
|
||||
# Loki query editor
|
||||
|
||||
The Loki data source's query editor helps you create [log](#create-a-log-query) and [metric](#create-a-metric-query) queries that use Loki's query language, [LogQL](/docs/loki/latest/logql/).
|
||||
The Loki data source query editor helps you create [log](#create-a-log-query) and [metric](#create-a-metric-query) queries using [LogQL](https://grafana.com/docs/loki/latest/logql/), Loki's query language.
|
||||
|
||||
You can query and display log data from Loki in [Explore](ref:explore) and in dashboards using the [Logs panel](ref:logs).
|
||||
|
||||
For general documentation on querying data sources in Grafana, refer to [Query and transform data](ref:query-transform-data).
|
||||
|
||||
## Before you begin
|
||||
|
||||
- [Configure the Loki data source](ref:configure-loki).
|
||||
- Familiarize yourself with [LogQL](https://grafana.com/docs/loki/latest/logql/).
|
||||
|
||||
## Choose a query editing mode
|
||||
|
||||
The Loki query editor has two modes:
|
||||
|
||||
- [Builder mode](#builder-mode), which provides a visual query designer.
|
||||
- [Code mode](#code-mode), which provides a feature-rich editor for writing queries.
|
||||
- **Builder mode** - Build queries using a visual interface without manually entering LogQL. Best for users new to Loki and LogQL.
|
||||
- **Code mode** - Write queries using a text editor with autocompletion, syntax highlighting, and query validation.
|
||||
|
||||
To switch between the editor modes, select the corresponding **Builder** and **Code** tabs.
|
||||
To switch between modes, select the **Builder** or **Code** tab at the top of the editor.
|
||||
|
||||
To run a query, select **Run queries** located at the top of the editor.
|
||||
Both modes are synchronized, so you can switch between them without losing your work. However, Builder mode doesn't support some complex queries. When switching from Code mode to Builder mode with an unsupported query, the editor displays a warning explaining which parts of the query might be lost.
|
||||
|
||||
{{< admonition type="note" >}}
|
||||
To run Loki queries in [Explore](ref:explore), select **Run query**.
|
||||
{{< /admonition >}}
|
||||
## Toolbar features
|
||||
|
||||
Each mode is synchronized, so you can switch between them without losing your work, although there are some limitations. Builder mode doesn't support some complex queries.
|
||||
When you switch from Code mode to Builder mode with such a query, the editor displays a warning message that explains how you might lose parts of the query if you continue.
|
||||
You can then decide whether you still want to switch to Builder mode.
|
||||
The query editor toolbar provides features available in both Builder and Code mode.
|
||||
|
||||
You can also augment queries by using [template variables](../template-variables/).
|
||||
### Kick start your query
|
||||
|
||||
## Toolbar elements
|
||||
|
||||
The query editor toolbar contains the following elements:
|
||||
|
||||
- **Kick start your query** - Click to see a list of queries that help you quickly get started creating LogQL queries. You can then continue to complete your query.
|
||||
|
||||
These include:
|
||||
Click **Kick start your query** to see a list of example queries that help you get started quickly. These include:
|
||||
|
||||
- Log query starters
|
||||
- Metric query starters
|
||||
|
||||
Click the arrow next to each to see available query options.
|
||||
Click the arrow next to each category to see available query templates. Selecting a template populates the query editor with a starting query you can customize.
|
||||
|
||||
- **Label browser** - Use the Loki label browser to navigate through your labels and values, and build queries.
|
||||
### Label browser
|
||||
|
||||
To navigate Loki and build a query:
|
||||
Use the label browser to explore available labels and values in your Loki instance:
|
||||
|
||||
1. Choose labels to locate.
|
||||
1. Search for the values of your selected labels.
|
||||
1. Click **Label browser** in the toolbar.
|
||||
1. Select labels to filter.
|
||||
1. Search for values using the search field, which supports fuzzy matching.
|
||||
|
||||
The search field supports fuzzy search, and the label browser also supports faceting to list only possible label combinations.
|
||||
The label browser supports faceting to show only valid label combinations.
|
||||
|
||||
1. Select the **Show logs** button to display log lines based on the selected labels, or select the **Show logs rate** button to show the rate based on metrics such as requests per second. Additionally, you can validate the selector by clicking the **Validate selector** button. Click **Clear** to start from the beginning.
|
||||
Click **Show logs** to display log lines based on the selected labels, or **Show logs rate** to show a rate metric. Use **Validate selector** to check your selection, or **Clear** to start over.
|
||||
|
||||
{{< figure src="/static/img/docs/explore/Loki_label_browser.png" class="docs-image--no-shadow" max-width="800px" caption="The Loki label browser" >}}
|
||||
|
||||
- **Explain query** - Toggle to display a step-by-step explanation of all query components and operations.
|
||||
### Explain query
|
||||
|
||||
{{< figure src="/static/img/docs/prometheus/explain-results.png" max-width="500px" class="docs-image--no-shadow" caption="Explain results" >}}
|
||||
Toggle **Explain query** to display a step-by-step explanation of all query components and operations. This helps you understand how your query works and learn LogQL syntax.
|
||||
|
||||
- **Builder/Code** - Click the corresponding **Builder** or **Code** tab on the toolbar to select an editor mode.
|
||||
{{< figure src="/static/img/docs/prometheus/explain-results.png" max-width="500px" class="docs-image--no-shadow" caption="Explain query results" >}}
|
||||
|
||||
## Builder mode
|
||||
## Build a query in Builder mode
|
||||
|
||||
Builder mode helps you build queries using a visual interface without needing to manually enter LogQL. This option is best for users who have limited or no previous experience working with Loki and LogQL.
|
||||
Builder mode provides a visual interface for constructing LogQL queries without writing code.
|
||||
|
||||
### Label filters
|
||||
### Select labels
|
||||
|
||||
Select labels and their values from the dropdown list.
|
||||
When you select a label, Grafana retrieves available values from the server.
|
||||
Start by selecting labels to filter your log streams:
|
||||
|
||||
Use the `+` button to add a label and the `x` button to remove a label. You can add multiple labels.
|
||||
1. Select a label from the **Label** dropdown.
|
||||
1. Choose a comparison operator:
|
||||
- `=` - equals
|
||||
- `!=` - does not equal
|
||||
- `=~` - matches regex
|
||||
- `!~` - does not match regex
|
||||
1. Select a value from the **Value** dropdown, which displays available values for the selected label.
|
||||
|
||||
Select comparison operators from the following options:
|
||||
Use the `+` button to add additional label filters and the `x` button to remove them.
|
||||
|
||||
- `=` - equal to
|
||||
- `!=` - is not equal
|
||||
- `=~` - matches regex
|
||||
- `!~` - does not match regex
|
||||
### Add operations
|
||||
|
||||
Select values by using the dropdown, which displays all possible values based on the label selected.
|
||||
Select the **+ Operations** button to add operations to your query. The query editor groups operations into the following categories:
|
||||
|
||||
### Operations
|
||||
- **Aggregations** - refer to [Built-in aggregation operators](https://grafana.com/docs/loki/latest/logql/metric_queries/#built-in-aggregation-operators)
|
||||
- **Range functions** - refer to [Range Vector aggregation](https://grafana.com/docs/loki/latest/logql/metric_queries/#range-vector-aggregation)
|
||||
- **Formats** - refer to [Log queries](https://grafana.com/docs/loki/latest/logql/log_queries/#log-queries)
|
||||
- **Binary operations** - refer to [Binary operators](https://grafana.com/docs/loki/latest/logql/#binary-operators)
|
||||
- **Label filters** - refer to [Label filter expression](https://grafana.com/docs/loki/latest/logql/log_queries/#label-filter-expression)
|
||||
- **Line filters** - refer to [Line filter expression](https://grafana.com/docs/loki/latest/logql/log_queries/#line-filter-expression)
|
||||
|
||||
Select the `+ Operations` button to add operations to your query.
|
||||
The query editor groups operations into related sections, and you can type while the operations dropdown is open to search and filter the list.
|
||||
You can type while the operations dropdown is open to search and filter the list.
|
||||
|
||||
The query editor displays a query's operations as boxes in the operations section.
|
||||
Each operation's header displays its name, and additional action buttons appear when you hover your cursor over the header:
|
||||
Each operation appears as a box in the query editor. Hover over an operation's header to reveal action buttons:
|
||||
|
||||
| Button | Action |
|
||||
| ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
|
||||
| {{< figure src="/static/img/docs/v95/loki_operation_replace.png" class="docs-image--no-shadow" max-width="30px" >}} | Replaces the operation with different operation of the same type. |
|
||||
| {{< figure src="/static/img/docs/v95/loki_operation_description.png" class="docs-image--no-shadow" max-width="30px" >}} | Opens the operation's description tooltip. |
|
||||
| {{< figure src="/static/img/docs/v95/loki_operation_remove.png" class="docs-image--no-shadow" max-width="30px" >}} | Removes the operation. |
|
||||
| Button | Action |
|
||||
| ----------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------ |
|
||||
| {{< figure src="/static/img/docs/v95/loki_operation_replace.png" class="docs-image--no-shadow" max-width="30px" >}} | Replace the operation with a different operation of the same type. |
|
||||
| {{< figure src="/static/img/docs/v95/loki_operation_description.png" class="docs-image--no-shadow" max-width="30px" >}} | Open the operation's description tooltip. |
|
||||
| {{< figure src="/static/img/docs/v95/loki_operation_remove.png" class="docs-image--no-shadow" max-width="30px" >}} | Remove the operation. |
|
||||
|
||||
The query editor groups operations into the following sections:
|
||||
Some operations only make sense in a specific order. If adding an operation would result in an invalid query, the editor automatically places it in the correct position. To re-order operations manually, drag the operation box by its name and drop it in the desired location.
|
||||
|
||||
- Aggregations - see [Built-in aggregation operators](/docs/loki/latest/logql/metric_queries/#built-in-aggregation-operators)
|
||||
- Range functions - see [Range Vector aggregation](/docs/loki/latest/logql/metric_queries/#range-vector-aggregation)
|
||||
- Formats - see [Log queries](/docs/loki/latest/logql/log_queries/#log-queries)
|
||||
- Binary operations - see [Binary operators](/docs/loki/latest/logql/#binary-operators)
|
||||
- Label filters - see [Label filter expression](/docs/loki/latest/logql/log_queries/#label-filter-expression)
|
||||
- Line filters - see [Line filter expression](/docs/loki/latest/logql/log_queries/#line-filter-expression)
|
||||
For more information, refer to [Order of operations](https://grafana.com/docs/loki/latest/logql/#order-of-operations).
|
||||
|
||||
Some operations make sense only when used in a specific order. If adding an operation would result in a nonsensical query, the query editor adds the operation to the correct place.
|
||||
To re-order operations manually, drag the operation box by its name and drop it into the desired place. For additional information see [Order of operations](/docs/loki/latest/logql/#order-of-operations).
|
||||
### Query preview
|
||||
|
||||
As you build your query, the editor displays a visual preview of the query structure. Each step is numbered and includes a description:
|
||||
|
||||
- **Step 1** typically shows your label selector (for example, `{}` with "Fetch all log lines matching label filters")
|
||||
- **Subsequent steps** show operations you've added (for example, `|= ""` with "Return log lines that contain string")
|
||||
|
||||
The raw LogQL query is displayed at the bottom of the query editor, showing the complete syntax that will be executed.
|
||||
|
||||
### Hints
|
||||
|
||||
In some cases the query editor can detect which operations would be most appropriate for a selected log stream. In such cases it will show a hint next to the `+ Operations` button. Click on the hint to add the operations to your query.
|
||||
The query editor can detect which operations would be most appropriate for a selected log stream. When available, a hint appears next to the **+ Operations** button. Click the hint to add the suggested operations to your query.
|
||||
|
||||
## Code mode
|
||||
## Write a query in Code mode
|
||||
|
||||
In **Code mode**, you can write complex queries using a text editor with autocompletion feature, syntax highlighting, and query validation.
|
||||
It also contains a [label browser](#label-browser) to further help you write queries.
|
||||
Code mode provides a text editor for writing LogQL queries directly. This mode is ideal for complex queries or users familiar with LogQL syntax.
|
||||
|
||||
For more information about Loki's query language, refer to the [Loki documentation](/docs/loki/latest/logql/).
|
||||
### Autocompletion
|
||||
|
||||
### Use autocompletion
|
||||
Autocompletion works automatically as you type. The editor can autocomplete:
|
||||
|
||||
Code mode's autocompletion feature works automatically while typing.
|
||||
- Static functions, aggregations, and keywords
|
||||
- Dynamic items like labels and label values
|
||||
|
||||
The query editor can autocomplete static functions, aggregations, and keywords, and also dynamic items like labels.
|
||||
The autocompletion dropdown includes documentation for the suggested items where available.
|
||||
The autocompletion dropdown includes documentation for suggested items where available.
|
||||
|
||||
## Options
|
||||
## Configure query options
|
||||
|
||||
The following options are the same for both **Builder** and **Code** mode:
|
||||
The following options are available in both Builder and Code mode. Expand the **Options** section to configure them.
|
||||
|
||||
- **Legend** - Controls the time series name, using a name or pattern. For example, `{{hostname}}` is replaced with the label value for the label `hostname`.
|
||||
| Option | Description |
|
||||
| -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Legend** | Controls the time series name using a name or pattern. For example, `{{hostname}}` is replaced with the label value for the label `hostname`. |
|
||||
| **Type** | Selects the query type. `instant` queries a single point in time (uses the "To" time from the time range). `range` queries over the selected time range. |
|
||||
| **Line limit** | Defines the maximum number of log lines returned by a query. Default is `1000`. |
|
||||
| **Direction** | Determines the search order. **Backward** searches from the end of the time range (default). **Forward** searches from the beginning. |
|
||||
| **Step** | Sets the step parameter for metric queries. Default is `$__auto`, calculated using the time range and graph width. |
|
||||
|
||||
- **Type** - Selects the query type to run. The `instant` type queries against a single point in time. We use the "To" time from the time range. The `range` type queries over the selected range of time.
|
||||
### Query stats
|
||||
|
||||
- **Line limit** - Defines the upper limit for the number of log lines returned by a query. The default is `1000`.
|
||||
The Options section displays query statistics to help you estimate the size and cost of your query before running it. Stats include:
|
||||
|
||||
- **Direction** - Determines the search order. **Backward** is a backward search starting at the end of the time range. **Forward** is a forward search starting at the beginning of the time range. The default is **Backward**.
|
||||
- **Streams** - Number of log streams matching your label selectors
|
||||
- **Chunks** - Number of data chunks to be scanned
|
||||
- **Bytes** - Estimated data size
|
||||
- **Entries** - Estimated number of log entries
|
||||
|
||||
- **Step** - Sets the step parameter of Loki metrics queries. The default value equals the value of the `$__auto` variable, which is calculated using the time range and the width of the graph (the number of pixels).
|
||||
These statistics update automatically as you build your query and can help you optimize queries to reduce load on your Loki instance.
|
||||
|
||||
## Run a query
|
||||
|
||||
To execute your query, click **Run queries** at the top of the query editor. The results display in the visualization panel below the editor.
|
||||
|
||||
In Explore, you can also press `Shift+Enter` to run the query.
|
||||
|
||||
## Create a log query
|
||||
|
||||
Loki log queries return the contents of the log lines.
|
||||
You can query and display log data from Loki via [Explore](ref:explore), and with the [Logs panel](ref:logs) in dashboards.
|
||||
Log queries return the contents of log lines. These are the most common type of Loki query.
|
||||
|
||||
To display the results of a log query, select the Loki data source, then enter a LogQL query.
|
||||
To create a log query:
|
||||
|
||||
For more information about log queries and LogQL, refer to the [Loki log queries documentation](/docs/loki/latest/logql/log_queries/).
|
||||
1. Select labels to filter your log streams.
|
||||
1. Optionally add line filters to search for specific text patterns.
|
||||
1. Optionally add parsers (like `json` or `logfmt`) to extract fields from log lines.
|
||||
1. Click **Run queries** to execute the query.
|
||||
|
||||
For more information about log queries and LogQL, refer to the [Loki log queries documentation](https://grafana.com/docs/loki/latest/logql/log_queries/).
|
||||
|
||||
### Show log context
|
||||
|
||||
In Explore, you can retrieve the context surrounding your log results by clicking the `Show Context` button. You'll be able to investigate the logs from the same log stream that came before and after the log message you're interested in.
|
||||
In Explore, click **Show Context** on any log line to view the surrounding logs from the same log stream.
|
||||
|
||||
The initial log context query is created from all labels defining the stream for the selected log line. You can use the log context query editor to widen the search by removing one or more of the label filters from the log stream. Additionally, if you used a parser in your original query, you can refine your search by using extracted label filters.
|
||||
The initial context query uses all labels from the selected log line. You can widen the search by removing label filters in the log context query editor. If your original query used a parser, you can also refine the search using extracted label filters.
|
||||
|
||||
To reduce the repetition of selecting and removing the same labels when examining multiple log context windows, Grafana stores your selected labels and applies them to each open context window. This lets you seamlessly navigate through various log context windows without having to reapply your filters.
|
||||
Grafana stores your label selections and applies them to each context window you open, so you don't need to reapply filters when examining multiple log lines.
|
||||
|
||||
To reset filters and use the initial log context query, click the `Revert to initial query` button next to the query preview.
|
||||
|
||||
### Tail live logs
|
||||
|
||||
Loki supports live tailing of logs in real-time in [Explore](ref:explore).
|
||||
|
||||
Live tailing relies on two WebSocket connections: one between the browser and Grafana server, and another between the Grafana server and Loki server.
|
||||
|
||||
To start tailing logs click the **Live** button in the top right corner of the Explore view.
|
||||
{{< figure src="/static/img/docs/v95/loki_tailing.png" class="docs-image--no-shadow" max-width="80px" >}}
|
||||
|
||||
#### Proxying examples
|
||||
|
||||
If you use reverse proxies, configure them as follows to support live tailing:
|
||||
|
||||
**Using Apache2 for proxying between the browser and the Grafana server:**
|
||||
|
||||
```
|
||||
ProxyPassMatch "^/(api/datasources/proxy/\d+/loki/api/v1/tail)" "ws://127.0.0.1:3000/$1"
|
||||
```
|
||||
|
||||
**Using NGINX:**
|
||||
|
||||
This example provides a basic NGINX proxy configuration.
|
||||
It assumes that the Grafana server is available at `http://localhost:3000/`, the Loki server is running locally without proxy, and your external site uses HTTPS.
|
||||
If you also host Loki behind an NGINX proxy, repeat the following configuration for Loki.
|
||||
|
||||
In the `http` section of NGINX configuration, add the following map definition:
|
||||
|
||||
```
|
||||
map $http_upgrade $connection_upgrade {
|
||||
default upgrade;
|
||||
'' close;
|
||||
}
|
||||
```
|
||||
|
||||
In your `server` section, add the following configuration:
|
||||
|
||||
```
|
||||
location ~ /(api/datasources/proxy/\d+/loki/api/v1/tail) {
|
||||
proxy_pass http://localhost:3000$request_uri;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-for $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto "https";
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
}
|
||||
|
||||
location / {
|
||||
proxy_pass http://localhost:3000/;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-for $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto "https";
|
||||
}
|
||||
```
|
||||
To reset filters, click **Revert to initial query** next to the query preview.
|
||||
|
||||
## Create a metric query
|
||||
|
||||
You can use LogQL to wrap a log query with functions that create metrics from your logs.
|
||||
Metric queries use LogQL to extract numeric data from logs. You wrap a log query with aggregation functions to create time series data for visualization and alerting.
|
||||
|
||||
For more information about metric queries, refer to the [Loki metric queries documentation](/docs/loki/latest/logql/metric_queries/).
|
||||
### Common metric query patterns
|
||||
|
||||
## Apply annotations
|
||||
| Function | Description | Example |
|
||||
| ------------------- | ----------------------------------------------- | ---------------------------------------------------- |
|
||||
| `rate()` | Calculates the number of log entries per second | `rate({job="app"}[5m])` |
|
||||
| `count_over_time()` | Counts log entries over the specified interval | `count_over_time({job="app"}[1h])` |
|
||||
| `bytes_rate()` | Calculates bytes per second of log entries | `bytes_rate({job="app"}[5m])` |
|
||||
| `sum_over_time()` | Sums extracted numeric values | `sum_over_time({job="app"} \| unwrap duration [5m])` |
|
||||
|
||||
[Annotations](ref:annotate-visualizations) overlay rich event information on top of graphs.
|
||||
You can add annotation queries in the Dashboard menu's Annotations view.
|
||||
### Build a metric query
|
||||
|
||||
You can only use log queries as a source for annotations.
|
||||
Grafana automatically uses log content as annotation text and your log stream labels as tags.
|
||||
You don't need to create any additional mapping.
|
||||
To create a metric query in Builder mode:
|
||||
|
||||
1. Select labels to filter your log streams.
|
||||
1. Click **+ Operations** and select a range function (for example, **Rate**).
|
||||
1. The editor wraps your log selector with the function and adds a time interval.
|
||||
1. Optionally add aggregations like `sum`, `avg`, or `max` to combine results.
|
||||
|
||||
In Code mode, enter the full LogQL expression directly:
|
||||
|
||||
```logql
|
||||
sum(rate({job="app", level="error"}[5m])) by (instance)
|
||||
```
|
||||
|
||||
This query calculates the per-second rate of error logs, then sums the results grouped by instance.
|
||||
|
||||
For more information, refer to the [Loki metric queries documentation](https://grafana.com/docs/loki/latest/logql/metric_queries/).
|
||||
|
||||
## Tail live logs
|
||||
|
||||
Loki supports live tailing of logs in real-time in [Explore](ref:explore).
|
||||
|
||||
To start tailing logs, click the **Live** button in the top right corner of the Explore view.
|
||||
|
||||
{{< figure src="/static/img/docs/v95/loki_tailing.png" class="docs-image--no-shadow" max-width="80px" >}}
|
||||
|
||||
Live tailing relies on two WebSocket connections: one between the browser and Grafana server, and another between the Grafana server and Loki server.
|
||||
|
||||
If you use reverse proxies, you may need to configure them to support WebSocket connections. For proxy configuration examples, refer to the [Loki troubleshooting documentation](ref:loki-troubleshooting).
|
||||
|
||||
## Use template variables
|
||||
|
||||
You can use template variables in your queries to create dynamic, reusable dashboards. Template variables appear as dropdown menus at the top of dashboards, allowing users to change query parameters without editing the query directly.
|
||||
|
||||
For information on creating and using template variables with Loki, refer to [Loki template variables](ref:template-variables).
|
||||
|
||||
@@ -38,22 +38,32 @@ refs:
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/
|
||||
query-editor-options:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/query-editor/#options
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/query-editor/#options
|
||||
configure-loki:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
---
|
||||
|
||||
# Loki template variables
|
||||
|
||||
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
|
||||
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
|
||||
Grafana refers to such variables as template variables.
|
||||
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables. Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard. Grafana refers to such variables as template variables.
|
||||
|
||||
For an introduction to templating and template variables, refer to the [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables) documentation.
|
||||
|
||||
## Before you begin
|
||||
|
||||
- Ensure you have [configured the Loki data source](ref:configure-loki).
|
||||
- Your Loki instance should have logs with labels that you want to use as variable values.
|
||||
|
||||
## Use query variables
|
||||
|
||||
Variables of the type _Query_ help you query Loki for lists of labels or label values.
|
||||
The Loki data source provides a form to select the type of values expected for a given variable.
|
||||
|
||||
The form has these options:
|
||||
Use _Query_ type variables to dynamically fetch label names or label values from Loki. When you create a query variable with the Loki data source, you can choose what type of data to retrieve:
|
||||
|
||||
| Query type | Example label | Example stream selector | List returned |
|
||||
| ------------ | ------------- | ----------------------- | ---------------------------------------------------------------- |
|
||||
@@ -61,6 +71,26 @@ The form has these options:
|
||||
| Label values | `label` | | Label values for `label`. |
|
||||
| Label values | `label` | `log stream selector` | Label values for `label` in the specified `log stream selector`. |
|
||||
|
||||
### Create a query variable
|
||||
|
||||
To create a query variable for Loki:
|
||||
|
||||
1. Open the dashboard where you want to add the variable.
|
||||
1. Click **Dashboard settings** (gear icon) in the top navigation.
|
||||
1. Select **Variables** in the left menu.
|
||||
1. Click **Add variable**.
|
||||
1. Enter a **Name** for your variable (for example, `job`, `instance`, `level`).
|
||||
1. In the **Type** dropdown, select **Query**.
|
||||
1. In the **Data source** dropdown, select your Loki data source.
|
||||
1. In the **Query type** dropdown, select **Label names** or **Label values**.
|
||||
1. If you selected **Label values**, enter the label name in the **Label** field (for example, `job`).
|
||||
1. Optionally, enter a **Stream selector** to filter the label values (for example, `{namespace="production"}`).
|
||||
1. Click **Run query** to preview the variable values.
|
||||
1. Configure display options such as **Multi-value** or **Include All option** as needed.
|
||||
1. Click **Apply** to save the variable.
|
||||
|
||||
You can now use the variable in your Loki queries with the syntax `${variable_name}`. For example, `{job="$job"}` filters logs by the selected job.
|
||||
|
||||
## Use ad hoc filters
|
||||
|
||||
Loki supports the special **Ad hoc filters** variable type.
|
||||
@@ -68,27 +98,29 @@ You can use this variable type to specify any number of key/value filters, and G
|
||||
|
||||
For more information, refer to [Add ad hoc filters](ref:add-template-variables-add-ad-hoc-filters).
|
||||
|
||||
## Use $\_\_auto variable for Loki metric queries
|
||||
## Use the $\_\_auto variable for Loki metric queries
|
||||
|
||||
Consider using the `$__auto` variable in your Loki metric queries, which will automatically be substituted with the [step value](https://grafana.com/docs/grafana/next/datasources/loki/query-editor/#options) for range queries, and with the selected time range's value (computed from the starting and ending times) for instant queries.
|
||||
Consider using the `$__auto` variable in your Loki metric queries. This variable is automatically substituted with the [step value](ref:query-editor-options) for range queries, and with the selected time range's value (computed from the starting and ending times) for instant queries.
|
||||
|
||||
For more information about variables, refer to [Global built-in variables](ref:add-template-variables-global-variables).
|
||||
|
||||
## Label extraction and indexing in Loki
|
||||
## Extract and index labels in Loki
|
||||
|
||||
Labels play a fundamental role in Loki's log aggregation and querying capabilities. When logs are ingested into Loki, they are often accompanied by metadata called `labels`, which provide contextual information about the log entries. These labels consist of `key-value` pairs and are essential for organizing, filtering, and searching log data efficiently.
|
||||
|
||||
### Label extraction
|
||||
### Extract labels
|
||||
|
||||
During the ingestion process, Loki performs label extraction from log lines. Loki's approach to label extraction is based on `regular expressions`, allowing users to specify custom patterns for parsing log lines and extracting relevant label key-value pairs. This flexibility enables Loki to adapt to various log formats and schemas.
|
||||
|
||||
For example, suppose you have log lines in the following format:
|
||||
|
||||
**2023-07-25 12:34:56 INFO: Request from IP A.B.C.D to endpoint /api/data**
|
||||
```
|
||||
2023-07-25 12:34:56 INFO: Request from IP A.B.C.D to endpoint /api/data
|
||||
```
|
||||
|
||||
To extract labels from this log format, you could define a regular expression to extract the log level ("INFO"), IP address ("A.B.C.D"), and endpoint ("/api/data") as labels. These labels can later be used to filter and aggregate log entries.
|
||||
To extract labels from this log format, you could define a regular expression to extract the log level (`INFO`), IP address (`A.B.C.D`), and endpoint (`/api/data`) as labels. These labels can later be used to filter and aggregate log entries.
|
||||
|
||||
### Indexing labels
|
||||
### Index labels
|
||||
|
||||
Once labels are extracted, Loki efficiently indexes them. The index serves as a lookup mechanism that maps labels to the corresponding log entries. This indexing process enables faster retrieval of logs based on specific label criteria, significantly enhancing query performance.
|
||||
|
||||
@@ -96,6 +128,4 @@ For instance, if you have a label "job" that represents different services in yo
|
||||
|
||||
By effectively extracting and indexing labels, Loki enables users to perform complex and targeted log queries without compromising on query speed and resource consumption.
|
||||
|
||||
Utilizing Loki's indexed labels in combination with Grafana's template variables provides a powerful way to interactively explore and visualize log data. Template variables allow users to create dynamic queries, selecting and filtering logs based on various labels, such as job names, instance IDs, severity levels, or any other contextual information attached to the log entries.
|
||||
|
||||
In conclusion, Loki's label extraction and indexing mechanisms are key components that contribute to its ability to handle vast amounts of log data efficiently. By making use of labels and template variables, users can easily gain valuable insights from their log data and troubleshoot issues effectively.
|
||||
Combining Loki's indexed labels with Grafana template variables provides a powerful way to interactively explore and visualize log data. Template variables let you create dynamic queries that filter logs based on labels such as job names, instance IDs, or severity levels.
|
||||
|
||||
387
docs/sources/datasources/loki/troubleshooting/index.md
Normal file
387
docs/sources/datasources/loki/troubleshooting/index.md
Normal file
@@ -0,0 +1,387 @@
|
||||
---
|
||||
aliases:
|
||||
- ../../data-sources/loki/troubleshooting/
|
||||
description: Troubleshoot issues with the Loki data source in Grafana
|
||||
keywords:
|
||||
- grafana
|
||||
- loki
|
||||
- troubleshooting
|
||||
- errors
|
||||
- logs
|
||||
labels:
|
||||
products:
|
||||
- cloud
|
||||
- enterprise
|
||||
- oss
|
||||
menuTitle: Troubleshooting
|
||||
title: Troubleshoot issues with the Loki data source
|
||||
weight: 600
|
||||
refs:
|
||||
configure-loki:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/loki/configure/
|
||||
private-data-source-connect:
|
||||
- pattern: /docs/grafana/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
- pattern: /docs/grafana-cloud/
|
||||
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
|
||||
---
|
||||
|
||||
# Troubleshoot issues with the Loki data source
|
||||
|
||||
This document provides troubleshooting information for common errors you may encounter when using the Loki data source in Grafana.
|
||||
|
||||
## Connection errors
|
||||
|
||||
The following errors occur when Grafana cannot establish or maintain a connection to Loki.
|
||||
|
||||
### Unable to connect with Loki
|
||||
|
||||
**Error message:** "Unable to connect with Loki. Please check the server logs for more details."
|
||||
|
||||
**Cause:** Grafana cannot establish a network connection to the Loki server.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the Loki URL is correct in the [data source configuration](ref:configure-loki).
|
||||
1. Check that Loki is running and accessible from the Grafana server.
|
||||
1. Ensure no firewall rules are blocking the connection.
|
||||
1. If using a proxy, verify the proxy settings are correct.
|
||||
1. For Grafana Cloud, ensure you have configured [Private data source connect](ref:private-data-source-connect) if your Loki instance is not publicly accessible.
|
||||
|
||||
### Request timed out
|
||||
|
||||
**Error message:** "context deadline exceeded" or "request timed out"
|
||||
|
||||
**Cause:** The connection to Loki timed out before receiving a response.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check the network latency between Grafana and Loki.
|
||||
1. Verify Loki is not overloaded or experiencing performance issues.
|
||||
1. Increase the **Timeout** setting in the data source configuration under **Additional settings** > **Advanced HTTP settings**.
|
||||
1. Check if any network devices (load balancers, proxies) are timing out the connection.
|
||||
1. Reduce the time range or complexity of your query.
|
||||
|
||||
### Failed to parse data source URL
|
||||
|
||||
**Error message:** "Failed to parse data source URL"
|
||||
|
||||
**Cause:** The URL entered in the data source configuration is not valid.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the URL format is correct (for example, `http://localhost:3100` or `https://loki.example.com:3100`).
|
||||
1. Ensure the URL includes the protocol (`http://` or `https://`).
|
||||
1. Remove any trailing slashes or invalid characters from the URL.
|
||||
|
||||
## Authentication errors
|
||||
|
||||
The following errors occur when there are issues with authentication credentials or permissions.
|
||||
|
||||
### Unauthorized (401)
|
||||
|
||||
**Error message:** "Status: 401 Unauthorized"
|
||||
|
||||
**Cause:** The authentication credentials are invalid or missing.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the username and password are correct in the data source configuration.
|
||||
1. Check the authentication method matches your Loki configuration.
|
||||
1. If using a bearer token or API key, ensure it is valid and has not expired.
|
||||
1. Verify the credentials have permission to access the Loki API.
|
||||
|
||||
### Forbidden (403)
|
||||
|
||||
**Error message:** "Status: 403 Forbidden"
|
||||
|
||||
**Cause:** The authenticated user does not have permission to access the requested resource.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the user has read access to the log streams you are querying.
|
||||
1. Check Loki's authentication and authorization configuration.
|
||||
1. If using multi-tenancy, ensure the correct tenant ID (X-Scope-OrgID header) is configured.
|
||||
1. Review any access control policies in your Loki deployment.
|
||||
|
||||
## Query errors
|
||||
|
||||
The following errors occur when there are issues with LogQL query syntax or execution.
|
||||
|
||||
### Parse error
|
||||
|
||||
**Error message:** "parse error" or "syntax error"
|
||||
|
||||
**Cause:** The LogQL query contains invalid syntax.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check the query for typos or missing characters.
|
||||
1. Verify all brackets, braces, and parentheses are properly balanced.
|
||||
1. Ensure label matchers use the correct operators (`=`, `!=`, `=~`, `!~`).
|
||||
1. Verify string values are enclosed in double quotes.
|
||||
1. Refer to the [LogQL documentation](https://grafana.com/docs/loki/latest/query/) for correct syntax.
|
||||
|
||||
**Common syntax issues:**
|
||||
|
||||
| Issue | Incorrect | Correct |
|
||||
| ----------------- | -------------- | -------------- |
|
||||
| Missing quotes | `{job=app}` | `{job="app"}` |
|
||||
| Wrong operator | `{job=="app"}` | `{job="app"}` |
|
||||
| Unbalanced braces | `{job="app"` | `{job="app"}` |
|
||||
| Invalid regex | `{job=~"["}` | `{job=~"\\["}` |
|
||||
|
||||
### Query limits exceeded
|
||||
|
||||
**Error message:** "query returned more than the max number of entries" or "max entries limit exceeded"
|
||||
|
||||
**Cause:** The query returned more log entries than the configured limit allows.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Add more specific label selectors to reduce the number of matching streams.
|
||||
1. Add line filters to narrow down the results (for example, `|= "error"`).
|
||||
1. Reduce the time range of your query.
|
||||
1. Increase the **Maximum lines** setting in the data source configuration.
|
||||
1. If you control the Loki instance, consider adjusting Loki's `max_entries_limit_per_query` setting.
|
||||
|
||||
### Query timeout
|
||||
|
||||
**Error message:** "query timed out"
|
||||
|
||||
**Cause:** The query took longer to execute than the configured timeout.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Simplify the query by adding more selective label matchers.
|
||||
1. Reduce the time range.
|
||||
1. Avoid expensive operations like complex regex patterns on high-cardinality data.
|
||||
1. If you control the Loki instance, check Loki's query timeout settings.
|
||||
|
||||
### Too many outstanding requests
|
||||
|
||||
**Error message:** "too many outstanding requests"
|
||||
|
||||
**Cause:** Loki has reached its limit for concurrent queries.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Wait a moment and retry the query.
|
||||
1. Reduce the number of panels or dashboards querying Loki simultaneously.
|
||||
1. If you control the Loki instance, consider increasing Loki's concurrency limits.
|
||||
|
||||
## Metric query errors
|
||||
|
||||
The following errors occur when using LogQL metric queries.
|
||||
|
||||
### Invalid unwrap expression
|
||||
|
||||
**Error message:** "invalid unwrap expression" or "unwrap: label does not exist"
|
||||
|
||||
**Cause:** The `unwrap` function references a label that doesn't exist or isn't numeric.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the label name in the `unwrap` expression exists in your log data.
|
||||
1. Ensure the label contains numeric values.
|
||||
1. Add a parser stage (`| logfmt`, `| json`, etc.) before `unwrap` to extract the label from log content.
|
||||
|
||||
**Example fix:**
|
||||
|
||||
```logql
|
||||
# Incorrect - label might not exist
|
||||
{job="app"} | unwrap latency
|
||||
|
||||
# Correct - parse the log first
|
||||
{job="app"} | logfmt | unwrap latency
|
||||
```
|
||||
|
||||
### Division by zero
|
||||
|
||||
**Error message:** "division by zero"
|
||||
|
||||
**Cause:** A metric query attempted to divide by zero.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Add conditions to handle cases where the denominator could be zero.
|
||||
1. Use the `or` operator to provide a default value.
|
||||
|
||||
## Common issues
|
||||
|
||||
The following issues don't always produce specific error messages but are commonly encountered.
|
||||
|
||||
### Empty query results
|
||||
|
||||
**Cause:** The query returns no data.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the time range includes data in your Loki instance.
|
||||
1. Check that the label selectors match existing log streams.
|
||||
1. Use the **Label browser** in the query editor to see available labels and values.
|
||||
1. Start with a simple query like `{job="your-job"}` and add filters incrementally.
|
||||
1. Verify logs are being ingested into Loki for the selected time range.
|
||||
|
||||
### Slow query performance
|
||||
|
||||
**Cause:** Queries take a long time to execute.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Add more specific label selectors. Labels are indexed, so filtering by labels is fast.
|
||||
1. Reduce the time range of your query.
|
||||
1. Avoid regex filters on high-volume streams when possible.
|
||||
1. Use line filters (`|=`, `!=`) before expensive regex operations.
|
||||
1. For metric queries, ensure you're using appropriate aggregation intervals.
|
||||
|
||||
**Query optimization tips:**
|
||||
|
||||
| Slow | Fast |
|
||||
| ----------------------------------------- | ------------------------------------------------- |
|
||||
| `{namespace="prod"} \|~ "error.*timeout"` | `{namespace="prod", level="error"} \|= "timeout"` |
|
||||
| `{job=~".+"}` (matches all) | `{job="specific-job"}` |
|
||||
| Wide time range, no filters | Narrow time range with label filters |
|
||||
|
||||
### Labels not appearing in dropdown
|
||||
|
||||
**Cause:** The label browser doesn't show expected labels.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Check that logs with those labels exist in the selected time range.
|
||||
1. Verify the labels are indexed in Loki (not just parsed from log content).
|
||||
1. Refresh the label browser by clicking the refresh button.
|
||||
1. Clear your browser cache and reload the page.
|
||||
|
||||
### Log lines truncated
|
||||
|
||||
**Cause:** Long log lines are cut off in the display.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Click on a log line to expand and view the full content.
|
||||
1. Use the **Wrap lines** option in the logs visualization settings.
|
||||
1. The full log content is always available; only the display is truncated.
|
||||
|
||||
### Derived fields not working
|
||||
|
||||
**Cause:** Derived fields configured in the data source aren't appearing in log details.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the regex pattern in your derived field configuration matches your log format.
|
||||
1. Test the regex in the **Debug** section of the derived fields configuration.
|
||||
1. Ensure the derived field has a valid URL or internal data source configured.
|
||||
1. Check that the log lines contain text matching the regex pattern.
|
||||
|
||||
## Live tailing issues
|
||||
|
||||
The following issues occur when using the live log tailing feature.
|
||||
|
||||
### Live tailing not working
|
||||
|
||||
**Cause:** Live tailing relies on WebSocket connections that may be blocked by proxies or firewalls.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify WebSocket connections are allowed through your network infrastructure.
|
||||
1. Check that your reverse proxy is configured to support WebSocket connections.
|
||||
1. Ensure the Grafana server can establish a WebSocket connection to Loki.
|
||||
|
||||
### Configure reverse proxies for live tailing
|
||||
|
||||
If you use reverse proxies, configure them to support WebSocket connections for live tailing.
|
||||
|
||||
**Apache2 configuration:**
|
||||
|
||||
Add the following to proxy WebSocket connections:
|
||||
|
||||
```apache
|
||||
ProxyPassMatch "^/(api/datasources/proxy/\d+/loki/api/v1/tail)" "ws://127.0.0.1:3000/$1"
|
||||
```
|
||||
|
||||
**NGINX configuration:**
|
||||
|
||||
This example assumes the Grafana server is available at `http://localhost:3000/`, the Loki server is running locally without a proxy, and your external site uses HTTPS. If you also host Loki behind NGINX, repeat this configuration for Loki.
|
||||
|
||||
In the `http` section of your NGINX configuration, add the following map definition:
|
||||
|
||||
```nginx
|
||||
map $http_upgrade $connection_upgrade {
|
||||
default upgrade;
|
||||
'' close;
|
||||
}
|
||||
```
|
||||
|
||||
In your `server` section, add the following configuration:
|
||||
|
||||
```nginx
|
||||
location ~ /(api/datasources/proxy/\d+/loki/api/v1/tail) {
|
||||
proxy_pass http://localhost:3000$request_uri;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto "https";
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
}
|
||||
|
||||
location / {
|
||||
proxy_pass http://localhost:3000/;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto "https";
|
||||
}
|
||||
```
|
||||
|
||||
## Multi-tenancy issues
|
||||
|
||||
The following errors occur when using Loki in multi-tenant mode.
|
||||
|
||||
### No org id
|
||||
|
||||
**Error message:** "no org id" or "X-Scope-OrgID header required"
|
||||
|
||||
**Cause:** Loki is configured for multi-tenancy but no tenant ID was provided.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Add a custom HTTP header `X-Scope-OrgID` with your tenant ID in the data source configuration.
|
||||
1. Navigate to **Additional settings** > **HTTP headers** and add the header.
|
||||
|
||||
### Tenant not found
|
||||
|
||||
**Error message:** "tenant not found" or "invalid tenant"
|
||||
|
||||
**Cause:** The specified tenant ID doesn't exist or the user doesn't have access.
|
||||
|
||||
**Solution:**
|
||||
|
||||
1. Verify the tenant ID is correct.
|
||||
1. Check that the tenant exists in your Loki deployment.
|
||||
1. Verify the user has permission to access the specified tenant.
|
||||
|
||||
## Get additional help
|
||||
|
||||
If you continue to experience issues:
|
||||
|
||||
- Check the [Grafana community forums](https://community.grafana.com/) for similar issues and solutions.
|
||||
- Review the [Loki documentation](https://grafana.com/docs/loki/latest/) for detailed configuration and query guidance.
|
||||
- Contact Grafana Support if you're an Enterprise, Cloud Pro, or Cloud contracted customer.
|
||||
|
||||
When reporting issues, include the following information:
|
||||
|
||||
- Grafana version
|
||||
- Loki version
|
||||
- Deployment type (self-hosted Loki, Grafana Cloud Logs)
|
||||
- Error messages (redact sensitive information)
|
||||
- Steps to reproduce the issue
|
||||
- Relevant configuration such as data source settings, authentication method, and timeout values (redact credentials)
|
||||
- Sample LogQL query (if applicable, with sensitive data redacted)
|
||||
- Time range of the query
|
||||
- Approximate volume of logs being queried
|
||||
@@ -124,6 +124,8 @@ For more information about dashboard permissions, refer to [Dashboard permission
|
||||
## Restore deleted dashboards
|
||||
|
||||
{{% admonition type="caution" %}}
|
||||
Restoring deleted dashboards is currently in private preview. Grafana Labs offers support on a best-effort basis, and breaking changes might occur prior to the feature being made generally available.
|
||||
|
||||
The feature is only available in Grafana Cloud.
|
||||
{{% /admonition %}}
|
||||
|
||||
|
||||
33
go.mod
33
go.mod
@@ -32,13 +32,14 @@ require (
|
||||
github.com/apache/arrow-go/v18 v18.4.1 // @grafana/plugins-platform-backend
|
||||
github.com/armon/go-radix v1.0.0 // @grafana/grafana-app-platform-squad
|
||||
github.com/aws/aws-sdk-go v1.55.7 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.45.3 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.51.0 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2/service/ec2 v1.225.2 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2/service/oam v1.18.3 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6 // @grafana/aws-datasources
|
||||
github.com/aws/smithy-go v1.23.1 // @grafana/aws-datasources
|
||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.40.1 // @grafana/grafana-operator-experience-squad
|
||||
github.com/aws/smithy-go v1.23.2 // @grafana/aws-datasources
|
||||
github.com/beevik/etree v1.4.1 // @grafana/grafana-backend-group
|
||||
github.com/benbjohnson/clock v1.3.5 // @grafana/alerting-backend
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect; @grafana/grafana-developer-enablement-squad
|
||||
@@ -99,7 +100,7 @@ require (
|
||||
github.com/grafana/grafana-api-golang-client v0.27.0 // @grafana/alerting-backend
|
||||
github.com/grafana/grafana-app-sdk v0.48.7 // @grafana/grafana-app-platform-squad
|
||||
github.com/grafana/grafana-app-sdk/logging v0.48.7 // @grafana/grafana-app-platform-squad
|
||||
github.com/grafana/grafana-aws-sdk v1.3.0 // @grafana/aws-datasources
|
||||
github.com/grafana/grafana-aws-sdk v1.4.2 // @grafana/aws-datasources
|
||||
github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1 // @grafana/partner-datasources
|
||||
github.com/grafana/grafana-cloud-migration-snapshot v1.9.0 // @grafana/grafana-operator-experience-squad
|
||||
github.com/grafana/grafana-google-sdk-go v0.4.2 // @grafana/partner-datasources
|
||||
@@ -146,6 +147,7 @@ require (
|
||||
github.com/olekukonko/tablewriter v0.0.5 // @grafana/grafana-backend-group
|
||||
github.com/open-feature/go-sdk v1.16.0 // @grafana/grafana-backend-group
|
||||
github.com/open-feature/go-sdk-contrib/providers/go-feature-flag v0.2.6 // @grafana/grafana-backend-group
|
||||
github.com/open-feature/go-sdk-contrib/providers/ofrep v0.1.6 // @grafana/grafana-backend-group
|
||||
github.com/openfga/api/proto v0.0.0-20250909172242-b4b2a12f5c67 // @grafana/identity-access-team
|
||||
github.com/openfga/language/pkg/go v0.2.0-beta.2.0.20251027165255-0f8f255e5f6c // @grafana/identity-access-team
|
||||
github.com/openfga/openfga v1.11.1 // @grafana/identity-access-team
|
||||
@@ -340,23 +342,23 @@ require (
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/at-wat/mqtt-go v0.19.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/kms v1.41.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 // indirect
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27 // indirect
|
||||
github.com/bahlo/generic-list-go v0.2.0 // indirect
|
||||
github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df // indirect
|
||||
@@ -454,7 +456,6 @@ require (
|
||||
github.com/gopherjs/gopherjs v1.17.2 // indirect
|
||||
github.com/grafana/jsonparser v0.0.0-20240425183733-ea80629e1a32 // indirect
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
|
||||
github.com/grafana/sqlds/v4 v4.2.7 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20191002090509-6af20e3a5340 // indirect
|
||||
github.com/hashicorp/consul/api v1.31.2 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
@@ -543,7 +544,6 @@ require (
|
||||
github.com/oklog/run v1.1.0 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/oklog/ulid/v2 v2.1.1 // indirect
|
||||
github.com/open-feature/go-sdk-contrib/providers/ofrep v0.1.6 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.124.1 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/core/xidutils v0.124.1 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.124.1 // indirect
|
||||
@@ -682,6 +682,7 @@ require (
|
||||
github.com/go-openapi/swag/typeutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
|
||||
github.com/gophercloud/gophercloud/v2 v2.9.0 // indirect
|
||||
github.com/grafana/sqlds/v5 v5.0.3 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
|
||||
github.com/magiconair/properties v1.8.10 // indirect
|
||||
github.com/moby/go-archive v0.1.0 // indirect
|
||||
|
||||
62
go.sum
62
go.sum
@@ -850,24 +850,24 @@ github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2z
|
||||
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
|
||||
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 h1:fWZhGAwVRK/fAN2tmt7ilH4PPAE11rDj7HytrmbZ2FE=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc=
|
||||
github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 h1:12SpdwU8Djs+YGklkinSSlcrPyj3H4VifVsKf78KbwA=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11/go.mod h1:dd+Lkp6YmMryke+qxW/VnKyhMBDTYP41Q2Bb+6gNZgY=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10 h1:7LllDZAegXU3yk41mwM6KcPu0wmjKGQB1bg99bNdQm4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10/go.mod h1:Ge6gzXPjqu4v0oHvgAwvGzYcK921GU0hQM25WF/Kl+8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14 h1:TxkI7QI+sFkTItN/6cJuMZEIVMFXeu2dI1ZffkXngKI=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14/go.mod h1:12x4Uw/vijC11XkctTjy92TNCQ+UnNJkT7fzX0Yd93E=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8 h1:gLD09eaJUdiszm7vd1btiQUYE0Hj+0I2b8AS+75z9AY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8/go.mod h1:4RW3oMPt1POR74qVOC4SbubxAwdP4pCT0nSw3jycOU4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.17 h1:QFl8lL6RgakNK86vusim14P2k8BFSxjvUkcWLDjgz9Y=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.17/go.mod h1:V8P7ILjp/Uef/aX8TjGk6OHZN6IKPM5YW6S78QnRD5c=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.21 h1:56HGpsgnmD+2/KpG0ikvvR8+3v3COCwaF4r+oWwOeNA=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.21/go.mod h1:3YELwedmQbw7cXNaII2Wywd+YY58AmLPwX4LzARgmmA=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 h1:T1brd5dR3/fzNFAQch/iBKeX07/ffu/cLu+q+RuzEWk=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13/go.mod h1:Peg/GBAQ6JDt+RoBf4meB1wylmAipb7Kg2ZFakZTlwk=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 h1:cTXRdLkpBanlDwISl+5chq5ui1d1YWg4PWMR9c3kXyw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84/go.mod h1:kwSy5X7tfIHN39uucmjQVs2LvDdXEjQucgQQEqCggEo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 h1:6bgAZgRyT4RoFWhxS+aoGMFyE0cD1bSzFnEEi4bFPGI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8/go.mod h1:KcGkXFVU8U28qS4KvLEcPxytPZPBcRawaH2Pf/0jptE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 h1:HhJYoES3zOz34yWEpGENqJvRVPqpmJyR3+AFg9ybhdY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8/go.mod h1:JnA+hPWeYAVbDssp83tv+ysAG8lTfLVXvSsyKg/7xNA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 h1:GMYy2EOWfzdP3wfVAGXBNKY5vK4K8vMET4sYOYltmqs=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36/go.mod h1:gDhdAV6wL3PmPqBhiPbnlS447GoWs8HTTOYef9/9Inw=
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.45.3 h1:Nn3qce+OHZuMj/edx4its32uxedAmquCDxtZkrdeiD4=
|
||||
@@ -876,12 +876,12 @@ github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.51.0 h1:e5cbPZYTIY2nUEFie
|
||||
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.51.0/go.mod h1:UseIHRfrm7PqeZo6fcTb6FUCXzCnh1KJbQbmOfxArGM=
|
||||
github.com/aws/aws-sdk-go-v2/service/ec2 v1.225.2 h1:IfMb3Ar8xEaWjgH/zeVHYD8izwJdQgRP5mKCTDt4GNk=
|
||||
github.com/aws/aws-sdk-go-v2/service/ec2 v1.225.2/go.mod h1:35jGWx7ECvCwTsApqicFYzZ7JFEnBc6oHUuOQ3xIS54=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 h1:nAP2GYbfh8dd2zGZqFRSMlq+/F6cMPBUuCsGAMkN074=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4/go.mod h1:LT10DsiGjLWh4GbjInf9LQejkYEhBgBCjLG5+lvk4EE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 h1:M6JI2aGFEzYxsF6CXIuRBnkge9Wf9a2xU39rNeXgu10=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8/go.mod h1:Fw+MyTwlwjFsSTE31mH211Np+CUslml8mzc0AFEG09s=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 h1:qcLWgdhq45sDM9na4cvXax9dyLitn8EYBRl8Ak4XtG4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17/go.mod h1:M+jkjBFZ2J6DJrjMv2+vkBbuht6kxJYtJiwoVgX4p4U=
|
||||
github.com/aws/aws-sdk-go-v2/service/kms v1.41.2 h1:zJeUxFP7+XP52u23vrp4zMcVhShTWbNO8dHV6xCSvFo=
|
||||
@@ -892,14 +892,16 @@ github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6 h1:Pwbxovp
|
||||
github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6/go.mod h1:Z4xLt5mXspLKjBV92i165wAJ/3T6TIv4n7RtIS8pWV0=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.0 h1:0reDqfEN+tB+sozj2r92Bep8MEwBZgtAXTND1Kk9OXg=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.0/go.mod h1:kUklwasNoCn5YpyAqC/97r6dzTA1SRKJfKq16SXeoDU=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4 h1:FTdEN9dtWPB0EOURNtDPmwGp6GGvMqRJCAihkSl/1No=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4/go.mod h1:mYubxV9Ff42fZH4kexj43gFPhgc/LyC7KqvUKt1watc=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0 h1:I7ghctfGXrscr7r1Ga/mDqSJKm7Fkpl5Mwq79Z+rZqU=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0/go.mod h1:Zo9id81XP6jbayIFWNuDpA6lMBWhsVy+3ou2jLa4JnA=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 h1:+LVB0xBqEgjQoqr9bGZbRzvg212B0f17JdflleJRNR4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5/go.mod h1:xoaxeqnnUaZjPjaICgIy5B+MHCSb/ZSOn4MvkFNOUA0=
|
||||
github.com/aws/smithy-go v1.23.1 h1:sLvcH6dfAFwGkHLZ7dGiYF7aK6mg4CgKA/iDKjLDt9M=
|
||||
github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.40.1 h1:w6a0H79HrHf3lr+zrw+pSzR5B+caiQFAKiNHlrUcnoc=
|
||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.40.1/go.mod h1:c6Vg0BRiU7v0MVhHupw90RyL120QBwAMLbDCzptGeMk=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 h1:0JPwLz1J+5lEOfy/g0SURC9cxhbQ1lIMHMa+AHZSzz0=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.1/go.mod h1:fKvyjJcz63iL/ftA6RaM8sRCtN4r4zl4tjL3qw5ec7k=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 h1:OWs0/j2UYR5LOGi88sD5/lhN6TDLG6SfA7CqsQO9zF0=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5/go.mod h1:klO+ejMvYsB4QATfEOIXk8WAEwN4N0aBfJpvC+5SZBo=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 h1:mLlUgHn02ue8whiR4BmxxGJLR2gwU6s6ZzJ5wDamBUs=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk=
|
||||
github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
|
||||
github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20191112132149-a4c4c47bc57f/go.mod h1:2stgcRjl6QmW+gU2h5E7BQXg4HU0gzxKWDuT5HviN9s=
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27 h1:60m4tnanN1ctzIu4V3bfCNJ39BiOPSm1gHFlFjTkRE0=
|
||||
github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c=
|
||||
@@ -1649,8 +1651,8 @@ github.com/grafana/grafana-app-sdk v0.48.7 h1:9mF7nqkqP0QUYYDlznoOt+GIyjzj45wGfU
|
||||
github.com/grafana/grafana-app-sdk v0.48.7/go.mod h1:DWsaaH39ZMHwSOSoUBaeW8paMrRaYsjRYlLwCJYd78k=
|
||||
github.com/grafana/grafana-app-sdk/logging v0.48.7 h1:Oa5qg473gka5+W/WQk61Xbw4YdAv+wV2Z4bJtzeCaQw=
|
||||
github.com/grafana/grafana-app-sdk/logging v0.48.7/go.mod h1:5u3KalezoBAAo2Y3ytDYDAIIPvEqFLLDSxeiK99QxDU=
|
||||
github.com/grafana/grafana-aws-sdk v1.3.0 h1:/bfJzP93rCel1GbWoRSq0oUo424MZXt8jAp2BK9w8tM=
|
||||
github.com/grafana/grafana-aws-sdk v1.3.0/go.mod h1:VGycF0JkCGKND2O5je1ucOqPJ0ZNhZYzV3c2bNBAaGk=
|
||||
github.com/grafana/grafana-aws-sdk v1.4.2 h1:GrUEoLbs46r8rG/GZL4L2b63Bo+rkIYKdtCT7kT5KkM=
|
||||
github.com/grafana/grafana-aws-sdk v1.4.2/go.mod h1:1qnZdYs6gQzxxF0dDodaE7Rn9fiMzuhwvtaAZ7ySnhY=
|
||||
github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1 h1:FFcEA01tW+SmuJIuDbHOdgUBL+d7DPrZ2N4zwzPhfGk=
|
||||
github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1/go.mod h1:Oi4anANlCuTCc66jCyqIzfVbgLXFll8Wja+Y4vfANlc=
|
||||
github.com/grafana/grafana-cloud-migration-snapshot v1.9.0 h1:JOzchPgptwJdruYoed7x28lFDwhzs7kssResYsnC0iI=
|
||||
@@ -1689,8 +1691,8 @@ github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrR
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
|
||||
github.com/grafana/saml v0.4.15-0.20240917091248-ae3bbdad8a56 h1:SDGrP81Vcd102L3UJEryRd1eestRw73wt+b8vnVEFe0=
|
||||
github.com/grafana/saml v0.4.15-0.20240917091248-ae3bbdad8a56/go.mod h1:S4+611dxnKt8z/ulbvaJzcgSHsuhjVc1QHNTcr1R7Fw=
|
||||
github.com/grafana/sqlds/v4 v4.2.7 h1:sFQhsS7DBakNMdxa++yOfJ9BVvkZwFJ0B95o57K0/XA=
|
||||
github.com/grafana/sqlds/v4 v4.2.7/go.mod h1:BQRjUG8rOqrBI4NAaeoWrIMuoNgfi8bdhCJ+5cgEfLU=
|
||||
github.com/grafana/sqlds/v5 v5.0.3 h1:+yUMUxfa0WANQsmS9xtTFSRX1Q55Iv1B9EjlrW4VlBU=
|
||||
github.com/grafana/sqlds/v5 v5.0.3/go.mod h1:GKeTTiC+GeR1X0z3f0Iee+hZnNgN62uQpj5XVMx5Uew=
|
||||
github.com/grafana/tempo v1.5.1-0.20250529124718-87c2dc380cec h1:wnzJov9RhSHGaTYGzTygL4qq986fLen8xSqnQgaMd28=
|
||||
github.com/grafana/tempo v1.5.1-0.20250529124718-87c2dc380cec/go.mod h1:j1IY7J2rUz7TcTjFVVx6HCpyTlYOJPtXuGRZ7sI+vSo=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
|
||||
|
||||
22
go.work.sum
22
go.work.sum
@@ -423,21 +423,31 @@ github.com/aws/aws-msk-iam-sasl-signer-go v1.0.1 h1:nMp7diZObd4XEVUR0pEvn7/E13JI
|
||||
github.com/aws/aws-msk-iam-sasl-signer-go v1.0.1/go.mod h1:MVYeeOhILFFemC/XlYTClvBjYZrg/EPd3ts885KrNTI=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.5/go.mod h1:EYrzvCCN9CMUTa5+6lf6MM4tq3Zjp8UhSGR/cBsjai0=
|
||||
github.com/aws/aws-sdk-go-v2 v1.38.1/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.6/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.2/go.mod h1:17ft42Yb2lF6OigqSYiDAiUcX4RIkEMY6XxEMJsrAes=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10/go.mod h1:Ge6gzXPjqu4v0oHvgAwvGzYcK921GU0hQM25WF/Kl+8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.6/go.mod h1:/jdQkh1iVPa01xndfECInp1v1Wnp70v3K4MvtlLGVEc=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14/go.mod h1:12x4Uw/vijC11XkctTjy92TNCQ+UnNJkT7fzX0Yd93E=
|
||||
github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.19.5 h1:oUEqVqonG3xuarrsze1KVJ30KagNYDemikTbdu8KlN8=
|
||||
github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.19.5/go.mod h1:VNM08cHlOsIbSHRqb6D/M2L4kKXfJv3A2/f0GNbOQSc=
|
||||
github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression v1.7.87 h1:oDPArGgCrG/4aTi86ij3S2PB59XXkTSKYVNQlmqRHXQ=
|
||||
github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression v1.7.87/go.mod h1:ZeQC4gVarhdcWeM1c90DyBLaBCNhEeAbKUXwVI/byvw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32/go.mod h1:h4Sg6FQdexC1yYG9RDnOvLbW1a/P986++/Y/a+GyEM8=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.4/go.mod h1:9xzb8/SV62W6gHQGC/8rrvgNXU6ZoYM3sAIJCIrXJxY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8/go.mod h1:4RW3oMPt1POR74qVOC4SbubxAwdP4pCT0nSw3jycOU4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69/go.mod h1:GJj8mmO6YT6EqgduWocwhMoxTLFitkhIrK+owzrYL2I=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36/go.mod h1:Q1lnJArKRXkenyog6+Y+zr7WDpk4e6XlR6gs20bbeNo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.4/go.mod h1:l4bdfCD7XyyZA9BolKBo1eLqgaJxl0/x91PL4Yqe0ao=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8/go.mod h1:KcGkXFVU8U28qS4KvLEcPxytPZPBcRawaH2Pf/0jptE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13/go.mod h1:oGnKwIYZ4XttyU2JWxFrwvhF6YKiK/9/wmE3v3Iu9K8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36/go.mod h1:UdyGa7Q91id/sdyHPwth+043HhmP6yP9MBHgbZM0xo8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.4/go.mod h1:yDmJgqOiH4EA8Hndnv4KwAo8jCGTSnM5ASG1nBI+toA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8/go.mod h1:JnA+hPWeYAVbDssp83tv+ysAG8lTfLVXvSsyKg/7xNA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13/go.mod h1:YE94ZoDArI7awZqJzBAZ3PDD2zSfuP7w6P2knOzIn8M=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs=
|
||||
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.44.0 h1:A99gjqZDbdhjtjJVZrmVzVKO2+p3MSg35bDWtbMQVxw=
|
||||
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.44.0/go.mod h1:mWB0GE1bqcVSvpW7OtFA0sKuHk52+IqtnsYU2jUfYAs=
|
||||
@@ -445,11 +455,13 @@ github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.26.0 h1:0wOCTKrmwkyC8Bk7
|
||||
github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.26.0/go.mod h1:He/RikglWUczbkV+fkdpcV/3GdL/rTRNVy7VaUiezMo=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4/go.mod h1:/xFi9KtvBXP97ppCz1TAEvU1Uf66qvid89rbem3wCzQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.17 h1:x187MqiHwBGjMGAed8Y8K1VGuCtFvQvXb24r+bwmSdo=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.10.17/go.mod h1:mC9qMbA6e1pwEq6X3zDGtZRXMG2YaElJkbJlMVHLs5I=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17/go.mod h1:ygpklyoaypuyDvOM5ujWGrYWpAK3h7ugnmKCU/76Ys4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.4/go.mod h1:nLEfLnVMmLvyIG58/6gsSA03F1voKGaCfHV7+lR8S7s=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8/go.mod h1:Fw+MyTwlwjFsSTE31mH211Np+CUslml8mzc0AFEG09s=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA=
|
||||
github.com/aws/aws-sdk-go-v2/service/kinesis v1.33.0 h1:JPXkrQk5OS/+Q81fKH97Ll/Vmmy0p9vwHhxw+V+tVjg=
|
||||
github.com/aws/aws-sdk-go-v2/service/kinesis v1.33.0/go.mod h1:dJngkoVMrq0K7QvRkdRZYM4NUp6cdWa2GBdpm8zoY8U=
|
||||
@@ -483,14 +495,18 @@ github.com/aws/aws-sdk-go-v2/service/ssm v1.60.1 h1:OwMzNDe5VVTXD4kGmeK/FtqAITiV
|
||||
github.com/aws/aws-sdk-go-v2/service/ssm v1.60.1/go.mod h1:IyVabkWrs8SNdOEZLyFFcW9bUltV4G6OQS0s6H20PHg=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.28.2/go.mod h1:n9bTZFZcBa9hGGqVz3i/a6+NG0zmZgtkB9qVVFDqPA8=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4/go.mod h1:mYubxV9Ff42fZH4kexj43gFPhgc/LyC7KqvUKt1watc=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3/go.mod h1:vq/GQR1gOFLquZMSrxUK/cpvKCNVYibNyJ1m7JrU88E=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.2/go.mod h1:eknndR9rU8UpE/OmFpqU78V1EcXPKFTTm5l/buZYgvM=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0/go.mod h1:Zo9id81XP6jbayIFWNuDpA6lMBWhsVy+3ou2jLa4JnA=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.34.0/go.mod h1:7ph2tGpfQvwzgistp2+zga9f+bCjlQJPkPUmMgDSD7w=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.0/go.mod h1:bEPcjW7IbolPfK67G1nilqWyoxYMSPrDiIQ3RdIdKgo=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5/go.mod h1:xoaxeqnnUaZjPjaICgIy5B+MHCSb/ZSOn4MvkFNOUA0=
|
||||
github.com/aws/smithy-go v1.22.4/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
|
||||
github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw=
|
||||
github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
|
||||
github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
|
||||
github.com/aws/smithy-go v1.23.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/awslabs/aws-lambda-go-api-proxy v0.16.2 h1:CJyGEyO1CIwOnXTU40urf0mchf6t3voxpvUDikOU9LY=
|
||||
github.com/awslabs/aws-lambda-go-api-proxy v0.16.2/go.mod h1:vxxjwBHe/KbgFeNlAP/Tvp4SsVRL3WQamcWRxqVh0z0=
|
||||
github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
|
||||
@@ -930,6 +946,7 @@ github.com/grafana/grafana-aws-sdk v1.0.2 h1:98eBuHYFmgvH0xO9kKf4RBsEsgQRp8EOA/9
|
||||
github.com/grafana/grafana-aws-sdk v1.0.2/go.mod h1:hO7q7yWV+t6dmiyJjMa3IbuYnYkBua+G/IAlOPVIYKE=
|
||||
github.com/grafana/grafana-aws-sdk v1.1.0/go.mod h1:7e+47EdHynteYWGoT5Ere9KeOXQObsk8F0vkOLQ1tz8=
|
||||
github.com/grafana/grafana-aws-sdk v1.2.0/go.mod h1:bBo7qOmM3f61vO+2JxTolNUph1l2TmtzmWcU9/Im+8A=
|
||||
github.com/grafana/grafana-aws-sdk v1.3.0/go.mod h1:VGycF0JkCGKND2O5je1ucOqPJ0ZNhZYzV3c2bNBAaGk=
|
||||
github.com/grafana/grafana-azure-sdk-go/v2 v2.1.6/go.mod h1:V7y2BmsWxS3A9Ohebwn4OiSfJJqi//4JQydQ8fHTduo=
|
||||
github.com/grafana/grafana-azure-sdk-go/v2 v2.2.0/go.mod h1:H9sVh9A4yg5egMGZeh0mifxT1Q/uqwKe1LBjBJU6pN8=
|
||||
github.com/grafana/grafana-plugin-sdk-go v0.263.0/go.mod h1:U43Cnrj/9DNYyvFcNdeUWNjMXTKNB0jcTcQGpWKd2gw=
|
||||
@@ -977,6 +994,7 @@ github.com/grafana/prometheus-alertmanager v0.25.1-0.20250604130045-92c8f6389b36
|
||||
github.com/grafana/prometheus-alertmanager v0.25.1-0.20250604130045-92c8f6389b36/go.mod h1:O/QP1BCm0HHIzbKvgMzqb5sSyH88rzkFk84F4TfJjBU=
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU=
|
||||
github.com/grafana/sqlds/v4 v4.2.4/go.mod h1:BQRjUG8rOqrBI4NAaeoWrIMuoNgfi8bdhCJ+5cgEfLU=
|
||||
github.com/grafana/sqlds/v4 v4.2.7/go.mod h1:BQRjUG8rOqrBI4NAaeoWrIMuoNgfi8bdhCJ+5cgEfLU=
|
||||
github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 h1:bjh0PVYSVVFxzINqPFYJmAmJNrWPgnVjuSdYJGHmtFU=
|
||||
github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0/go.mod h1:7t5XR+2IA8P2qggOAHTj/GCZfoLBle3OvNSYh1VkRBU=
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
|
||||
@@ -1836,6 +1854,7 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.4
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.62.0/go.mod h1:ru6KHrNtNHxM4nD/vd6QrLVWgKhxPYgblq4VAtNawTQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0/go.mod h1:CosX/aS4eHnG9D7nESYpV753l4j9q5j3SL/PUYd2lR8=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0/go.mod h1:HfvuU0kW9HewH14VCOLImqKvUgONodURG7Alj/IrnGI=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.62.0/go.mod h1:WfEApdZDMlLUAev/0QQpr8EJ/z0VWDKYZ5tF5RH5T1U=
|
||||
@@ -1942,6 +1961,7 @@ golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632
|
||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
|
||||
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
|
||||
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
|
||||
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
|
||||
golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
@@ -1955,6 +1975,7 @@ golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5N
|
||||
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ=
|
||||
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
|
||||
golang.org/x/exp v0.0.0-20250811191247-51f88131bc50/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg=
|
||||
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM=
|
||||
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/image v0.25.0 h1:Y6uW6rH1y5y/LK1J8BPWZtr6yZ7hrsy6hFrXjgsc2fQ=
|
||||
@@ -2048,6 +2069,7 @@ golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
|
||||
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
|
||||
golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
|
||||
@@ -58,14 +58,12 @@
|
||||
"bundle": "rollup -c rollup.config.ts --configPlugin esbuild",
|
||||
"clean": "rimraf ./dist ./compiled ./unstable ./testing ./package.tgz",
|
||||
"typecheck": "tsc --emitDeclarationOnly false --noEmit",
|
||||
"codegen": "rtk-query-codegen-openapi ./scripts/codegen.ts",
|
||||
"prepack": "cp package.json package.json.bak && node ../../scripts/prepare-npm-package.js",
|
||||
"postpack": "mv package.json.bak package.json",
|
||||
"i18n-extract": "i18next-cli extract --sync-primary"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@grafana/test-utils": "workspace:*",
|
||||
"@rtk-query/codegen-openapi": "^2.0.0",
|
||||
"@testing-library/jest-dom": "^6.6.3",
|
||||
"@testing-library/react": "^16.3.0",
|
||||
"@testing-library/user-event": "^14.6.1",
|
||||
@@ -96,6 +94,7 @@
|
||||
"dependencies": {
|
||||
"@emotion/css": "11.13.5",
|
||||
"@faker-js/faker": "^9.8.0",
|
||||
"@grafana/api-clients": "12.4.0-pre",
|
||||
"@grafana/i18n": "12.4.0-pre",
|
||||
"@reduxjs/toolkit": "^2.9.0",
|
||||
"fishery": "^2.3.1",
|
||||
|
||||
@@ -1,23 +0,0 @@
|
||||
# Re-generate the clients
|
||||
|
||||
⚠️ This guide assumes the Backend definitions have been updated in `apps/alerting`.
|
||||
|
||||
## Re-create OpenAPI specification
|
||||
|
||||
Start with re-generating the OpenAPI snapshots by running the test in `pkg/tests/apis/openapi_test.go`.
|
||||
|
||||
This will output the OpenAPI JSON spec file(s) in `pkg/tests/apis/openapi_snapshots`.
|
||||
|
||||
## Process OpenAPI specifications
|
||||
|
||||
Next up run the post-processing of the snapshots with `yarn run process-specs`, this will copy processed specifications to `./data/openapi/`.
|
||||
|
||||
## Generate RTKQ files
|
||||
|
||||
These files are built using the `yarn run codegen` command, make sure to run that in the Grafana Alerting package working directory.
|
||||
|
||||
`yarn --cwd ./packages/grafana-alerting run codegen`.
|
||||
|
||||
API clients will be written to `src/grafana/api/<version>/api.gen.ts`.
|
||||
|
||||
Make sure to create a versioned API client for each API version – see `src/grafana/api/v0alpha1/api.ts` as an example.
|
||||
@@ -1,55 +0,0 @@
|
||||
/**
|
||||
* This script will generate TypeScript type definitions and a RTKQ clients for the alerting k8s APIs.
|
||||
*
|
||||
* Run `yarn run codegen` from the "grafana-alerting" package to invoke this script.
|
||||
*
|
||||
* API clients will be placed in "src/grafana/api/<version>/api.gen.ts"
|
||||
*/
|
||||
import type { ConfigFile } from '@rtk-query/codegen-openapi';
|
||||
|
||||
// ℹ️ append API groups and versions here to generate additional API clients
|
||||
const SPECS = [
|
||||
['notifications.alerting.grafana.app', ['v0alpha1']],
|
||||
['rules.alerting.grafana.app', ['v0alpha1']],
|
||||
// keep this in Grafana Enterprise
|
||||
// ['alertenrichment.grafana.app', ['v1beta1']],
|
||||
] as const;
|
||||
|
||||
type OutputFile = Omit<ConfigFile, 'outputFile'>;
|
||||
type OutputFiles = Record<string, OutputFile>;
|
||||
|
||||
const outputFiles = SPECS.reduce<OutputFiles>((groupAcc, [group, versions]) => {
|
||||
return versions.reduce<OutputFiles>((versionAcc, version) => {
|
||||
// Create a unique export name based on the group
|
||||
const groupName = group.split('.')[0]; // e.g., 'notifications', 'rules', 'alertenrichment'
|
||||
const exportName = `${groupName}API`;
|
||||
|
||||
// ℹ️ these snapshots are generated by running "go test pkg/tests/apis/openapi_test.go" and "scripts/process-specs.ts",
|
||||
// see the README in the "openapi_snapshots" directory
|
||||
const schemaFile = `../../../data/openapi/${group}-${version}.json`;
|
||||
|
||||
// ℹ️ make sure there is a API file in each versioned directory
|
||||
const apiFile = `../src/grafana/api/${groupName}/${version}/api.ts`;
|
||||
|
||||
// output each api client into a versioned directory with group-specific naming
|
||||
const outputPath = `../src/grafana/api/${groupName}/${version}/${groupName}.api.gen.ts`;
|
||||
|
||||
versionAcc[outputPath] = {
|
||||
exportName,
|
||||
schemaFile,
|
||||
apiFile,
|
||||
tag: true, // generate tags for cache invalidation
|
||||
} satisfies OutputFile;
|
||||
|
||||
return versionAcc;
|
||||
}, groupAcc);
|
||||
}, {});
|
||||
|
||||
export default {
|
||||
// these are intentionally empty but will be set for each versioned config file
|
||||
exportName: '',
|
||||
schemaFile: '',
|
||||
apiFile: '',
|
||||
|
||||
outputFiles,
|
||||
} satisfies ConfigFile;
|
||||
@@ -1,18 +0,0 @@
|
||||
import { createApi, fetchBaseQuery } from '@reduxjs/toolkit/query/react';
|
||||
|
||||
import { getAPIBaseURL, getAPIReducerPath } from '../../util';
|
||||
|
||||
import { GROUP, VERSION } from './const';
|
||||
|
||||
const baseUrl = getAPIBaseURL(GROUP, VERSION);
|
||||
const reducerPath = getAPIReducerPath(GROUP, VERSION);
|
||||
|
||||
export const api = createApi({
|
||||
reducerPath,
|
||||
baseQuery: fetchBaseQuery({
|
||||
// Set URL correctly so MSW can intercept requests
|
||||
// https://mswjs.io/docs/runbook#rtk-query-requests-are-not-intercepted
|
||||
baseUrl: new URL(baseUrl, globalThis.location.origin).href,
|
||||
}),
|
||||
endpoints: () => ({}),
|
||||
});
|
||||
@@ -1,2 +0,0 @@
|
||||
export const VERSION = 'v0alpha1' as const;
|
||||
export const GROUP = 'notifications.alerting.grafana.app' as const;
|
||||
@@ -1,8 +1,9 @@
|
||||
import { faker } from '@faker-js/faker';
|
||||
import { Factory } from 'fishery';
|
||||
|
||||
import { API_GROUP, API_VERSION } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { DEFAULT_NAMESPACE, generateResourceVersion, generateTitle, generateUID } from '../../../../../mocks/util';
|
||||
import { GROUP, VERSION } from '../../const';
|
||||
import {
|
||||
ContactPoint,
|
||||
ContactPointMetadataAnnotations,
|
||||
@@ -14,7 +15,7 @@ import { AlertingEntityMetadataAnnotationsFactory } from './common';
|
||||
|
||||
export const ListReceiverApiResponseFactory = Factory.define<EnhancedListReceiverApiResponse>(() => ({
|
||||
kind: 'ReceiverList',
|
||||
apiVersion: `${GROUP}/${VERSION}`,
|
||||
apiVersion: `${API_GROUP}/${API_VERSION}`,
|
||||
metadata: {
|
||||
resourceVersion: generateResourceVersion(),
|
||||
},
|
||||
@@ -26,7 +27,7 @@ export const ContactPointFactory = Factory.define<ContactPoint>(() => {
|
||||
|
||||
return {
|
||||
kind: 'Receiver',
|
||||
apiVersion: `${GROUP}/${VERSION}`,
|
||||
apiVersion: `${API_GROUP}/${API_VERSION}`,
|
||||
metadata: {
|
||||
name: btoa(title),
|
||||
namespace: DEFAULT_NAMESPACE,
|
||||
|
||||
@@ -1,13 +1,17 @@
|
||||
import { HttpResponse, http } from 'msw';
|
||||
|
||||
import {
|
||||
API_GROUP,
|
||||
API_VERSION,
|
||||
CreateReceiverApiResponse,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { getAPIBaseURLForMocks } from '../../../../../../mocks/util';
|
||||
import { CreateReceiverApiResponse } from '../../../../v0alpha1/notifications.api.gen';
|
||||
import { GROUP, VERSION } from '../../../const';
|
||||
|
||||
export function createReceiverHandler(
|
||||
data: CreateReceiverApiResponse | ((info: Parameters<Parameters<typeof http.post>[1]>[0]) => Response)
|
||||
) {
|
||||
return http.post(getAPIBaseURLForMocks(GROUP, VERSION, '/receivers'), function handler(info) {
|
||||
return http.post(getAPIBaseURLForMocks(API_GROUP, API_VERSION, '/receivers'), function handler(info) {
|
||||
if (typeof data === 'function') {
|
||||
return data(info);
|
||||
}
|
||||
|
||||
@@ -1,13 +1,17 @@
|
||||
import { HttpResponse, http } from 'msw';
|
||||
|
||||
import {
|
||||
API_GROUP,
|
||||
API_VERSION,
|
||||
DeleteReceiverApiResponse,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { getAPIBaseURLForMocks } from '../../../../../../mocks/util';
|
||||
import { DeleteReceiverApiResponse } from '../../../../v0alpha1/notifications.api.gen';
|
||||
import { GROUP, VERSION } from '../../../const';
|
||||
|
||||
export function deleteReceiverHandler(
|
||||
data: DeleteReceiverApiResponse | ((info: Parameters<Parameters<typeof http.delete>[1]>[0]) => Response)
|
||||
) {
|
||||
return http.delete(getAPIBaseURLForMocks(GROUP, VERSION, '/receivers/:name'), function handler(info) {
|
||||
return http.delete(getAPIBaseURLForMocks(API_GROUP, API_VERSION, '/receivers/:name'), function handler(info) {
|
||||
if (typeof data === 'function') {
|
||||
return data(info);
|
||||
}
|
||||
|
||||
@@ -1,13 +1,17 @@
|
||||
import { HttpResponse, http } from 'msw';
|
||||
|
||||
import {
|
||||
API_GROUP,
|
||||
API_VERSION,
|
||||
DeletecollectionReceiverApiResponse,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { getAPIBaseURLForMocks } from '../../../../../../mocks/util';
|
||||
import { DeletecollectionReceiverApiResponse } from '../../../../v0alpha1/notifications.api.gen';
|
||||
import { GROUP, VERSION } from '../../../const';
|
||||
|
||||
export function deletecollectionReceiverHandler(
|
||||
data: DeletecollectionReceiverApiResponse | ((info: Parameters<Parameters<typeof http.delete>[1]>[0]) => Response)
|
||||
) {
|
||||
return http.delete(getAPIBaseURLForMocks(GROUP, VERSION, '/receivers'), function handler(info) {
|
||||
return http.delete(getAPIBaseURLForMocks(API_GROUP, API_VERSION, '/receivers'), function handler(info) {
|
||||
if (typeof data === 'function') {
|
||||
return data(info);
|
||||
}
|
||||
|
||||
@@ -1,13 +1,17 @@
|
||||
import { HttpResponse, http } from 'msw';
|
||||
|
||||
import {
|
||||
API_GROUP,
|
||||
API_VERSION,
|
||||
GetReceiverApiResponse,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { getAPIBaseURLForMocks } from '../../../../../../mocks/util';
|
||||
import { GetReceiverApiResponse } from '../../../../v0alpha1/notifications.api.gen';
|
||||
import { GROUP, VERSION } from '../../../const';
|
||||
|
||||
export function getReceiverHandler(
|
||||
data: GetReceiverApiResponse | ((info: Parameters<Parameters<typeof http.get>[1]>[0]) => Response)
|
||||
) {
|
||||
return http.get(getAPIBaseURLForMocks(GROUP, VERSION, '/receivers/:name'), function handler(info) {
|
||||
return http.get(getAPIBaseURLForMocks(API_GROUP, API_VERSION, '/receivers/:name'), function handler(info) {
|
||||
if (typeof data === 'function') {
|
||||
return data(info);
|
||||
}
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
import { HttpResponse, http } from 'msw';
|
||||
|
||||
import { API_GROUP, API_VERSION } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { getAPIBaseURLForMocks } from '../../../../../../mocks/util';
|
||||
import { GROUP, VERSION } from '../../../const';
|
||||
import { EnhancedListReceiverApiResponse } from '../../../types';
|
||||
|
||||
export function listReceiverHandler(
|
||||
data: EnhancedListReceiverApiResponse | ((info: Parameters<Parameters<typeof http.get>[1]>[0]) => Response)
|
||||
) {
|
||||
return http.get(getAPIBaseURLForMocks(GROUP, VERSION, '/receivers'), function handler(info) {
|
||||
return http.get(getAPIBaseURLForMocks(API_GROUP, API_VERSION, '/receivers'), function handler(info) {
|
||||
if (typeof data === 'function') {
|
||||
return data(info);
|
||||
}
|
||||
|
||||
@@ -1,13 +1,17 @@
|
||||
import { HttpResponse, http } from 'msw';
|
||||
|
||||
import {
|
||||
API_GROUP,
|
||||
API_VERSION,
|
||||
ReplaceReceiverApiResponse,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { getAPIBaseURLForMocks } from '../../../../../../mocks/util';
|
||||
import { ReplaceReceiverApiResponse } from '../../../../v0alpha1/notifications.api.gen';
|
||||
import { GROUP, VERSION } from '../../../const';
|
||||
|
||||
export function replaceReceiverHandler(
|
||||
data: ReplaceReceiverApiResponse | ((info: Parameters<Parameters<typeof http.put>[1]>[0]) => Response)
|
||||
) {
|
||||
return http.put(getAPIBaseURLForMocks(GROUP, VERSION, '/receivers/:name'), function handler(info) {
|
||||
return http.put(getAPIBaseURLForMocks(API_GROUP, API_VERSION, '/receivers/:name'), function handler(info) {
|
||||
if (typeof data === 'function') {
|
||||
return data(info);
|
||||
}
|
||||
|
||||
@@ -1,13 +1,17 @@
|
||||
import { HttpResponse, http } from 'msw';
|
||||
|
||||
import {
|
||||
API_GROUP,
|
||||
API_VERSION,
|
||||
UpdateReceiverApiResponse,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { getAPIBaseURLForMocks } from '../../../../../../mocks/util';
|
||||
import { UpdateReceiverApiResponse } from '../../../../v0alpha1/notifications.api.gen';
|
||||
import { GROUP, VERSION } from '../../../const';
|
||||
|
||||
export function updateReceiverHandler(
|
||||
data: UpdateReceiverApiResponse | ((info: Parameters<Parameters<typeof http.patch>[1]>[0]) => Response)
|
||||
) {
|
||||
return http.patch(getAPIBaseURLForMocks(GROUP, VERSION, '/receivers/:name'), function handler(info) {
|
||||
return http.patch(getAPIBaseURLForMocks(API_GROUP, API_VERSION, '/receivers/:name'), function handler(info) {
|
||||
if (typeof data === 'function') {
|
||||
return data(info);
|
||||
}
|
||||
|
||||
@@ -3,7 +3,11 @@
|
||||
*/
|
||||
import { MergeDeep, MergeExclusive, OverrideProperties } from 'type-fest';
|
||||
|
||||
import type { ListReceiverApiResponse, Receiver, ReceiverIntegration } from './notifications.api.gen';
|
||||
import type {
|
||||
ListReceiverApiResponse,
|
||||
Receiver,
|
||||
ReceiverIntegration,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
type GenericIntegration = OverrideProperties<
|
||||
ReceiverIntegration,
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
import { createApi, fetchBaseQuery } from '@reduxjs/toolkit/query/react';
|
||||
|
||||
import { getAPIBaseURL, getAPIReducerPath } from '../../util';
|
||||
|
||||
import { GROUP, VERSION } from './const';
|
||||
|
||||
const baseUrl = getAPIBaseURL(GROUP, VERSION);
|
||||
const reducerPath = getAPIReducerPath(GROUP, VERSION);
|
||||
|
||||
export const api = createApi({
|
||||
reducerPath,
|
||||
baseQuery: fetchBaseQuery({
|
||||
// Set URL correctly so MSW can intercept requests
|
||||
// https://mswjs.io/docs/runbook#rtk-query-requests-are-not-intercepted
|
||||
baseUrl: new URL(baseUrl, globalThis.location.origin).href,
|
||||
}),
|
||||
endpoints: () => ({}),
|
||||
});
|
||||
@@ -1,2 +0,0 @@
|
||||
export const VERSION = 'v0alpha1' as const;
|
||||
export const GROUP = 'rules.alerting.grafana.app' as const;
|
||||
@@ -7,9 +7,10 @@ import { OverrideProperties } from 'type-fest';
|
||||
|
||||
import {
|
||||
CreateReceiverApiArg,
|
||||
type ListReceiverApiArg,
|
||||
notificationsAPI,
|
||||
} from '../../../api/notifications/v0alpha1/notifications.api.gen';
|
||||
ListReceiverApiArg,
|
||||
generatedAPI as notificationsAPIv0alpha1,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import type { ContactPoint, EnhancedListReceiverApiResponse } from '../../../api/notifications/v0alpha1/types';
|
||||
|
||||
// this is a workaround for the fact that the generated types are not narrow enough
|
||||
@@ -22,17 +23,17 @@ type ListContactPointsHookResult = TypedUseQueryHookResult<
|
||||
// Type for the options that can be passed to the hook
|
||||
// Based on the pattern used for mutation options in this file
|
||||
type ListContactPointsQueryArgs = Parameters<
|
||||
typeof notificationsAPI.endpoints.listReceiver.useQuery<ListContactPointsHookResult>
|
||||
typeof notificationsAPIv0alpha1.endpoints.listReceiver.useQuery<ListContactPointsHookResult>
|
||||
>[0];
|
||||
|
||||
type ListContactPointsQueryOptions = Parameters<
|
||||
typeof notificationsAPI.endpoints.listReceiver.useQuery<ListContactPointsHookResult>
|
||||
typeof notificationsAPIv0alpha1.endpoints.listReceiver.useQuery<ListContactPointsHookResult>
|
||||
>[1];
|
||||
|
||||
/**
|
||||
* useListContactPoints is a hook that fetches a list of contact points
|
||||
*
|
||||
* This function wraps the notificationsAPI.useListReceiverQuery with proper typing
|
||||
* This function wraps the notificationsAPIv0alpha1.useListReceiverQuery with proper typing
|
||||
* to ensure that the returned ContactPoints are correctly typed in the data.items array.
|
||||
*
|
||||
* It automatically uses the configured namespace for the query.
|
||||
@@ -43,8 +44,8 @@ type ListContactPointsQueryOptions = Parameters<
|
||||
export function useListContactPoints(
|
||||
queryArgs: ListContactPointsQueryArgs = {},
|
||||
queryOptions: ListContactPointsQueryOptions = {}
|
||||
) {
|
||||
return notificationsAPI.useListReceiverQuery<ListContactPointsHookResult>(queryArgs, queryOptions);
|
||||
): ListContactPointsHookResult {
|
||||
return notificationsAPIv0alpha1.useListReceiverQuery<ListContactPointsHookResult>(queryArgs, queryOptions);
|
||||
}
|
||||
|
||||
// type narrowing mutations requires us to define a few helper types
|
||||
@@ -60,7 +61,7 @@ type CreateContactPointMutation = TypedUseMutationResult<
|
||||
>;
|
||||
|
||||
type UseCreateContactPointOptions = Parameters<
|
||||
typeof notificationsAPI.endpoints.createReceiver.useMutation<CreateContactPointMutation>
|
||||
typeof notificationsAPIv0alpha1.endpoints.createReceiver.useMutation<CreateContactPointMutation>
|
||||
>[0];
|
||||
|
||||
/**
|
||||
@@ -69,8 +70,16 @@ type UseCreateContactPointOptions = Parameters<
|
||||
* This function wraps the notificationsAPI.useCreateReceiverMutation with proper typing
|
||||
* to ensure that the payload supports type narrowing.
|
||||
*/
|
||||
export function useCreateContactPoint(options?: UseCreateContactPointOptions) {
|
||||
const [updateFn, result] = notificationsAPI.endpoints.createReceiver.useMutation<CreateContactPointMutation>(options);
|
||||
export function useCreateContactPoint(
|
||||
options?: UseCreateContactPointOptions
|
||||
): readonly [
|
||||
(
|
||||
args: CreateContactPointArgs
|
||||
) => ReturnType<ReturnType<typeof notificationsAPIv0alpha1.endpoints.createReceiver.useMutation>[0]>,
|
||||
ReturnType<typeof notificationsAPIv0alpha1.endpoints.createReceiver.useMutation<CreateContactPointMutation>>[1],
|
||||
] {
|
||||
const [updateFn, result] =
|
||||
notificationsAPIv0alpha1.endpoints.createReceiver.useMutation<CreateContactPointMutation>(options);
|
||||
|
||||
const typedUpdateFn = (args: CreateContactPointArgs) => {
|
||||
// @ts-expect-error this one is just impossible for me to figure out
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { countBy, isEmpty } from 'lodash';
|
||||
|
||||
import { Receiver } from '../api/notifications/v0alpha1/notifications.api.gen';
|
||||
import { Receiver } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { ContactPoint } from '../api/notifications/v0alpha1/types';
|
||||
|
||||
/**
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { RoutingTreeMatcher } from '../api/notifications/v0alpha1/notifications.api.gen';
|
||||
import { RoutingTreeMatcher } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
export type Label = [string, string];
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { VERSION } from '../../api/notifications/v0alpha1/const';
|
||||
import { API_VERSION, RoutingTree } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { LabelMatcherFactory, RouteFactory } from '../../api/notifications/v0alpha1/mocks/fakes/Routes';
|
||||
import { RoutingTree } from '../../api/notifications/v0alpha1/notifications.api.gen';
|
||||
import { Label } from '../../matchers/types';
|
||||
|
||||
import { matchInstancesToRouteTrees } from './useMatchPolicies';
|
||||
@@ -16,7 +16,7 @@ describe('matchInstancesToRouteTrees', () => {
|
||||
const trees: RoutingTree[] = [
|
||||
{
|
||||
kind: 'RoutingTree',
|
||||
apiVersion: VERSION,
|
||||
apiVersion: API_VERSION,
|
||||
metadata: { name: treeName },
|
||||
spec: {
|
||||
defaults: {
|
||||
@@ -24,7 +24,6 @@ describe('matchInstancesToRouteTrees', () => {
|
||||
},
|
||||
routes: [route],
|
||||
},
|
||||
status: {},
|
||||
},
|
||||
];
|
||||
|
||||
@@ -51,7 +50,7 @@ describe('matchInstancesToRouteTrees', () => {
|
||||
const trees: RoutingTree[] = [
|
||||
{
|
||||
kind: 'RoutingTree',
|
||||
apiVersion: VERSION,
|
||||
apiVersion: API_VERSION,
|
||||
metadata: { name: treeName },
|
||||
spec: {
|
||||
defaults: {
|
||||
@@ -59,7 +58,6 @@ describe('matchInstancesToRouteTrees', () => {
|
||||
},
|
||||
routes: [route],
|
||||
},
|
||||
status: {},
|
||||
},
|
||||
];
|
||||
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
import { useCallback } from 'react';
|
||||
|
||||
import { RoutingTree, notificationsAPI } from '../../api/notifications/v0alpha1/notifications.api.gen';
|
||||
import {
|
||||
RoutingTree,
|
||||
generatedAPI as notificationsAPIv0alpha1,
|
||||
} from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { Label } from '../../matchers/types';
|
||||
import { USER_DEFINED_TREE_NAME } from '../consts';
|
||||
import { Route, RouteWithID } from '../types';
|
||||
@@ -24,6 +28,11 @@ export type InstanceMatchResult = {
|
||||
matchedRoutes: RouteMatch[];
|
||||
};
|
||||
|
||||
interface UseMatchInstancesToRouteTreesReturnType
|
||||
extends ReturnType<typeof notificationsAPIv0alpha1.endpoints.listRoutingTree.useQuery> {
|
||||
matchInstancesToRouteTrees: (instances: Label[][]) => InstanceMatchResult[];
|
||||
}
|
||||
|
||||
/**
|
||||
* React hook that finds notification policy routes in all routing trees that match the provided set of alert instances.
|
||||
*
|
||||
@@ -35,8 +44,8 @@ export type InstanceMatchResult = {
|
||||
* @returns An object containing a `matchInstancesToRoutingTrees` function that takes alert instances
|
||||
* and returns an array of InstanceMatchResult objects, each containing the matched routes and matching details
|
||||
*/
|
||||
export function useMatchInstancesToRouteTrees() {
|
||||
const { data, ...rest } = notificationsAPI.endpoints.listRoutingTree.useQuery(
|
||||
export function useMatchInstancesToRouteTrees(): UseMatchInstancesToRouteTreesReturnType {
|
||||
const { data, ...rest } = notificationsAPIv0alpha1.endpoints.listRoutingTree.useQuery(
|
||||
{},
|
||||
{
|
||||
refetchOnFocus: true,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { OverrideProperties } from 'type-fest';
|
||||
|
||||
import { RoutingTreeRoute } from '../api/notifications/v0alpha1/notifications.api.gen';
|
||||
import { RoutingTreeRoute } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { LabelMatcher } from '../matchers/types';
|
||||
|
||||
// type-narrow the route tree
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { groupBy, isArray, pick, reduce, uniqueId } from 'lodash';
|
||||
|
||||
import { RoutingTree, RoutingTreeRoute } from '../api/notifications/v0alpha1/notifications.api.gen';
|
||||
import { RoutingTree, RoutingTreeRoute } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
|
||||
import { Label } from '../matchers/types';
|
||||
import { LabelMatchDetails, matchLabels } from '../matchers/utils';
|
||||
|
||||
|
||||
@@ -19,5 +19,5 @@ export { type LabelMatcher, type Label } from './grafana/matchers/types';
|
||||
export { matchLabelsSet, matchLabels, isLabelMatch, type LabelMatchDetails } from './grafana/matchers/utils';
|
||||
|
||||
// API endpoints
|
||||
export { notificationsAPI as notificationsAPIv0alpha1 } from './grafana/api/notifications/v0alpha1/notifications.api.gen';
|
||||
export { rulesAPI as rulesAPIv0alpha1 } from './grafana/api/rules/v0alpha1/rules.api.gen';
|
||||
export { generatedAPI as notificationsAPIv0alpha1 } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
export { generatedAPI as rulesAPIv0alpha1 } from '@grafana/api-clients/rtkq/rules.alerting/v0alpha1';
|
||||
|
||||
@@ -2,13 +2,25 @@ import { configureStore } from '@reduxjs/toolkit';
|
||||
import { useEffect } from 'react';
|
||||
import { Provider } from 'react-redux';
|
||||
|
||||
import { notificationsAPIv0alpha1 } from '../src/unstable';
|
||||
import { MockBackendSrv } from '@grafana/api-clients';
|
||||
import { generatedAPI as notificationsAPIv0alpha1 } from '@grafana/api-clients/rtkq/notifications.alerting/v0alpha1';
|
||||
import { generatedAPI as rulesAPIv0alpha1 } from '@grafana/api-clients/rtkq/rules.alerting/v0alpha1';
|
||||
import { setBackendSrv } from '@grafana/runtime';
|
||||
|
||||
// Initialize BackendSrv for tests - this allows RTKQ to make HTTP requests
|
||||
// The actual HTTP requests will be intercepted by MSW (setupMockServer)
|
||||
// We only need to implement fetch() which is what RTKQ uses
|
||||
// we could remove this once @grafana/api-client no longer uses the BackendSrv
|
||||
// @ts-ignore
|
||||
setBackendSrv(new MockBackendSrv());
|
||||
|
||||
// create an empty store
|
||||
export const store = configureStore({
|
||||
middleware: (getDefaultMiddleware) => getDefaultMiddleware().concat(notificationsAPIv0alpha1.middleware),
|
||||
export const store: ReturnType<typeof configureStore> = configureStore({
|
||||
middleware: (getDefaultMiddleware) =>
|
||||
getDefaultMiddleware().concat(notificationsAPIv0alpha1.middleware).concat(rulesAPIv0alpha1.middleware),
|
||||
reducer: {
|
||||
[notificationsAPIv0alpha1.reducerPath]: notificationsAPIv0alpha1.reducer,
|
||||
[rulesAPIv0alpha1.reducerPath]: rulesAPIv0alpha1.reducer,
|
||||
},
|
||||
});
|
||||
|
||||
|
||||
@@ -13,7 +13,14 @@ import '@testing-library/jest-dom';
|
||||
* method which wraps the passed element in all of the necessary Providers,
|
||||
* so it can render correctly in the context of the application
|
||||
*/
|
||||
const customRender = (ui: React.ReactNode, renderOptions: RenderOptions = {}) => {
|
||||
const customRender = (
|
||||
ui: React.ReactNode,
|
||||
renderOptions: RenderOptions = {}
|
||||
): {
|
||||
renderResult: ReturnType<typeof render>;
|
||||
user: ReturnType<typeof userEvent.setup>;
|
||||
store: typeof store;
|
||||
} => {
|
||||
const user = userEvent.setup();
|
||||
const Providers = renderOptions.wrapper || getDefaultWrapper();
|
||||
|
||||
|
||||
@@ -116,6 +116,18 @@
|
||||
"import": "./dist/esm/clients/rtkq/shorturl/v1beta1/index.mjs",
|
||||
"require": "./dist/cjs/clients/rtkq/shorturl/v1beta1/index.cjs"
|
||||
},
|
||||
"./rtkq/notifications.alerting/v0alpha1": {
|
||||
"@grafana-app/source": "./src/clients/rtkq/notifications.alerting/v0alpha1/index.ts",
|
||||
"types": "./dist/types/clients/rtkq/notifications.alerting/v0alpha1/index.d.ts",
|
||||
"import": "./dist/esm/clients/rtkq/notifications.alerting/v0alpha1/index.mjs",
|
||||
"require": "./dist/cjs/clients/rtkq/notifications.alerting/v0alpha1/index.cjs"
|
||||
},
|
||||
"./rtkq/rules.alerting/v0alpha1": {
|
||||
"@grafana-app/source": "./src/clients/rtkq/rules.alerting/v0alpha1/index.ts",
|
||||
"types": "./dist/types/clients/rtkq/rules.alerting/v0alpha1/index.d.ts",
|
||||
"import": "./dist/esm/clients/rtkq/rules.alerting/v0alpha1/index.mjs",
|
||||
"require": "./dist/cjs/clients/rtkq/rules.alerting/v0alpha1/index.cjs"
|
||||
},
|
||||
"./rtkq/historian.alerting/v0alpha1": {
|
||||
"@grafana-app/source": "./src/clients/rtkq/historian.alerting/v0alpha1/index.ts",
|
||||
"types": "./dist/types/clients/rtkq/historian.alerting/v0alpha1/index.d.ts",
|
||||
|
||||
@@ -10,10 +10,12 @@ import { generatedAPI as historianAlertingAPIv0alpha1 } from './historian.alerti
|
||||
import { generatedAPI as iamAPIv0alpha1 } from './iam/v0alpha1';
|
||||
import { generatedAPI as logsdrilldownAPIv1alpha1 } from './logsdrilldown/v1alpha1';
|
||||
import { generatedAPI as migrateToCloudAPI } from './migrate-to-cloud';
|
||||
import { generatedAPI as notificationsAlertingAPIv0alpha1 } from './notifications.alerting/v0alpha1';
|
||||
import { generatedAPI as playlistAPIv0alpha1 } from './playlist/v0alpha1';
|
||||
import { generatedAPI as preferencesUserAPI } from './preferences/user';
|
||||
import { generatedAPI as preferencesAPIv1alpha1 } from './preferences/v1alpha1';
|
||||
import { generatedAPI as provisioningAPIv0alpha1 } from './provisioning/v0alpha1';
|
||||
import { generatedAPI as rulesAlertingAPIv0alpha1 } from './rules.alerting/v0alpha1';
|
||||
import { generatedAPI as shortURLAPIv1beta1 } from './shorturl/v1beta1';
|
||||
import { generatedAPI as legacyUserAPI } from './user';
|
||||
// PLOP_INJECT_IMPORT
|
||||
@@ -33,6 +35,8 @@ export const allMiddleware = [
|
||||
shortURLAPIv1beta1.middleware,
|
||||
correlationsAPIv0alpha1.middleware,
|
||||
legacyUserAPI.middleware,
|
||||
notificationsAlertingAPIv0alpha1.middleware,
|
||||
rulesAlertingAPIv0alpha1.middleware,
|
||||
historianAlertingAPIv0alpha1.middleware,
|
||||
logsdrilldownAPIv1alpha1.middleware,
|
||||
// PLOP_INJECT_MIDDLEWARE
|
||||
@@ -53,6 +57,8 @@ export const allReducers = {
|
||||
[shortURLAPIv1beta1.reducerPath]: shortURLAPIv1beta1.reducer,
|
||||
[correlationsAPIv0alpha1.reducerPath]: correlationsAPIv0alpha1.reducer,
|
||||
[legacyUserAPI.reducerPath]: legacyUserAPI.reducer,
|
||||
[notificationsAlertingAPIv0alpha1.reducerPath]: notificationsAlertingAPIv0alpha1.reducer,
|
||||
[rulesAlertingAPIv0alpha1.reducerPath]: rulesAlertingAPIv0alpha1.reducer,
|
||||
[historianAlertingAPIv0alpha1.reducerPath]: historianAlertingAPIv0alpha1.reducer,
|
||||
[logsdrilldownAPIv1alpha1.reducerPath]: logsdrilldownAPIv1alpha1.reducer,
|
||||
// PLOP_INJECT_REDUCER
|
||||
|
||||
@@ -0,0 +1,16 @@
|
||||
import { createApi } from '@reduxjs/toolkit/query/react';
|
||||
|
||||
import { getAPIBaseURL } from '../../../../utils/utils';
|
||||
import { createBaseQuery } from '../../createBaseQuery';
|
||||
|
||||
export const API_GROUP = 'notifications.alerting.grafana.app' as const;
|
||||
export const API_VERSION = 'v0alpha1' as const;
|
||||
export const BASE_URL = getAPIBaseURL(API_GROUP, API_VERSION);
|
||||
|
||||
export const api = createApi({
|
||||
reducerPath: 'notificationsAlertingAPIv0alpha1',
|
||||
baseQuery: createBaseQuery({
|
||||
baseURL: BASE_URL,
|
||||
}),
|
||||
endpoints: () => ({}),
|
||||
});
|
||||
@@ -1,4 +1,4 @@
|
||||
import { api } from './api';
|
||||
import { api } from './baseAPI';
|
||||
export const addTagTypes = ['API Discovery', 'Receiver', 'RoutingTree', 'TemplateGroup', 'TimeInterval'] as const;
|
||||
const injectedRtkApi = api
|
||||
.enhanceEndpoints({
|
||||
@@ -7,7 +7,7 @@ const injectedRtkApi = api
|
||||
.injectEndpoints({
|
||||
endpoints: (build) => ({
|
||||
getApiResources: build.query<GetApiResourcesApiResponse, GetApiResourcesApiArg>({
|
||||
query: () => ({ url: `/apis/notifications.alerting.grafana.app/v0alpha1/` }),
|
||||
query: () => ({ url: `/` }),
|
||||
providesTags: ['API Discovery'],
|
||||
}),
|
||||
listReceiver: build.query<ListReceiverApiResponse, ListReceiverApiArg>({
|
||||
@@ -119,44 +119,6 @@ const injectedRtkApi = api
|
||||
}),
|
||||
invalidatesTags: ['Receiver'],
|
||||
}),
|
||||
getReceiverStatus: build.query<GetReceiverStatusApiResponse, GetReceiverStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/receivers/${queryArg.name}/status`,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
},
|
||||
}),
|
||||
providesTags: ['Receiver'],
|
||||
}),
|
||||
replaceReceiverStatus: build.mutation<ReplaceReceiverStatusApiResponse, ReplaceReceiverStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/receivers/${queryArg.name}/status`,
|
||||
method: 'PUT',
|
||||
body: queryArg.receiver,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['Receiver'],
|
||||
}),
|
||||
updateReceiverStatus: build.mutation<UpdateReceiverStatusApiResponse, UpdateReceiverStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/receivers/${queryArg.name}/status`,
|
||||
method: 'PATCH',
|
||||
body: queryArg.patch,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
force: queryArg.force,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['Receiver'],
|
||||
}),
|
||||
listRoutingTree: build.query<ListRoutingTreeApiResponse, ListRoutingTreeApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/routingtrees`,
|
||||
@@ -269,44 +231,6 @@ const injectedRtkApi = api
|
||||
}),
|
||||
invalidatesTags: ['RoutingTree'],
|
||||
}),
|
||||
getRoutingTreeStatus: build.query<GetRoutingTreeStatusApiResponse, GetRoutingTreeStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/routingtrees/${queryArg.name}/status`,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
},
|
||||
}),
|
||||
providesTags: ['RoutingTree'],
|
||||
}),
|
||||
replaceRoutingTreeStatus: build.mutation<ReplaceRoutingTreeStatusApiResponse, ReplaceRoutingTreeStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/routingtrees/${queryArg.name}/status`,
|
||||
method: 'PUT',
|
||||
body: queryArg.routingTree,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['RoutingTree'],
|
||||
}),
|
||||
updateRoutingTreeStatus: build.mutation<UpdateRoutingTreeStatusApiResponse, UpdateRoutingTreeStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/routingtrees/${queryArg.name}/status`,
|
||||
method: 'PATCH',
|
||||
body: queryArg.patch,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
force: queryArg.force,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['RoutingTree'],
|
||||
}),
|
||||
listTemplateGroup: build.query<ListTemplateGroupApiResponse, ListTemplateGroupApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/templategroups`,
|
||||
@@ -419,47 +343,6 @@ const injectedRtkApi = api
|
||||
}),
|
||||
invalidatesTags: ['TemplateGroup'],
|
||||
}),
|
||||
getTemplateGroupStatus: build.query<GetTemplateGroupStatusApiResponse, GetTemplateGroupStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/templategroups/${queryArg.name}/status`,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
},
|
||||
}),
|
||||
providesTags: ['TemplateGroup'],
|
||||
}),
|
||||
replaceTemplateGroupStatus: build.mutation<
|
||||
ReplaceTemplateGroupStatusApiResponse,
|
||||
ReplaceTemplateGroupStatusApiArg
|
||||
>({
|
||||
query: (queryArg) => ({
|
||||
url: `/templategroups/${queryArg.name}/status`,
|
||||
method: 'PUT',
|
||||
body: queryArg.templateGroup,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['TemplateGroup'],
|
||||
}),
|
||||
updateTemplateGroupStatus: build.mutation<UpdateTemplateGroupStatusApiResponse, UpdateTemplateGroupStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/templategroups/${queryArg.name}/status`,
|
||||
method: 'PATCH',
|
||||
body: queryArg.patch,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
force: queryArg.force,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['TemplateGroup'],
|
||||
}),
|
||||
listTimeInterval: build.query<ListTimeIntervalApiResponse, ListTimeIntervalApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/timeintervals`,
|
||||
@@ -572,48 +455,10 @@ const injectedRtkApi = api
|
||||
}),
|
||||
invalidatesTags: ['TimeInterval'],
|
||||
}),
|
||||
getTimeIntervalStatus: build.query<GetTimeIntervalStatusApiResponse, GetTimeIntervalStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/timeintervals/${queryArg.name}/status`,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
},
|
||||
}),
|
||||
providesTags: ['TimeInterval'],
|
||||
}),
|
||||
replaceTimeIntervalStatus: build.mutation<ReplaceTimeIntervalStatusApiResponse, ReplaceTimeIntervalStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/timeintervals/${queryArg.name}/status`,
|
||||
method: 'PUT',
|
||||
body: queryArg.timeInterval,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['TimeInterval'],
|
||||
}),
|
||||
updateTimeIntervalStatus: build.mutation<UpdateTimeIntervalStatusApiResponse, UpdateTimeIntervalStatusApiArg>({
|
||||
query: (queryArg) => ({
|
||||
url: `/timeintervals/${queryArg.name}/status`,
|
||||
method: 'PATCH',
|
||||
body: queryArg.patch,
|
||||
params: {
|
||||
pretty: queryArg.pretty,
|
||||
dryRun: queryArg.dryRun,
|
||||
fieldManager: queryArg.fieldManager,
|
||||
fieldValidation: queryArg.fieldValidation,
|
||||
force: queryArg.force,
|
||||
},
|
||||
}),
|
||||
invalidatesTags: ['TimeInterval'],
|
||||
}),
|
||||
}),
|
||||
overrideExisting: false,
|
||||
});
|
||||
export { injectedRtkApi as notificationsAPI };
|
||||
export { injectedRtkApi as generatedAPI };
|
||||
export type GetApiResourcesApiResponse = /** status 200 OK */ ApiResourceList;
|
||||
export type GetApiResourcesApiArg = void;
|
||||
export type ListReceiverApiResponse = /** status 200 OK */ ReceiverList;
|
||||
@@ -781,43 +626,6 @@ export type UpdateReceiverApiArg = {
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type GetReceiverStatusApiResponse = /** status 200 OK */ Receiver;
|
||||
export type GetReceiverStatusApiArg = {
|
||||
/** name of the Receiver */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
};
|
||||
export type ReplaceReceiverStatusApiResponse = /** status 200 OK */ Receiver | /** status 201 Created */ Receiver;
|
||||
export type ReplaceReceiverStatusApiArg = {
|
||||
/** name of the Receiver */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
receiver: Receiver;
|
||||
};
|
||||
export type UpdateReceiverStatusApiResponse = /** status 200 OK */ Receiver | /** status 201 Created */ Receiver;
|
||||
export type UpdateReceiverStatusApiArg = {
|
||||
/** name of the Receiver */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
/** Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. */
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type ListRoutingTreeApiResponse = /** status 200 OK */ RoutingTreeList;
|
||||
export type ListRoutingTreeApiArg = {
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
@@ -983,47 +791,6 @@ export type UpdateRoutingTreeApiArg = {
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type GetRoutingTreeStatusApiResponse = /** status 200 OK */ RoutingTree;
|
||||
export type GetRoutingTreeStatusApiArg = {
|
||||
/** name of the RoutingTree */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
};
|
||||
export type ReplaceRoutingTreeStatusApiResponse = /** status 200 OK */
|
||||
| RoutingTree
|
||||
| /** status 201 Created */ RoutingTree;
|
||||
export type ReplaceRoutingTreeStatusApiArg = {
|
||||
/** name of the RoutingTree */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
routingTree: RoutingTree;
|
||||
};
|
||||
export type UpdateRoutingTreeStatusApiResponse = /** status 200 OK */
|
||||
| RoutingTree
|
||||
| /** status 201 Created */ RoutingTree;
|
||||
export type UpdateRoutingTreeStatusApiArg = {
|
||||
/** name of the RoutingTree */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
/** Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. */
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type ListTemplateGroupApiResponse = /** status 200 OK */ TemplateGroupList;
|
||||
export type ListTemplateGroupApiArg = {
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
@@ -1193,47 +960,6 @@ export type UpdateTemplateGroupApiArg = {
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type GetTemplateGroupStatusApiResponse = /** status 200 OK */ TemplateGroup;
|
||||
export type GetTemplateGroupStatusApiArg = {
|
||||
/** name of the TemplateGroup */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
};
|
||||
export type ReplaceTemplateGroupStatusApiResponse = /** status 200 OK */
|
||||
| TemplateGroup
|
||||
| /** status 201 Created */ TemplateGroup;
|
||||
export type ReplaceTemplateGroupStatusApiArg = {
|
||||
/** name of the TemplateGroup */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
templateGroup: TemplateGroup;
|
||||
};
|
||||
export type UpdateTemplateGroupStatusApiResponse = /** status 200 OK */
|
||||
| TemplateGroup
|
||||
| /** status 201 Created */ TemplateGroup;
|
||||
export type UpdateTemplateGroupStatusApiArg = {
|
||||
/** name of the TemplateGroup */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
/** Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. */
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type ListTimeIntervalApiResponse = /** status 200 OK */ TimeIntervalList;
|
||||
export type ListTimeIntervalApiArg = {
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
@@ -1399,47 +1125,6 @@ export type UpdateTimeIntervalApiArg = {
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type GetTimeIntervalStatusApiResponse = /** status 200 OK */ TimeInterval;
|
||||
export type GetTimeIntervalStatusApiArg = {
|
||||
/** name of the TimeInterval */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
};
|
||||
export type ReplaceTimeIntervalStatusApiResponse = /** status 200 OK */
|
||||
| TimeInterval
|
||||
| /** status 201 Created */ TimeInterval;
|
||||
export type ReplaceTimeIntervalStatusApiArg = {
|
||||
/** name of the TimeInterval */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
timeInterval: TimeInterval;
|
||||
};
|
||||
export type UpdateTimeIntervalStatusApiResponse = /** status 200 OK */
|
||||
| TimeInterval
|
||||
| /** status 201 Created */ TimeInterval;
|
||||
export type UpdateTimeIntervalStatusApiArg = {
|
||||
/** name of the TimeInterval */
|
||||
name: string;
|
||||
/** If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). */
|
||||
pretty?: string;
|
||||
/** When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed */
|
||||
dryRun?: string;
|
||||
/** fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). */
|
||||
fieldManager?: string;
|
||||
/** fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. */
|
||||
fieldValidation?: string;
|
||||
/** Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. */
|
||||
force?: boolean;
|
||||
patch: Patch;
|
||||
};
|
||||
export type ApiResource = {
|
||||
/** categories is a list of the grouped resources this resource belongs to (e.g. 'all') */
|
||||
categories?: string[];
|
||||
@@ -1572,34 +1257,6 @@ export type ReceiverSpec = {
|
||||
integrations: ReceiverIntegration[];
|
||||
title: string;
|
||||
};
|
||||
export type ReceiverOperatorState = {
|
||||
/** descriptiveState is an optional more descriptive state field which has no requirements on format */
|
||||
descriptiveState?: string;
|
||||
/** details contains any extra information that is operator-specific */
|
||||
details?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** lastEvaluation is the ResourceVersion last evaluated */
|
||||
lastEvaluation: string;
|
||||
/** state describes the state of the lastEvaluation.
|
||||
It is limited to three possible states for machine evaluation. */
|
||||
state: 'success' | 'in_progress' | 'failed';
|
||||
};
|
||||
export type ReceiverStatus = {
|
||||
/** additionalFields is reserved for future use */
|
||||
additionalFields?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** operatorStates is a map of operator ID to operator state evaluations.
|
||||
Any operator which consumes this kind SHOULD add its state evaluation information to this field. */
|
||||
operatorStates?: {
|
||||
[key: string]: ReceiverOperatorState;
|
||||
};
|
||||
};
|
||||
export type Receiver = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
apiVersion: string;
|
||||
@@ -1607,7 +1264,6 @@ export type Receiver = {
|
||||
kind: string;
|
||||
metadata: ObjectMeta;
|
||||
spec: ReceiverSpec;
|
||||
status?: ReceiverStatus;
|
||||
};
|
||||
export type ListMeta = {
|
||||
/** continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message. */
|
||||
@@ -1700,34 +1356,6 @@ export type RoutingTreeSpec = {
|
||||
defaults: RoutingTreeRouteDefaults;
|
||||
routes: RoutingTreeRoute[];
|
||||
};
|
||||
export type RoutingTreeOperatorState = {
|
||||
/** descriptiveState is an optional more descriptive state field which has no requirements on format */
|
||||
descriptiveState?: string;
|
||||
/** details contains any extra information that is operator-specific */
|
||||
details?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** lastEvaluation is the ResourceVersion last evaluated */
|
||||
lastEvaluation: string;
|
||||
/** state describes the state of the lastEvaluation.
|
||||
It is limited to three possible states for machine evaluation. */
|
||||
state: 'success' | 'in_progress' | 'failed';
|
||||
};
|
||||
export type RoutingTreeStatus = {
|
||||
/** additionalFields is reserved for future use */
|
||||
additionalFields?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** operatorStates is a map of operator ID to operator state evaluations.
|
||||
Any operator which consumes this kind SHOULD add its state evaluation information to this field. */
|
||||
operatorStates?: {
|
||||
[key: string]: RoutingTreeOperatorState;
|
||||
};
|
||||
};
|
||||
export type RoutingTree = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
apiVersion: string;
|
||||
@@ -1735,7 +1363,6 @@ export type RoutingTree = {
|
||||
kind: string;
|
||||
metadata: ObjectMeta;
|
||||
spec: RoutingTreeSpec;
|
||||
status?: RoutingTreeStatus;
|
||||
};
|
||||
export type RoutingTreeList = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
@@ -1745,38 +1372,12 @@ export type RoutingTreeList = {
|
||||
kind?: string;
|
||||
metadata: ListMeta;
|
||||
};
|
||||
export type TemplateGroupTemplateKind = 'grafana' | 'mimir';
|
||||
export type TemplateGroupSpec = {
|
||||
content: string;
|
||||
kind: TemplateGroupTemplateKind;
|
||||
title: string;
|
||||
};
|
||||
export type TemplateGroupOperatorState = {
|
||||
/** descriptiveState is an optional more descriptive state field which has no requirements on format */
|
||||
descriptiveState?: string;
|
||||
/** details contains any extra information that is operator-specific */
|
||||
details?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** lastEvaluation is the ResourceVersion last evaluated */
|
||||
lastEvaluation: string;
|
||||
/** state describes the state of the lastEvaluation.
|
||||
It is limited to three possible states for machine evaluation. */
|
||||
state: 'success' | 'in_progress' | 'failed';
|
||||
};
|
||||
export type TemplateGroupStatus = {
|
||||
/** additionalFields is reserved for future use */
|
||||
additionalFields?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** operatorStates is a map of operator ID to operator state evaluations.
|
||||
Any operator which consumes this kind SHOULD add its state evaluation information to this field. */
|
||||
operatorStates?: {
|
||||
[key: string]: TemplateGroupOperatorState;
|
||||
};
|
||||
};
|
||||
export type TemplateGroup = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
apiVersion: string;
|
||||
@@ -1784,7 +1385,6 @@ export type TemplateGroup = {
|
||||
kind: string;
|
||||
metadata: ObjectMeta;
|
||||
spec: TemplateGroupSpec;
|
||||
status?: TemplateGroupStatus;
|
||||
};
|
||||
export type TemplateGroupList = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
@@ -1810,34 +1410,6 @@ export type TimeIntervalSpec = {
|
||||
name: string;
|
||||
time_intervals: TimeIntervalInterval[];
|
||||
};
|
||||
export type TimeIntervalOperatorState = {
|
||||
/** descriptiveState is an optional more descriptive state field which has no requirements on format */
|
||||
descriptiveState?: string;
|
||||
/** details contains any extra information that is operator-specific */
|
||||
details?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** lastEvaluation is the ResourceVersion last evaluated */
|
||||
lastEvaluation: string;
|
||||
/** state describes the state of the lastEvaluation.
|
||||
It is limited to three possible states for machine evaluation. */
|
||||
state: 'success' | 'in_progress' | 'failed';
|
||||
};
|
||||
export type TimeIntervalStatus = {
|
||||
/** additionalFields is reserved for future use */
|
||||
additionalFields?: {
|
||||
[key: string]: {
|
||||
[key: string]: any;
|
||||
};
|
||||
};
|
||||
/** operatorStates is a map of operator ID to operator state evaluations.
|
||||
Any operator which consumes this kind SHOULD add its state evaluation information to this field. */
|
||||
operatorStates?: {
|
||||
[key: string]: TimeIntervalOperatorState;
|
||||
};
|
||||
};
|
||||
export type TimeInterval = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
apiVersion: string;
|
||||
@@ -1845,7 +1417,6 @@ export type TimeInterval = {
|
||||
kind: string;
|
||||
metadata: ObjectMeta;
|
||||
spec: TimeIntervalSpec;
|
||||
status?: TimeIntervalStatus;
|
||||
};
|
||||
export type TimeIntervalList = {
|
||||
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
|
||||
@@ -1855,3 +1426,43 @@ export type TimeIntervalList = {
|
||||
kind?: string;
|
||||
metadata: ListMeta;
|
||||
};
|
||||
export const {
|
||||
useGetApiResourcesQuery,
|
||||
useLazyGetApiResourcesQuery,
|
||||
useListReceiverQuery,
|
||||
useLazyListReceiverQuery,
|
||||
useCreateReceiverMutation,
|
||||
useDeletecollectionReceiverMutation,
|
||||
useGetReceiverQuery,
|
||||
useLazyGetReceiverQuery,
|
||||
useReplaceReceiverMutation,
|
||||
useDeleteReceiverMutation,
|
||||
useUpdateReceiverMutation,
|
||||
useListRoutingTreeQuery,
|
||||
useLazyListRoutingTreeQuery,
|
||||
useCreateRoutingTreeMutation,
|
||||
useDeletecollectionRoutingTreeMutation,
|
||||
useGetRoutingTreeQuery,
|
||||
useLazyGetRoutingTreeQuery,
|
||||
useReplaceRoutingTreeMutation,
|
||||
useDeleteRoutingTreeMutation,
|
||||
useUpdateRoutingTreeMutation,
|
||||
useListTemplateGroupQuery,
|
||||
useLazyListTemplateGroupQuery,
|
||||
useCreateTemplateGroupMutation,
|
||||
useDeletecollectionTemplateGroupMutation,
|
||||
useGetTemplateGroupQuery,
|
||||
useLazyGetTemplateGroupQuery,
|
||||
useReplaceTemplateGroupMutation,
|
||||
useDeleteTemplateGroupMutation,
|
||||
useUpdateTemplateGroupMutation,
|
||||
useListTimeIntervalQuery,
|
||||
useLazyListTimeIntervalQuery,
|
||||
useCreateTimeIntervalMutation,
|
||||
useDeletecollectionTimeIntervalMutation,
|
||||
useGetTimeIntervalQuery,
|
||||
useLazyGetTimeIntervalQuery,
|
||||
useReplaceTimeIntervalMutation,
|
||||
useDeleteTimeIntervalMutation,
|
||||
useUpdateTimeIntervalMutation,
|
||||
} = injectedRtkApi;
|
||||
@@ -0,0 +1,5 @@
|
||||
export { BASE_URL, API_GROUP, API_VERSION } from './baseAPI';
|
||||
import { generatedAPI as rawAPI } from './endpoints.gen';
|
||||
|
||||
export * from './endpoints.gen';
|
||||
export const generatedAPI = rawAPI.enhanceEndpoints({});
|
||||
@@ -0,0 +1,16 @@
|
||||
import { createApi } from '@reduxjs/toolkit/query/react';
|
||||
|
||||
import { getAPIBaseURL } from '../../../../utils/utils';
|
||||
import { createBaseQuery } from '../../createBaseQuery';
|
||||
|
||||
export const API_GROUP = 'rules.alerting.grafana.app' as const;
|
||||
export const API_VERSION = 'v0alpha1' as const;
|
||||
export const BASE_URL = getAPIBaseURL(API_GROUP, API_VERSION);
|
||||
|
||||
export const api = createApi({
|
||||
reducerPath: 'rulesAlertingAPIv0alpha1',
|
||||
baseQuery: createBaseQuery({
|
||||
baseURL: BASE_URL,
|
||||
}),
|
||||
endpoints: () => ({}),
|
||||
});
|
||||
@@ -1,4 +1,4 @@
|
||||
import { api } from './api';
|
||||
import { api } from './baseAPI';
|
||||
export const addTagTypes = ['API Discovery', 'AlertRule', 'RecordingRule'] as const;
|
||||
const injectedRtkApi = api
|
||||
.enhanceEndpoints({
|
||||
@@ -7,7 +7,7 @@ const injectedRtkApi = api
|
||||
.injectEndpoints({
|
||||
endpoints: (build) => ({
|
||||
getApiResources: build.query<GetApiResourcesApiResponse, GetApiResourcesApiArg>({
|
||||
query: () => ({ url: `/apis/rules.alerting.grafana.app/v0alpha1/` }),
|
||||
query: () => ({ url: `/` }),
|
||||
providesTags: ['API Discovery'],
|
||||
}),
|
||||
listAlertRule: build.query<ListAlertRuleApiResponse, ListAlertRuleApiArg>({
|
||||
@@ -313,7 +313,7 @@ const injectedRtkApi = api
|
||||
}),
|
||||
overrideExisting: false,
|
||||
});
|
||||
export { injectedRtkApi as rulesAPI };
|
||||
export { injectedRtkApi as generatedAPI };
|
||||
export type GetApiResourcesApiResponse = /** status 200 OK */ ApiResourceList;
|
||||
export type GetApiResourcesApiArg = void;
|
||||
export type ListAlertRuleApiResponse = /** status 200 OK */ AlertRuleList;
|
||||
@@ -1085,3 +1085,33 @@ export type RecordingRuleList = {
|
||||
kind?: string;
|
||||
metadata: ListMeta;
|
||||
};
|
||||
export const {
|
||||
useGetApiResourcesQuery,
|
||||
useLazyGetApiResourcesQuery,
|
||||
useListAlertRuleQuery,
|
||||
useLazyListAlertRuleQuery,
|
||||
useCreateAlertRuleMutation,
|
||||
useDeletecollectionAlertRuleMutation,
|
||||
useGetAlertRuleQuery,
|
||||
useLazyGetAlertRuleQuery,
|
||||
useReplaceAlertRuleMutation,
|
||||
useDeleteAlertRuleMutation,
|
||||
useUpdateAlertRuleMutation,
|
||||
useGetAlertRuleStatusQuery,
|
||||
useLazyGetAlertRuleStatusQuery,
|
||||
useReplaceAlertRuleStatusMutation,
|
||||
useUpdateAlertRuleStatusMutation,
|
||||
useListRecordingRuleQuery,
|
||||
useLazyListRecordingRuleQuery,
|
||||
useCreateRecordingRuleMutation,
|
||||
useDeletecollectionRecordingRuleMutation,
|
||||
useGetRecordingRuleQuery,
|
||||
useLazyGetRecordingRuleQuery,
|
||||
useReplaceRecordingRuleMutation,
|
||||
useDeleteRecordingRuleMutation,
|
||||
useUpdateRecordingRuleMutation,
|
||||
useGetRecordingRuleStatusQuery,
|
||||
useLazyGetRecordingRuleStatusQuery,
|
||||
useReplaceRecordingRuleStatusMutation,
|
||||
useUpdateRecordingRuleStatusMutation,
|
||||
} = injectedRtkApi;
|
||||
@@ -0,0 +1,5 @@
|
||||
export { BASE_URL, API_GROUP, API_VERSION } from './baseAPI';
|
||||
import { generatedAPI as rawAPI } from './endpoints.gen';
|
||||
|
||||
export * from './endpoints.gen';
|
||||
export const generatedAPI = rawAPI.enhanceEndpoints({});
|
||||
@@ -1 +1,4 @@
|
||||
export { getAPINamespace, getAPIBaseURL, normalizeError, handleRequestError } from './utils/utils';
|
||||
|
||||
/* @TODO figure out how to automatically set the MockBackendSrv when consumers of this package write tests using the exported clients */
|
||||
export { MockBackendSrv } from './utils/backendSrv.mock';
|
||||
|
||||
@@ -108,6 +108,8 @@ const config: ConfigFile = {
|
||||
...createAPIConfig('preferences', 'v1alpha1'),
|
||||
...createAPIConfig('provisioning', 'v0alpha1'),
|
||||
...createAPIConfig('shorturl', 'v1beta1'),
|
||||
...createAPIConfig('notifications.alerting', 'v0alpha1'),
|
||||
...createAPIConfig('rules.alerting', 'v0alpha1'),
|
||||
...createAPIConfig('historian.alerting', 'v0alpha1'),
|
||||
...createAPIConfig('logsdrilldown', 'v1alpha1'),
|
||||
// PLOP_INJECT_API_CLIENT - Used by the API client generator
|
||||
|
||||
46
packages/grafana-api-clients/src/utils/backendSrv.mock.ts
Normal file
46
packages/grafana-api-clients/src/utils/backendSrv.mock.ts
Normal file
@@ -0,0 +1,46 @@
|
||||
import { Observable } from 'rxjs';
|
||||
import { fromFetch } from 'rxjs/fetch';
|
||||
|
||||
import { BackendSrv, BackendSrvRequest, FetchResponse } from '@grafana/runtime';
|
||||
|
||||
/**
|
||||
* Minimal mock implementation of BackendSrv for testing.
|
||||
* Only implements the fetch() method which is used by RTKQ.
|
||||
* HTTP requests are intercepted by MSW in tests.
|
||||
*/
|
||||
export class MockBackendSrv implements Partial<BackendSrv> {
|
||||
fetch<T>(options: BackendSrvRequest): Observable<FetchResponse<T>> {
|
||||
const init: RequestInit = {
|
||||
method: options.method || 'GET',
|
||||
headers: options.headers,
|
||||
body: options.data ? JSON.stringify(options.data) : undefined,
|
||||
credentials: options.credentials,
|
||||
signal: options.abortSignal,
|
||||
};
|
||||
|
||||
return new Observable((observer) => {
|
||||
fromFetch(options.url, init).subscribe({
|
||||
next: async (response) => {
|
||||
try {
|
||||
const data = await response.json();
|
||||
observer.next({
|
||||
data,
|
||||
status: response.status,
|
||||
statusText: response.statusText,
|
||||
ok: response.ok,
|
||||
headers: response.headers,
|
||||
redirected: response.redirected,
|
||||
type: response.type,
|
||||
url: response.url,
|
||||
config: options,
|
||||
});
|
||||
observer.complete();
|
||||
} catch (error) {
|
||||
observer.error(error);
|
||||
}
|
||||
},
|
||||
error: (error) => observer.error(error),
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1259,4 +1259,8 @@ export interface FeatureToggles {
|
||||
* Enables the ASAP smoothing transformation for time series data
|
||||
*/
|
||||
smoothingTransformation?: boolean;
|
||||
/**
|
||||
* Enables the creation of keepers that manage secrets stored on AWS secrets manager
|
||||
*/
|
||||
secretsManagementAppPlatformAwsKeeper?: boolean;
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,834 +0,0 @@
|
||||
{
|
||||
"_comment": "Core Grafana history https://github.com/grafana/grafana/blob/v11.0.0-preview/public/app/plugins/datasource/prometheus/dashboards/prometheus_stats.json",
|
||||
"__inputs": [
|
||||
{
|
||||
"name": "DS_GDEV-PROMETHEUS",
|
||||
"label": "gdev-prometheus",
|
||||
"description": "",
|
||||
"type": "datasource",
|
||||
"pluginId": "prometheus",
|
||||
"pluginName": "Prometheus"
|
||||
}
|
||||
],
|
||||
"__requires": [
|
||||
{
|
||||
"type": "grafana",
|
||||
"id": "grafana",
|
||||
"name": "Grafana",
|
||||
"version": "8.1.0-pre"
|
||||
},
|
||||
{
|
||||
"type": "datasource",
|
||||
"id": "prometheus",
|
||||
"name": "Prometheus",
|
||||
"version": "1.0.0"
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "stat",
|
||||
"name": "Stat",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "text",
|
||||
"name": "Text",
|
||||
"version": ""
|
||||
},
|
||||
{
|
||||
"type": "panel",
|
||||
"id": "timeseries",
|
||||
"name": "Time series",
|
||||
"version": ""
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"gnetId": null,
|
||||
"graphTooltip": 0,
|
||||
"id": null,
|
||||
"iteration": 1624859749459,
|
||||
"links": [
|
||||
{
|
||||
"icon": "info",
|
||||
"tags": [],
|
||||
"targetBlank": true,
|
||||
"title": "Grafana Docs",
|
||||
"tooltip": "",
|
||||
"type": "link",
|
||||
"url": "https://grafana.com/docs/grafana/latest/"
|
||||
},
|
||||
{
|
||||
"icon": "info",
|
||||
"tags": [],
|
||||
"targetBlank": true,
|
||||
"title": "Prometheus Docs",
|
||||
"type": "link",
|
||||
"url": "http://prometheus.io/docs/introduction/overview/"
|
||||
}
|
||||
],
|
||||
"panels": [
|
||||
{
|
||||
"cacheTimeout": null,
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"decimals": 1,
|
||||
"mappings": [
|
||||
{
|
||||
"options": {
|
||||
"match": "null",
|
||||
"result": {
|
||||
"text": "N/A"
|
||||
}
|
||||
},
|
||||
"type": "special"
|
||||
}
|
||||
],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "s"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 6,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 5,
|
||||
"interval": null,
|
||||
"links": [],
|
||||
"maxDataPoints": 100,
|
||||
"options": {
|
||||
"colorMode": "none",
|
||||
"graphMode": "none",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "horizontal",
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "(time() - process_start_time_seconds{job=\"prometheus\", instance=~\"$node\"})",
|
||||
"intervalFactor": 2,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Uptime",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"cacheTimeout": null,
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"fixedColor": "rgb(31, 120, 193)",
|
||||
"mode": "fixed"
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "rgba(50, 172, 45, 0.97)",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "rgba(237, 129, 40, 0.89)",
|
||||
"value": 1
|
||||
},
|
||||
{
|
||||
"color": "rgba(245, 54, 54, 0.9)",
|
||||
"value": 5
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "none"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 6,
|
||||
"x": 6,
|
||||
"y": 0
|
||||
},
|
||||
"id": 6,
|
||||
"interval": null,
|
||||
"links": [],
|
||||
"maxDataPoints": 100,
|
||||
"options": {
|
||||
"colorMode": "none",
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "horizontal",
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "prometheus_local_storage_memory_series{instance=~\"$node\"}",
|
||||
"intervalFactor": 2,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Local Storage Memory Series",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"cacheTimeout": null,
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"mappings": [
|
||||
{
|
||||
"options": {
|
||||
"0": {
|
||||
"text": "Empty"
|
||||
}
|
||||
},
|
||||
"type": "value"
|
||||
}
|
||||
],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "rgba(50, 172, 45, 0.97)",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "rgba(237, 129, 40, 0.89)",
|
||||
"value": 500
|
||||
},
|
||||
{
|
||||
"color": "rgba(245, 54, 54, 0.9)",
|
||||
"value": 4000
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "none"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 6,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"id": 7,
|
||||
"interval": null,
|
||||
"links": [],
|
||||
"maxDataPoints": 100,
|
||||
"options": {
|
||||
"colorMode": "value",
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "horizontal",
|
||||
"reduceOptions": {
|
||||
"calcs": ["lastNotNull"],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"text": {},
|
||||
"textMode": "auto"
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "prometheus_local_storage_indexing_queue_length{instance=~\"$node\"}",
|
||||
"intervalFactor": 2,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Internal Storage Queue Length",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": null,
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 6,
|
||||
"x": 18,
|
||||
"y": 0
|
||||
},
|
||||
"id": 9,
|
||||
"links": [],
|
||||
"options": {
|
||||
"content": "<span style=\"font-family: 'Open Sans', 'Helvetica Neue', Helvetica; font-size: 25px;vertical-align: text-top;color: #bbbfc2;margin-left: 10px;\">Prometheus</span>\n\n<p style=\"margin-top: 10px;\">You're using Prometheus, an open-source systems monitoring and alerting toolkit originally built at SoundCloud. For more information, check out the <a href=\"https://grafana.com/\">Grafana</a> and <a href=\"http://prometheus.io/\">Prometheus</a> projects.</p>",
|
||||
"mode": "html"
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"style": {},
|
||||
"transparent": true,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 10,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "never",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"links": [],
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "short"
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "prometheus"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "color",
|
||||
"value": {
|
||||
"fixedColor": "#C15C17",
|
||||
"mode": "fixed"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "{instance=\"localhost:9090\",job=\"prometheus\"}"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "color",
|
||||
"value": {
|
||||
"fixedColor": "#C15C17",
|
||||
"mode": "fixed"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 6,
|
||||
"w": 18,
|
||||
"x": 0,
|
||||
"y": 5
|
||||
},
|
||||
"id": 3,
|
||||
"links": [],
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "rate(prometheus_local_storage_ingested_samples_total{instance=~\"$node\"}[5m])",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "{{job}}",
|
||||
"metric": "",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Samples ingested (rate-5m)",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": null,
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"gridPos": {
|
||||
"h": 6,
|
||||
"w": 4,
|
||||
"x": 18,
|
||||
"y": 5
|
||||
},
|
||||
"id": 8,
|
||||
"links": [],
|
||||
"options": {
|
||||
"content": "#### Samples Ingested\nThis graph displays the count of samples ingested by the Prometheus server, as measured over the last 5 minutes, per time series in the range vector. When troubleshooting an issue on IRC or GitHub, this is often the first stat requested by the Prometheus team. ",
|
||||
"mode": "markdown"
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"style": {},
|
||||
"transparent": true,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 10,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "never",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"links": [],
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "short"
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "prometheus"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "color",
|
||||
"value": {
|
||||
"fixedColor": "#F9BA8F",
|
||||
"mode": "fixed"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "{instance=\"localhost:9090\",interval=\"5s\",job=\"prometheus\"}"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "color",
|
||||
"value": {
|
||||
"fixedColor": "#F9BA8F",
|
||||
"mode": "fixed"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 10,
|
||||
"x": 0,
|
||||
"y": 11
|
||||
},
|
||||
"id": 2,
|
||||
"links": [],
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "rate(prometheus_target_interval_length_seconds_count{instance=~\"$node\"}[5m])",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "{{job}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Target Scrapes (last 5m)",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 10,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "never",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"links": [],
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "short"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 8,
|
||||
"x": 10,
|
||||
"y": 11
|
||||
},
|
||||
"id": 14,
|
||||
"links": [],
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "prometheus_target_interval_length_seconds{quantile!=\"0.01\", quantile!=\"0.05\",instance=~\"$node\"}",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "{{quantile}} ({{interval}})",
|
||||
"metric": "",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Scrape Duration",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": null,
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 6,
|
||||
"x": 18,
|
||||
"y": 11
|
||||
},
|
||||
"id": 11,
|
||||
"links": [],
|
||||
"options": {
|
||||
"content": "#### Scrapes\nPrometheus scrapes metrics from instrumented jobs, either directly or via an intermediary push gateway for short-lived jobs. Target scrapes will show how frequently targets are scraped, as measured over the last 5 minutes, per time series in the range vector. Scrape Duration will show how long the scrapes are taking, with percentiles available as series. ",
|
||||
"mode": "markdown"
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"style": {},
|
||||
"transparent": true,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 10,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 2,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "never",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"links": [],
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "percentunit"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 18,
|
||||
"x": 0,
|
||||
"y": 18
|
||||
},
|
||||
"id": 12,
|
||||
"links": [],
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "prometheus_evaluator_duration_seconds{quantile!=\"0.01\", quantile!=\"0.05\",instance=~\"$node\"}",
|
||||
"interval": "",
|
||||
"intervalFactor": 2,
|
||||
"legendFormat": "{{quantile}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"timeFrom": null,
|
||||
"timeShift": null,
|
||||
"title": "Rule Eval Duration",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": null,
|
||||
"editable": true,
|
||||
"error": false,
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 6,
|
||||
"x": 18,
|
||||
"y": 18
|
||||
},
|
||||
"id": 15,
|
||||
"links": [],
|
||||
"options": {
|
||||
"content": "#### Rule Evaluation Duration\nThis graph panel plots the duration for all evaluations to execute. The 50th percentile, 90th percentile and 99th percentile are shown as three separate series to help identify outliers that may be skewing the data.",
|
||||
"mode": "markdown"
|
||||
},
|
||||
"pluginVersion": "8.1.0-pre",
|
||||
"style": {},
|
||||
"transparent": true,
|
||||
"type": "text"
|
||||
}
|
||||
],
|
||||
"refresh": false,
|
||||
"revision": "1.0",
|
||||
"schemaVersion": 30,
|
||||
"tags": ["prometheus"],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"allValue": null,
|
||||
"current": {},
|
||||
"datasource": "${DS_GDEV-PROMETHEUS}",
|
||||
"definition": "",
|
||||
"description": null,
|
||||
"error": null,
|
||||
"hide": 0,
|
||||
"includeAll": false,
|
||||
"label": "HOST:",
|
||||
"multi": false,
|
||||
"name": "node",
|
||||
"options": [],
|
||||
"query": {
|
||||
"query": "label_values(prometheus_build_info, instance)",
|
||||
"refId": "gdev-prometheus-node-Variable-Query"
|
||||
},
|
||||
"refresh": 1,
|
||||
"regex": "",
|
||||
"skipUrlSync": false,
|
||||
"sort": 1,
|
||||
"tagValuesQuery": "",
|
||||
"tagsQuery": "",
|
||||
"type": "query",
|
||||
"useTags": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"from": "now-5m",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"now": true,
|
||||
"refresh_intervals": ["5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d"]
|
||||
},
|
||||
"timezone": "browser",
|
||||
"title": "Prometheus Stats",
|
||||
"uid": "rpfmFFz7z",
|
||||
"version": 2
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
import { useId, memo, HTMLAttributes, ReactNode } from 'react';
|
||||
import { useId, memo, HTMLAttributes, ReactNode, SVGProps } from 'react';
|
||||
|
||||
import { FieldDisplay } from '@grafana/data';
|
||||
|
||||
@@ -50,14 +50,13 @@ export const RadialArcPath = memo(
|
||||
}: RadialArcPathProps) => {
|
||||
const id = useId();
|
||||
|
||||
const bgDivStyle: HTMLAttributes<HTMLDivElement>['style'] = { width: '100%', height: '100%' };
|
||||
if ('color' in rest) {
|
||||
bgDivStyle.backgroundColor = rest.color;
|
||||
} else {
|
||||
bgDivStyle.backgroundImage = getGradientCss(rest.gradient, shape);
|
||||
}
|
||||
const isGradient = 'gradient' in rest;
|
||||
|
||||
const { radius, centerX, centerY, barWidth } = dimensions;
|
||||
const { vizWidth, vizHeight, radius, centerX, centerY, barWidth } = dimensions;
|
||||
const pad = Math.ceil(Math.max(2, barWidth / 2)); // pad to cover stroke caps and glow in Safari
|
||||
const boxX = Math.round(centerX - radius - barWidth - pad);
|
||||
const boxY = Math.round(centerY - radius - barWidth - pad);
|
||||
const boxSize = Math.round((radius + barWidth) * 2 + pad * 2);
|
||||
|
||||
const path = drawRadialArcPath(angle, arcLengthDeg, dimensions, roundedBars);
|
||||
|
||||
@@ -72,9 +71,14 @@ export const RadialArcPath = memo(
|
||||
const dotRadius =
|
||||
endpointMarker === 'point' ? Math.min((barWidth / 2) * DOT_RADIUS_FACTOR, MAX_DOT_RADIUS) : barWidth / 2;
|
||||
|
||||
const bgDivStyle: HTMLAttributes<HTMLDivElement>['style'] = { width: boxSize, height: vizHeight, marginLeft: boxX };
|
||||
|
||||
const pathProps: SVGProps<SVGPathElement> = {};
|
||||
let barEndcapColors: [string, string] | undefined;
|
||||
let endpointMarks: ReactNode = null;
|
||||
if ('gradient' in rest) {
|
||||
if (isGradient) {
|
||||
bgDivStyle.backgroundImage = getGradientCss(rest.gradient, shape);
|
||||
|
||||
if (endpointMarker && (rest.gradient?.length ?? 0) > 0) {
|
||||
switch (endpointMarker) {
|
||||
case 'point':
|
||||
@@ -115,25 +119,39 @@ export const RadialArcPath = memo(
|
||||
if (barEndcaps) {
|
||||
barEndcapColors = getBarEndcapColors(rest.gradient, fieldDisplay.display.percent);
|
||||
}
|
||||
|
||||
pathProps.fill = 'none';
|
||||
pathProps.stroke = 'white';
|
||||
} else {
|
||||
bgDivStyle.backgroundColor = rest.color;
|
||||
|
||||
pathProps.fill = 'none';
|
||||
pathProps.stroke = rest.color;
|
||||
}
|
||||
|
||||
const pathEl = (
|
||||
<path d={path} strokeWidth={barWidth} strokeLinecap={roundedBars ? 'round' : 'butt'} {...pathProps} />
|
||||
);
|
||||
|
||||
return (
|
||||
<>
|
||||
{/* FIXME: optimize this by only using clippath + foreign obj for gradients */}
|
||||
<clipPath id={id}>
|
||||
<path d={path} />
|
||||
</clipPath>
|
||||
{isGradient && (
|
||||
<defs>
|
||||
<mask id={id} maskUnits="userSpaceOnUse" maskContentUnits="userSpaceOnUse">
|
||||
<rect x={boxX} y={boxY} width={boxSize} height={boxSize} fill="black" />
|
||||
{pathEl}
|
||||
</mask>
|
||||
</defs>
|
||||
)}
|
||||
|
||||
<g filter={glowFilter}>
|
||||
<foreignObject
|
||||
x={centerX - radius - barWidth}
|
||||
y={centerY - radius - barWidth}
|
||||
width={(radius + barWidth) * 2}
|
||||
height={(radius + barWidth) * 2}
|
||||
clipPath={`url(#${id})`}
|
||||
>
|
||||
<div style={bgDivStyle} />
|
||||
</foreignObject>
|
||||
{isGradient ? (
|
||||
<foreignObject x={0} y={0} width={vizWidth} height={vizHeight} mask={`url(#${id})`}>
|
||||
<div style={bgDivStyle} />
|
||||
</foreignObject>
|
||||
) : (
|
||||
pathEl
|
||||
)}
|
||||
{barEndcapColors?.[0] && <circle cx={xStart} cy={yStart} r={barWidth / 2} fill={barEndcapColors[0]} />}
|
||||
{barEndcapColors?.[1] && (
|
||||
<circle cx={xEnd} cy={yEnd} r={barWidth / 2} fill={barEndcapColors[1]} opacity={0.5} />
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { css, cx } from '@emotion/css';
|
||||
import { useId } from 'react';
|
||||
import { useId, ReactNode } from 'react';
|
||||
|
||||
import { DisplayValueAlignmentFactors, FALLBACK_COLOR, FieldDisplay, GrafanaTheme2, TimeRange } from '@grafana/data';
|
||||
import { selectors } from '@grafana/e2e-selectors';
|
||||
@@ -107,14 +107,14 @@ export function RadialGauge(props: RadialGaugeProps) {
|
||||
const startAngle = shape === 'gauge' ? 250 : 0;
|
||||
const endAngle = shape === 'gauge' ? 110 : 360;
|
||||
|
||||
const defs: React.ReactNode[] = [];
|
||||
const graphics: React.ReactNode[] = [];
|
||||
let sparklineElement: React.ReactNode | null = null;
|
||||
const defs: ReactNode[] = [];
|
||||
const graphics: ReactNode[] = [];
|
||||
let sparklineElement: ReactNode | null = null;
|
||||
|
||||
for (let barIndex = 0; barIndex < values.length; barIndex++) {
|
||||
const displayValue = values[barIndex];
|
||||
const { angle, angleRange } = getValueAngleForValue(displayValue, startAngle, endAngle);
|
||||
const gradientStops = buildGradientColors(gradient, theme, displayValue);
|
||||
const gradientStops = gradient ? buildGradientColors(theme, displayValue) : undefined;
|
||||
const color = displayValue.display.color ?? FALLBACK_COLOR;
|
||||
const dimensions = calculateDimensions(
|
||||
width,
|
||||
@@ -131,7 +131,9 @@ export function RadialGauge(props: RadialGaugeProps) {
|
||||
// FIXME: I want to move the ids for these filters into a context which the children
|
||||
// can reference via a hook, rather than passing them down as props
|
||||
const spotlightGradientId = `spotlight-${barIndex}-${gaugeId}`;
|
||||
const spotlightGradientRef = endpointMarker === 'glow' ? `url(#${spotlightGradientId})` : undefined;
|
||||
const glowFilterId = `glow-${gaugeId}`;
|
||||
const glowFilterRef = glowBar ? `url(#${glowFilterId})` : undefined;
|
||||
|
||||
if (endpointMarker === 'glow') {
|
||||
defs.push(
|
||||
@@ -154,7 +156,7 @@ export function RadialGauge(props: RadialGaugeProps) {
|
||||
fieldDisplay={displayValue}
|
||||
angleRange={angleRange}
|
||||
startAngle={startAngle}
|
||||
glowFilter={`url(#${glowFilterId})`}
|
||||
glowFilter={glowFilterRef}
|
||||
segmentCount={segmentCount}
|
||||
segmentSpacing={segmentSpacing}
|
||||
shape={shape}
|
||||
@@ -170,8 +172,8 @@ export function RadialGauge(props: RadialGaugeProps) {
|
||||
angleRange={angleRange}
|
||||
startAngle={startAngle}
|
||||
roundedBars={roundedBars}
|
||||
glowFilter={`url(#${glowFilterId})`}
|
||||
endpointMarkerGlowFilter={`url(#${spotlightGradientId})`}
|
||||
glowFilter={glowFilterRef}
|
||||
endpointMarkerGlowFilter={spotlightGradientRef}
|
||||
shape={shape}
|
||||
gradient={gradientStops}
|
||||
fieldDisplay={displayValue}
|
||||
@@ -183,7 +185,7 @@ export function RadialGauge(props: RadialGaugeProps) {
|
||||
// These elements are only added for first value / bar
|
||||
if (barIndex === 0) {
|
||||
if (glowBar) {
|
||||
defs.push(<GlowGradient key="glow-filter" id={glowFilterId} barWidth={dimensions.barWidth} />);
|
||||
defs.push(<GlowGradient key={glowFilterId} id={glowFilterId} barWidth={dimensions.barWidth} />);
|
||||
}
|
||||
|
||||
if (glowCenter) {
|
||||
@@ -234,7 +236,7 @@ export function RadialGauge(props: RadialGaugeProps) {
|
||||
endAngle={endAngle}
|
||||
angleRange={angleRange}
|
||||
roundedBars={roundedBars}
|
||||
glowFilter={`url(#${glowFilterId})`}
|
||||
glowFilter={glowFilterRef}
|
||||
shape={shape}
|
||||
gradient={gradientStops}
|
||||
/>
|
||||
@@ -260,7 +262,7 @@ export function RadialGauge(props: RadialGaugeProps) {
|
||||
const body = (
|
||||
<>
|
||||
<svg width={width} height={height} role="img" aria-label={t('gauge.category-gauge', 'Gauge')}>
|
||||
<defs>{defs}</defs>
|
||||
{defs.length > 0 && <defs>{defs}</defs>}
|
||||
{graphics}
|
||||
</svg>
|
||||
{sparklineElement}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import { css } from '@emotion/css';
|
||||
import { memo } from 'react';
|
||||
|
||||
import {
|
||||
@@ -9,7 +8,6 @@ import {
|
||||
GrafanaTheme2,
|
||||
} from '@grafana/data';
|
||||
|
||||
import { useStyles2 } from '../../themes/ThemeContext';
|
||||
import { calculateFontSize } from '../../utils/measureText';
|
||||
|
||||
import { RadialShape, RadialTextMode, RadialGaugeDimensions } from './types';
|
||||
@@ -50,7 +48,6 @@ export const RadialText = memo(
|
||||
valueManualFontSize,
|
||||
nameManualFontSize,
|
||||
}: RadialTextProps) => {
|
||||
const styles = useStyles2(getStyles);
|
||||
const { centerX, centerY, radius, barWidth } = dimensions;
|
||||
|
||||
if (textMode === 'none') {
|
||||
@@ -106,10 +103,9 @@ export const RadialText = memo(
|
||||
const valueY = showName ? centerY - nameHeight * (1 - VALUE_SPACE_PERCENTAGE) : centerY;
|
||||
const nameY = showValue ? valueY + valueHeight * VALUE_SPACE_PERCENTAGE : centerY;
|
||||
const nameColor = showValue ? theme.colors.text.secondary : theme.colors.text.primary;
|
||||
const suffixShift = (valueFontSize - unitFontSize * LINE_HEIGHT_FACTOR) / 2;
|
||||
|
||||
// adjust the text up on gauges and when sparklines are present
|
||||
let yOffset = 0;
|
||||
let yOffset = valueFontSize / 4;
|
||||
if (shape === 'gauge') {
|
||||
// we render from the center of the gauge, so move up by half of half of the total height
|
||||
yOffset -= (valueHeight + nameHeight) / 4;
|
||||
@@ -126,15 +122,12 @@ export const RadialText = memo(
|
||||
y={valueY}
|
||||
fontSize={valueFontSize}
|
||||
fill={theme.colors.text.primary}
|
||||
className={styles.text}
|
||||
textAnchor="middle"
|
||||
dominantBaseline="middle"
|
||||
dominantBaseline="text-bottom"
|
||||
>
|
||||
<tspan fontSize={unitFontSize}>{displayValue.prefix ?? ''}</tspan>
|
||||
<tspan>{displayValue.text}</tspan>
|
||||
<tspan className={styles.text} fontSize={unitFontSize} dy={suffixShift}>
|
||||
{displayValue.suffix ?? ''}
|
||||
</tspan>
|
||||
<tspan fontSize={unitFontSize}>{displayValue.suffix ?? ''}</tspan>
|
||||
</text>
|
||||
)}
|
||||
{showName && (
|
||||
@@ -143,7 +136,7 @@ export const RadialText = memo(
|
||||
x={centerX}
|
||||
y={nameY}
|
||||
textAnchor="middle"
|
||||
dominantBaseline="middle"
|
||||
dominantBaseline="text-bottom"
|
||||
fill={nameColor}
|
||||
>
|
||||
{displayValue.title}
|
||||
@@ -155,9 +148,3 @@ export const RadialText = memo(
|
||||
);
|
||||
|
||||
RadialText.displayName = 'RadialText';
|
||||
|
||||
const getStyles = (_theme: GrafanaTheme2) => ({
|
||||
text: css({
|
||||
verticalAlign: 'bottom',
|
||||
}),
|
||||
});
|
||||
|
||||
@@ -1,17 +1,17 @@
|
||||
// Jest Snapshot v1, https://goo.gl/fbAQLP
|
||||
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for center x and y 1`] = `"M 150 110 A 90 90 0 1 1 149.98429203681178 110.00000137077838 A 10 10 0 0 1 149.98778269529805 130.00000106616096 A 70 70 0 1 0 150 130 A 10 10 0 0 1 150 110 Z"`;
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for center x and y 1`] = `"M 150 120 A 80 80 0 1 1 149.98603736605492 120.00000121846968"`;
|
||||
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for half arc 1`] = `"M 100 10 A 90 90 0 0 1 100 190 L 100 170 A 70 70 0 0 0 100 30 L 100 10 Z"`;
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for half arc 1`] = `"M 100 20 A 80 80 0 0 1 100 180"`;
|
||||
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for narrow bar width 1`] = `"M 100 17.5 A 82.5 82.5 0 0 1 100 182.5 L 100 177.5 A 77.5 77.5 0 0 0 100 22.5 L 100 17.5 Z"`;
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for narrow bar width 1`] = `"M 100 20 A 80 80 0 0 1 100 180"`;
|
||||
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for narrow radius 1`] = `"M 100 40 A 60 60 0 0 1 100 160 L 100 140 A 40 40 0 0 0 100 60 L 100 40 Z"`;
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for narrow radius 1`] = `"M 100 50 A 50 50 0 0 1 100 150"`;
|
||||
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for quarter arc 1`] = `"M 100 10 A 90 90 0 0 1 190 100 L 170 100 A 70 70 0 0 0 100 30 L 100 10 Z"`;
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for quarter arc 1`] = `"M 100 20 A 80 80 0 0 1 180 100"`;
|
||||
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for rounded bars 1`] = `"M 100 10 A 90 90 0 1 1 10 100.00000000000001 A 10 10 0 0 1 30 100.00000000000001 A 70 70 0 1 0 100 30 A 10 10 0 0 1 100 10 Z"`;
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for rounded bars 1`] = `"M 100 20 A 80 80 0 1 1 20 100.00000000000001"`;
|
||||
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for three quarter arc 1`] = `"M 100 10 A 90 90 0 1 1 10 100.00000000000001 L 30 100.00000000000001 A 70 70 0 1 0 100 30 L 100 10 Z"`;
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for three quarter arc 1`] = `"M 100 20 A 80 80 0 1 1 20 100.00000000000001"`;
|
||||
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for wide bar width 1`] = `"M 100 -5 A 105 105 0 0 1 100 205 L 100 155 A 55 55 0 0 0 100 45 L 100 -5 Z"`;
|
||||
exports[`RadialGauge utils drawRadialArcPath should draw correct path for wide bar width 1`] = `"M 100 20 A 80 80 0 0 1 100 180"`;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { defaultsDeep } from 'lodash';
|
||||
|
||||
import { createTheme, FALLBACK_COLOR, Field, FieldDisplay, FieldType, ThresholdsMode } from '@grafana/data';
|
||||
import { createTheme, Field, FieldDisplay, FieldType, ThresholdsMode } from '@grafana/data';
|
||||
import { FieldColorModeId } from '@grafana/schema';
|
||||
|
||||
import {
|
||||
@@ -50,35 +50,9 @@ describe('RadialGauge color utils', () => {
|
||||
},
|
||||
});
|
||||
|
||||
it('should return the baseColor if gradient is false-y', () => {
|
||||
expect(
|
||||
buildGradientColors(false, createTheme(), buildFieldDisplay(createField(FieldColorModeId.Fixed)), '#FF0000')
|
||||
).toEqual([
|
||||
{ color: '#FF0000', percent: 0 },
|
||||
{ color: '#FF0000', percent: 1 },
|
||||
]);
|
||||
|
||||
expect(
|
||||
buildGradientColors(undefined, createTheme(), buildFieldDisplay(createField(FieldColorModeId.Fixed)), '#FF0000')
|
||||
).toEqual([
|
||||
{ color: '#FF0000', percent: 0 },
|
||||
{ color: '#FF0000', percent: 1 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('uses the fallback color if no baseColor is set', () => {
|
||||
expect(buildGradientColors(false, createTheme(), buildFieldDisplay(createField(FieldColorModeId.Fixed)))).toEqual(
|
||||
[
|
||||
{ color: FALLBACK_COLOR, percent: 0 },
|
||||
{ color: FALLBACK_COLOR, percent: 1 },
|
||||
]
|
||||
);
|
||||
});
|
||||
|
||||
it('should map threshold colors correctly (with baseColor if displayProcessor does not return colors)', () => {
|
||||
expect(
|
||||
buildGradientColors(
|
||||
true,
|
||||
createTheme(),
|
||||
buildFieldDisplay(createField(FieldColorModeId.Thresholds), {
|
||||
view: { getFieldDisplayProcessor: jest.fn(() => jest.fn(() => ({ color: '#444444' }))) },
|
||||
@@ -89,14 +63,13 @@ describe('RadialGauge color utils', () => {
|
||||
|
||||
it('should map threshold colors correctly (with baseColor if displayProcessor does not return colors)', () => {
|
||||
expect(
|
||||
buildGradientColors(true, createTheme(), buildFieldDisplay(createField(FieldColorModeId.Thresholds)), '#FF0000')
|
||||
buildGradientColors(createTheme(), buildFieldDisplay(createField(FieldColorModeId.Thresholds)), '#FF0000')
|
||||
).toMatchSnapshot();
|
||||
});
|
||||
|
||||
it('should return gradient colors for continuous color modes', () => {
|
||||
expect(
|
||||
buildGradientColors(
|
||||
true,
|
||||
createTheme(),
|
||||
buildFieldDisplay(createField(FieldColorModeId.ContinuousCividis)),
|
||||
'#00FF00'
|
||||
@@ -107,7 +80,6 @@ describe('RadialGauge color utils', () => {
|
||||
it.each(['dark', 'light'] as const)('should return gradient colors for by-value color mode in %s theme', (mode) => {
|
||||
expect(
|
||||
buildGradientColors(
|
||||
true,
|
||||
createTheme({ colors: { mode } }),
|
||||
buildFieldDisplay(createField(FieldColorModeId.ContinuousBlues))
|
||||
)
|
||||
@@ -117,7 +89,6 @@ describe('RadialGauge color utils', () => {
|
||||
it.each(['dark', 'light'] as const)('should return gradient colors for fixed color mode in %s theme', (mode) => {
|
||||
expect(
|
||||
buildGradientColors(
|
||||
true,
|
||||
createTheme({ colors: { mode } }),
|
||||
buildFieldDisplay(createField(FieldColorModeId.Fixed)),
|
||||
'#442299'
|
||||
|
||||
@@ -7,18 +7,10 @@ import { GradientStop, RadialShape } from './types';
|
||||
import { getFieldConfigMinMax, getFieldDisplayProcessor, getValuePercentageForValue } from './utils';
|
||||
|
||||
export function buildGradientColors(
|
||||
gradient = false,
|
||||
theme: GrafanaTheme2,
|
||||
fieldDisplay: FieldDisplay,
|
||||
baseColor = fieldDisplay.display.color ?? FALLBACK_COLOR
|
||||
): GradientStop[] {
|
||||
if (!gradient) {
|
||||
return [
|
||||
{ color: baseColor, percent: 0 },
|
||||
{ color: baseColor, percent: 1 },
|
||||
];
|
||||
}
|
||||
|
||||
const colorMode = getFieldColorMode(fieldDisplay.field.color?.mode);
|
||||
|
||||
// thresholds get special handling
|
||||
|
||||
@@ -2,14 +2,20 @@ import { colorManipulator, GrafanaTheme2 } from '@grafana/data';
|
||||
|
||||
import { RadialGaugeDimensions } from './types';
|
||||
|
||||
// some utility transparent white colors for gradients
|
||||
const TRANSPARENT_WHITE = '#ffffff00';
|
||||
const MOSTLY_TRANSPARENT_WHITE = '#ffffff88';
|
||||
const MOSTLY_OPAQUE_WHITE = '#ffffffbb';
|
||||
const OPAQUE_WHITE = '#ffffff';
|
||||
|
||||
const MIN_GLOW_SIZE = 0.75;
|
||||
const GLOW_FACTOR = 0.08;
|
||||
|
||||
export interface GlowGradientProps {
|
||||
id: string;
|
||||
barWidth: number;
|
||||
}
|
||||
|
||||
const MIN_GLOW_SIZE = 0.75;
|
||||
const GLOW_FACTOR = 0.08;
|
||||
|
||||
export function GlowGradient({ id, barWidth }: GlowGradientProps) {
|
||||
// 0.75 is the minimum glow size, and it scales with bar width
|
||||
const glowSize = MIN_GLOW_SIZE + barWidth * GLOW_FACTOR;
|
||||
@@ -27,16 +33,6 @@ export function GlowGradient({ id, barWidth }: GlowGradientProps) {
|
||||
|
||||
const CENTER_GLOW_OPACITY = 0.25;
|
||||
|
||||
export function CenterGlowGradient({ gaugeId, color }: { gaugeId: string; color: string }) {
|
||||
const transparentColor = colorManipulator.alpha(color, CENTER_GLOW_OPACITY);
|
||||
return (
|
||||
<radialGradient id={`circle-glow-${gaugeId}`} r="50%" fr="0%">
|
||||
<stop offset="0%" stopColor={transparentColor} />
|
||||
<stop offset="90%" stopColor={'#ffffff00'} />
|
||||
</radialGradient>
|
||||
);
|
||||
}
|
||||
|
||||
export interface CenterGlowProps {
|
||||
dimensions: RadialGaugeDimensions;
|
||||
gaugeId: string;
|
||||
@@ -52,7 +48,7 @@ export function MiddleCircleGlow({ dimensions, gaugeId, color }: CenterGlowProps
|
||||
<defs>
|
||||
<radialGradient id={gradientId} r="50%" fr="0%">
|
||||
<stop offset="0%" stopColor={transparentColor} />
|
||||
<stop offset="90%" stopColor="#ffffff00" />
|
||||
<stop offset="90%" stopColor={TRANSPARENT_WHITE} />
|
||||
</radialGradient>
|
||||
</defs>
|
||||
<g>
|
||||
@@ -62,19 +58,15 @@ export function MiddleCircleGlow({ dimensions, gaugeId, color }: CenterGlowProps
|
||||
);
|
||||
}
|
||||
|
||||
export function SpotlightGradient({
|
||||
id,
|
||||
dimensions,
|
||||
roundedBars,
|
||||
angle,
|
||||
theme,
|
||||
}: {
|
||||
interface SpotlightGradientProps {
|
||||
id: string;
|
||||
dimensions: RadialGaugeDimensions;
|
||||
angle: number;
|
||||
roundedBars: boolean;
|
||||
theme: GrafanaTheme2;
|
||||
}) {
|
||||
}
|
||||
|
||||
export function SpotlightGradient({ id, dimensions, roundedBars, angle, theme }: SpotlightGradientProps) {
|
||||
if (theme.isLight) {
|
||||
return null;
|
||||
}
|
||||
@@ -88,9 +80,9 @@ export function SpotlightGradient({
|
||||
|
||||
return (
|
||||
<linearGradient x1={x1} y1={y1} x2={x2} y2={y2} id={id} gradientUnits="userSpaceOnUse">
|
||||
<stop offset="0%" stopColor="#ffffff00" />
|
||||
<stop offset="95%" stopColor="#ffffff88" />
|
||||
{roundedBars && <stop offset="100%" stopColor={roundedBars ? '#ffffffbb' : 'white'} />}
|
||||
<stop offset="0%" stopColor={TRANSPARENT_WHITE} />
|
||||
<stop offset="95%" stopColor={MOSTLY_TRANSPARENT_WHITE} />
|
||||
{roundedBars && <stop offset="100%" stopColor={roundedBars ? MOSTLY_OPAQUE_WHITE : OPAQUE_WHITE} />}
|
||||
</linearGradient>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -2,6 +2,8 @@ export type RadialTextMode = 'auto' | 'value_and_name' | 'value' | 'name' | 'non
|
||||
export type RadialShape = 'circle' | 'gauge';
|
||||
|
||||
export interface RadialGaugeDimensions {
|
||||
vizHeight: number;
|
||||
vizWidth: number;
|
||||
margin: number;
|
||||
radius: number;
|
||||
centerX: number;
|
||||
|
||||
@@ -283,7 +283,9 @@ describe('RadialGauge utils', () => {
|
||||
});
|
||||
|
||||
describe('drawRadialArcPath', () => {
|
||||
const defaultDims: RadialGaugeDimensions = Object.freeze({
|
||||
const defaultDims = Object.freeze({
|
||||
vizHeight: 220,
|
||||
vizWidth: 220,
|
||||
centerX: 100,
|
||||
centerY: 100,
|
||||
radius: 80,
|
||||
@@ -297,7 +299,7 @@ describe('RadialGauge utils', () => {
|
||||
scaleLabelsSpacing: 0,
|
||||
scaleLabelsRadius: 0,
|
||||
gaugeBottomY: 0,
|
||||
});
|
||||
}) satisfies RadialGaugeDimensions;
|
||||
|
||||
it.each([
|
||||
{ description: 'quarter arc', startAngle: 0, endAngle: 90 },
|
||||
@@ -324,11 +326,6 @@ describe('RadialGauge utils', () => {
|
||||
expect(drawRadialArcPath(0, 360, defaultDims)).toEqual(drawRadialArcPath(0, 359.99, defaultDims));
|
||||
expect(drawRadialArcPath(0, 380, defaultDims)).toEqual(drawRadialArcPath(0, 380, defaultDims));
|
||||
});
|
||||
|
||||
it('should return empty string if inner radius collapses to zero or below', () => {
|
||||
const smallRadiusDims = { ...defaultDims, radius: 5, barWidth: 20 };
|
||||
expect(drawRadialArcPath(0, 180, smallRadiusDims)).toBe('');
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -341,7 +338,9 @@ describe('RadialGauge utils', () => {
|
||||
|
||||
describe('getOptimalSegmentCount', () => {
|
||||
it('should adjust segment count based on dimensions and spacing', () => {
|
||||
const dimensions: RadialGaugeDimensions = {
|
||||
const dimensions = {
|
||||
vizHeight: 220,
|
||||
vizWidth: 220,
|
||||
centerX: 100,
|
||||
centerY: 100,
|
||||
radius: 80,
|
||||
@@ -355,7 +354,7 @@ describe('RadialGauge utils', () => {
|
||||
scaleLabelsSpacing: 0,
|
||||
scaleLabelsRadius: 0,
|
||||
gaugeBottomY: 0,
|
||||
};
|
||||
} satisfies RadialGaugeDimensions;
|
||||
|
||||
expect(getOptimalSegmentCount(dimensions, 2, 10, 360)).toBe(8);
|
||||
expect(getOptimalSegmentCount(dimensions, 1, 5, 360)).toBe(5);
|
||||
|
||||
@@ -155,6 +155,8 @@ export function calculateDimensions(
|
||||
}
|
||||
|
||||
return {
|
||||
vizWidth: width,
|
||||
vizHeight: height,
|
||||
margin,
|
||||
gaugeBottomY: centerY + belowCenterY,
|
||||
radius: innerRadius,
|
||||
@@ -185,7 +187,7 @@ export function drawRadialArcPath(
|
||||
dimensions: RadialGaugeDimensions,
|
||||
roundedBars?: boolean
|
||||
): string {
|
||||
const { radius, centerX, centerY, barWidth } = dimensions;
|
||||
const { radius, centerX, centerY } = dimensions;
|
||||
|
||||
// For some reason a 100% full arc cannot be rendered
|
||||
if (endAngle >= 360) {
|
||||
@@ -197,66 +199,12 @@ export function drawRadialArcPath(
|
||||
|
||||
const largeArc = endAngle > 180 ? 1 : 0;
|
||||
|
||||
const outerR = radius + barWidth / 2;
|
||||
const innerR = Math.max(0, radius - barWidth / 2);
|
||||
if (innerR <= 0) {
|
||||
return ''; // cannot draw arc with 0 inner radius
|
||||
}
|
||||
let x1 = centerX + radius * Math.cos(startRadians);
|
||||
let y1 = centerY + radius * Math.sin(startRadians);
|
||||
let x2 = centerX + radius * Math.cos(endRadians);
|
||||
let y2 = centerY + radius * Math.sin(endRadians);
|
||||
|
||||
// get points for both an inner and outer arc. we draw
|
||||
// the arc entirely with a path's fill instead of using stroke
|
||||
// so that it can be used as a clip-path.
|
||||
const ox1 = centerX + outerR * Math.cos(startRadians);
|
||||
const oy1 = centerY + outerR * Math.sin(startRadians);
|
||||
const ox2 = centerX + outerR * Math.cos(endRadians);
|
||||
const oy2 = centerY + outerR * Math.sin(endRadians);
|
||||
|
||||
const ix1 = centerX + innerR * Math.cos(startRadians);
|
||||
const iy1 = centerY + innerR * Math.sin(startRadians);
|
||||
const ix2 = centerX + innerR * Math.cos(endRadians);
|
||||
const iy2 = centerY + innerR * Math.sin(endRadians);
|
||||
|
||||
// calculate the cap width in case we're drawing rounded bars
|
||||
const capR = barWidth / 2;
|
||||
|
||||
const pathParts = [
|
||||
// start at outer start
|
||||
'M',
|
||||
ox1,
|
||||
oy1,
|
||||
// outer arc from start to end (clockwise)
|
||||
'A',
|
||||
outerR,
|
||||
outerR,
|
||||
0,
|
||||
largeArc,
|
||||
1,
|
||||
ox2,
|
||||
oy2,
|
||||
];
|
||||
|
||||
if (roundedBars) {
|
||||
// rounded end cap: small arc connecting outer end to inner end
|
||||
pathParts.push('A', capR, capR, 0, 0, 1, ix2, iy2);
|
||||
} else {
|
||||
// straight line to inner end (square butt)
|
||||
pathParts.push('L', ix2, iy2);
|
||||
}
|
||||
|
||||
// inner arc from end back to start (counter-clockwise)
|
||||
pathParts.push('A', innerR, innerR, 0, largeArc, 0, ix1, iy1);
|
||||
|
||||
if (roundedBars) {
|
||||
// rounded start cap: small arc connecting inner start back to outer start
|
||||
pathParts.push('A', capR, capR, 0, 0, 1, ox1, oy1);
|
||||
} else {
|
||||
// straight line back to outer start (square butt)
|
||||
pathParts.push('L', ox1, oy1);
|
||||
}
|
||||
|
||||
pathParts.push('Z');
|
||||
|
||||
return pathParts.join(' ');
|
||||
return ['M', x1, y1, 'A', radius, radius, 0, largeArc, 1, x2, y2].join(' ');
|
||||
}
|
||||
|
||||
export function getAngleBetweenSegments(segmentSpacing: number, segmentCount: number, range: number) {
|
||||
|
||||
@@ -1108,12 +1108,18 @@ export function parseStyleJson(rawValue: unknown): CSSProperties | void {
|
||||
}
|
||||
}
|
||||
|
||||
// Safari 26 introduced rendering bugs which require us to disable several features of the table.
|
||||
// Safari 26.0 introduced rendering bugs which require us to disable several features of the table.
|
||||
// The bugs were later fixed in Safari 26.2.
|
||||
export const IS_SAFARI_26 = (() => {
|
||||
if (navigator == null) {
|
||||
return false;
|
||||
}
|
||||
const userAgent = navigator.userAgent;
|
||||
const safariVersionMatch = userAgent.match(/Version\/(\d+)\./);
|
||||
return safariVersionMatch && parseInt(safariVersionMatch[1], 10) === 26;
|
||||
const safariVersionMatch = userAgent.match(/Version\/(\d+)\.(\d+)/);
|
||||
if (!safariVersionMatch) {
|
||||
return false;
|
||||
}
|
||||
const majorVersion = +safariVersionMatch[1];
|
||||
const minorVersion = +safariVersionMatch[2];
|
||||
return majorVersion === 26 && minorVersion <= 1;
|
||||
})();
|
||||
|
||||
@@ -66,6 +66,6 @@ export interface UserView {
|
||||
avatarUrl?: string;
|
||||
};
|
||||
/** Datetime string when the user was last active */
|
||||
lastActiveAt: DateTimeInput;
|
||||
lastActiveAt?: DateTimeInput;
|
||||
}
|
||||
```
|
||||
|
||||
@@ -10,7 +10,7 @@ import { Tooltip } from '../Tooltip/Tooltip';
|
||||
import { UserView } from './types';
|
||||
|
||||
export interface UserIconProps {
|
||||
/** An object that contains the user's details and 'lastActiveAt' status */
|
||||
/** An object that contains the user's details and an optional 'lastActiveAt' status */
|
||||
userView: UserView;
|
||||
/** A boolean value that determines whether the tooltip should be shown or not */
|
||||
showTooltip?: boolean;
|
||||
@@ -64,7 +64,8 @@ export const UserIcon = ({
|
||||
showTooltip = true,
|
||||
}: PropsWithChildren<UserIconProps>) => {
|
||||
const { user, lastActiveAt } = userView;
|
||||
const isActive = dateTime(lastActiveAt).diff(dateTime(), 'minutes', true) >= -15;
|
||||
const hasActive = lastActiveAt !== undefined && lastActiveAt !== null;
|
||||
const isActive = hasActive && dateTime(lastActiveAt).diff(dateTime(), 'minutes', true) >= -15;
|
||||
const theme = useTheme2();
|
||||
const styles = useMemo(() => getStyles(theme, isActive), [theme, isActive]);
|
||||
const content = (
|
||||
@@ -88,18 +89,20 @@ export const UserIcon = ({
|
||||
const tooltip = (
|
||||
<div className={styles.tooltipContainer}>
|
||||
<div className={styles.tooltipName}>{user.name}</div>
|
||||
<div className={styles.tooltipDate}>
|
||||
{isActive ? (
|
||||
<div className={styles.dotContainer}>
|
||||
<span>
|
||||
<Trans i18nKey="grafana-ui.user-icon.active-text">Active last 15m</Trans>
|
||||
</span>
|
||||
<span className={styles.dot}></span>
|
||||
</div>
|
||||
) : (
|
||||
formatViewed(lastActiveAt)
|
||||
)}
|
||||
</div>
|
||||
{hasActive && (
|
||||
<div className={styles.tooltipDate}>
|
||||
{isActive ? (
|
||||
<div className={styles.dotContainer}>
|
||||
<span>
|
||||
<Trans i18nKey="grafana-ui.user-icon.active-text">Active last 15m</Trans>
|
||||
</span>
|
||||
<span className={styles.dot}></span>
|
||||
</div>
|
||||
) : (
|
||||
formatViewed(lastActiveAt)
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
|
||||
|
||||
@@ -60,6 +60,6 @@ export interface UserView {
|
||||
avatarUrl?: string;
|
||||
};
|
||||
/** Datetime string when the user was last active */
|
||||
lastActiveAt: DateTimeInput;
|
||||
lastActiveAt?: DateTimeInput;
|
||||
}
|
||||
```
|
||||
|
||||
@@ -9,7 +9,7 @@ import { UserIcon } from './UserIcon';
|
||||
import { UserView } from './types';
|
||||
|
||||
export interface UsersIndicatorProps {
|
||||
/** An object that contains the user's details and 'lastActiveAt' status */
|
||||
/** An object that contains the user's details and an optional 'lastActiveAt' status */
|
||||
users: UserView[];
|
||||
/** A limit of how many user icons to show before collapsing them and showing a number of users instead */
|
||||
limit?: number;
|
||||
@@ -40,7 +40,7 @@ export const UsersIndicator = ({ users, onClick, limit = 4 }: UsersIndicatorProp
|
||||
aria-label={t('grafana-ui.users-indicator.container-label', 'Users indicator container')}
|
||||
>
|
||||
{limitReached && (
|
||||
<UserIcon onClick={onClick} userView={{ user: { name: 'Extra users' }, lastActiveAt: '' }} showTooltip={false}>
|
||||
<UserIcon onClick={onClick} userView={{ user: { name: 'Extra users' } }} showTooltip={false}>
|
||||
{tooManyUsers
|
||||
? // eslint-disable-next-line @grafana/i18n/no-untranslated-strings
|
||||
'...'
|
||||
|
||||
@@ -8,5 +8,5 @@ export interface UserView {
|
||||
avatarUrl?: string;
|
||||
};
|
||||
/** Datetime string when the user was last active */
|
||||
lastActiveAt: DateTimeInput;
|
||||
lastActiveAt?: DateTimeInput;
|
||||
}
|
||||
|
||||
@@ -46,6 +46,7 @@ import (
|
||||
_ "sigs.k8s.io/randfill"
|
||||
_ "xorm.io/builder"
|
||||
|
||||
_ "github.com/aws/aws-sdk-go-v2/service/secretsmanager"
|
||||
_ "github.com/grafana/authlib/authn"
|
||||
_ "github.com/grafana/authlib/authz"
|
||||
_ "github.com/grafana/authlib/cache"
|
||||
|
||||
@@ -2,54 +2,84 @@ package log
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/grafana/grafana/pkg/infra/log"
|
||||
"log/slog"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// loggerFactory is a function that creates a Logger given a name.
|
||||
// It can be set by calling SetLoggerFactory to use a custom logger implementation.
|
||||
var loggerFactory func(name string) Logger
|
||||
|
||||
// SetLoggerFactory sets the factory function used to create loggers.
|
||||
// This should be called during initialization to register a custom logger implementation.
|
||||
// If not set, a default slog-based logger will be used.
|
||||
func SetLoggerFactory(factory func(name string) Logger) {
|
||||
loggerFactory = factory
|
||||
}
|
||||
|
||||
var slogLogManager = &slogLoggerManager{
|
||||
cache: sync.Map{},
|
||||
}
|
||||
|
||||
func New(name string) Logger {
|
||||
return &grafanaInfraLogWrapper{
|
||||
l: log.New(name),
|
||||
if loggerFactory != nil {
|
||||
return loggerFactory(name)
|
||||
}
|
||||
// add a caching layer since slog doesn't perform any caching itself
|
||||
return slogLogManager.getOrCreate(name)
|
||||
}
|
||||
|
||||
type grafanaInfraLogWrapper struct {
|
||||
l *log.ConcreteLogger
|
||||
type slogLoggerManager struct {
|
||||
cache sync.Map
|
||||
}
|
||||
|
||||
func (d *grafanaInfraLogWrapper) New(ctx ...any) Logger {
|
||||
func (m *slogLoggerManager) getOrCreate(name string) Logger {
|
||||
if cached, ok := m.cache.Load(name); ok {
|
||||
return cached.(*slogLogger)
|
||||
}
|
||||
|
||||
logger := &slogLogger{
|
||||
logger: slog.Default().With("logger", name),
|
||||
name: name,
|
||||
}
|
||||
actual, _ := m.cache.LoadOrStore(name, logger)
|
||||
return actual.(*slogLogger)
|
||||
}
|
||||
|
||||
type slogLogger struct {
|
||||
logger *slog.Logger
|
||||
name string
|
||||
}
|
||||
|
||||
func (l *slogLogger) New(ctx ...any) Logger {
|
||||
if len(ctx) == 0 {
|
||||
return &grafanaInfraLogWrapper{
|
||||
l: d.l.New(),
|
||||
return &slogLogger{
|
||||
logger: l.logger,
|
||||
name: l.name,
|
||||
}
|
||||
}
|
||||
|
||||
return &grafanaInfraLogWrapper{
|
||||
l: d.l.New(ctx...),
|
||||
return &slogLogger{
|
||||
logger: l.logger.With(ctx...),
|
||||
name: l.name,
|
||||
}
|
||||
}
|
||||
|
||||
func (d *grafanaInfraLogWrapper) Debug(msg string, ctx ...any) {
|
||||
d.l.Debug(msg, ctx...)
|
||||
func (l *slogLogger) Debug(msg string, ctx ...any) {
|
||||
l.logger.Debug(msg, ctx...)
|
||||
}
|
||||
|
||||
func (d *grafanaInfraLogWrapper) Info(msg string, ctx ...any) {
|
||||
d.l.Info(msg, ctx...)
|
||||
func (l *slogLogger) Info(msg string, ctx ...any) {
|
||||
l.logger.Info(msg, ctx...)
|
||||
}
|
||||
|
||||
func (d *grafanaInfraLogWrapper) Warn(msg string, ctx ...any) {
|
||||
d.l.Warn(msg, ctx...)
|
||||
func (l *slogLogger) Warn(msg string, ctx ...any) {
|
||||
l.logger.Warn(msg, ctx...)
|
||||
}
|
||||
|
||||
func (d *grafanaInfraLogWrapper) Error(msg string, ctx ...any) {
|
||||
d.l.Error(msg, ctx...)
|
||||
func (l *slogLogger) Error(msg string, ctx ...any) {
|
||||
l.logger.Error(msg, ctx...)
|
||||
}
|
||||
|
||||
func (d *grafanaInfraLogWrapper) FromContext(ctx context.Context) Logger {
|
||||
concreteInfraLogger, ok := d.l.FromContext(ctx).(*log.ConcreteLogger)
|
||||
if !ok {
|
||||
return d.New()
|
||||
}
|
||||
return &grafanaInfraLogWrapper{
|
||||
l: concreteInfraLogger,
|
||||
}
|
||||
func (l *slogLogger) FromContext(_ context.Context) Logger {
|
||||
return l
|
||||
}
|
||||
|
||||
@@ -170,6 +170,13 @@ func (s *DashboardStarsStorage) write(ctx context.Context, obj *collections.Star
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Send an error if we try to save a non-dashboard star
|
||||
for _, res := range obj.Spec.Resource {
|
||||
if res.Group != "dashboard.grafana.app" || res.Kind != "Dashboard" {
|
||||
return nil, fmt.Errorf("only dashboard stars are supported until the migration to unified storage is complete")
|
||||
}
|
||||
}
|
||||
|
||||
user, err := s.users.GetByUID(ctx, &user.GetUserByUIDQuery{
|
||||
UID: owner.Identifier,
|
||||
})
|
||||
|
||||
@@ -276,7 +276,7 @@ func (b *APIBuilder) oneFlagHandler(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if b.providerType == setting.GOFFProviderType {
|
||||
if b.providerType == setting.GOFFProviderType || b.providerType == setting.OFREPProviderType {
|
||||
b.proxyFlagReq(ctx, flagKey, isAuthedReq, w, r)
|
||||
return
|
||||
}
|
||||
@@ -304,7 +304,7 @@ func (b *APIBuilder) allFlagsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
isAuthedReq := b.isAuthenticatedRequest(r)
|
||||
span.SetAttributes(attribute.Bool("authenticated", isAuthedReq))
|
||||
|
||||
if b.providerType == setting.GOFFProviderType {
|
||||
if b.providerType == setting.GOFFProviderType || b.providerType == setting.OFREPProviderType {
|
||||
b.proxyAllFlagReq(ctx, isAuthedReq, w, r)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -9,6 +9,11 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
)
|
||||
|
||||
const (
|
||||
// This constant can be used as a key in resource tags
|
||||
GrafanaSecretsManagerName = "grafana-secrets-manager"
|
||||
)
|
||||
|
||||
var (
|
||||
// The name used to refer to the system keeper
|
||||
SystemKeeperName = "system"
|
||||
@@ -102,8 +107,8 @@ func (s ExternalID) String() string {
|
||||
// Keeper is the interface for secret keepers.
|
||||
type Keeper interface {
|
||||
Store(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64, exposedValueOrRef string) (ExternalID, error)
|
||||
Update(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64, exposedValueOrRef string) error
|
||||
Expose(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64) (secretv1beta1.ExposedSecureValue, error)
|
||||
RetrieveReference(ctx context.Context, cfg secretv1beta1.KeeperConfig, ref string) (secretv1beta1.ExposedSecureValue, error)
|
||||
Delete(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64) error
|
||||
}
|
||||
|
||||
|
||||
@@ -21,8 +21,10 @@ type DecryptSecureValue struct {
|
||||
}
|
||||
|
||||
var (
|
||||
ErrSecureValueNotFound = errors.New("secure value not found")
|
||||
ErrSecureValueAlreadyExists = errors.New("secure value already exists")
|
||||
ErrSecureValueNotFound = errors.New("secure value not found")
|
||||
ErrSecureValueAlreadyExists = errors.New("secure value already exists")
|
||||
ErrReferenceWithSystemKeeper = errors.New("tried to create secure value using reference with system keeper, references can only be used with 3rd party keepers")
|
||||
ErrSecureValueMissingSecretAndRef = errors.New("secure value spec doesn't have neither a secret or reference")
|
||||
)
|
||||
|
||||
type ReadOpts struct {
|
||||
|
||||
@@ -103,9 +103,12 @@ func (w *Worker) Cleanup(ctx context.Context, sv *secretv1beta1.SecureValue) err
|
||||
return fmt.Errorf("getting keeper for config: namespace=%+v keeperName=%+v %w", sv.Namespace, sv.Status.Keeper, err)
|
||||
}
|
||||
|
||||
// Keeper deletion is idempotent
|
||||
if err := keeper.Delete(ctx, keeperCfg, xkube.Namespace(sv.Namespace), sv.Name, sv.Status.Version); err != nil {
|
||||
return fmt.Errorf("deleting secure value from keeper: %w", err)
|
||||
// If the secure value doesn't use a reference, delete the secret
|
||||
if sv.Spec.Ref == nil {
|
||||
// Keeper deletion is idempotent
|
||||
if err := keeper.Delete(ctx, keeperCfg, xkube.Namespace(sv.Namespace), sv.Name, sv.Status.Version); err != nil {
|
||||
return fmt.Errorf("deleting secure value from keeper: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Metadata deletion is not idempotent but not found errors are ignored
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package garbagecollectionworker_test
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -97,27 +96,33 @@ func TestBasic(t *testing.T) {
|
||||
require.NoError(t, sut.GarbageCollectionWorker.Cleanup(t.Context(), sv))
|
||||
require.NoError(t, sut.GarbageCollectionWorker.Cleanup(t.Context(), sv))
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
decryptersGen = rapid.SampledFrom([]string{"svc1", "svc2", "svc3", "svc4", "svc5"})
|
||||
nameGen = rapid.SampledFrom([]string{"n1", "n2", "n3", "n4", "n5"})
|
||||
namespaceGen = rapid.SampledFrom([]string{"ns1", "ns2", "ns3", "ns4", "ns5"})
|
||||
anySecureValueGen = rapid.Custom(func(t *rapid.T) *secretv1beta1.SecureValue {
|
||||
return &secretv1beta1.SecureValue{
|
||||
t.Run("cleaning up secure values that use references", func(t *testing.T) {
|
||||
sut := testutils.Setup(t)
|
||||
|
||||
keeper, err := sut.CreateAWSKeeper(t.Context())
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(keeper.Namespace), keeper.Name))
|
||||
|
||||
sv, err := sut.CreateSv(t.Context(), testutils.CreateSvWithSv(&secretv1beta1.SecureValue{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nameGen.Draw(t, "name"),
|
||||
Namespace: namespaceGen.Draw(t, "ns"),
|
||||
Namespace: keeper.Namespace,
|
||||
Name: "sv1",
|
||||
},
|
||||
Spec: secretv1beta1.SecureValueSpec{
|
||||
Description: rapid.SampledFrom([]string{"d1", "d2", "d3", "d4", "d5"}).Draw(t, "description"),
|
||||
Value: ptr.To(secretv1beta1.NewExposedSecureValue(rapid.SampledFrom([]string{"v1", "v2", "v3", "v4", "v5"}).Draw(t, "value"))),
|
||||
Decrypters: rapid.SliceOfDistinct(decryptersGen, func(v string) string { return v }).Draw(t, "decrypters"),
|
||||
Description: "desc1",
|
||||
Ref: ptr.To("ref1"),
|
||||
Decrypters: []string{"decrypter1"},
|
||||
},
|
||||
Status: secretv1beta1.SecureValueStatus{},
|
||||
}
|
||||
}))
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = sut.DeleteSv(t.Context(), sv.Namespace, sv.Name)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, sut.GarbageCollectionWorker.Cleanup(t.Context(), sv))
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
func TestProperty(t *testing.T) {
|
||||
t.Parallel()
|
||||
@@ -126,26 +131,59 @@ func TestProperty(t *testing.T) {
|
||||
|
||||
rapid.Check(t, func(t *rapid.T) {
|
||||
sut := testutils.Setup(tt)
|
||||
model := newModel()
|
||||
model := testutils.NewModelGsm(nil)
|
||||
|
||||
t.Repeat(map[string]func(*rapid.T){
|
||||
"create": func(t *rapid.T) {
|
||||
sv := anySecureValueGen.Draw(t, "sv")
|
||||
var sv *secretv1beta1.SecureValue
|
||||
if rapid.Bool().Draw(t, "withRef") {
|
||||
sv = testutils.AnySecureValueWithRefGen.Draw(t, "sv")
|
||||
} else {
|
||||
sv = testutils.AnySecureValueGen.Draw(t, "sv")
|
||||
}
|
||||
|
||||
svCopy := sv.DeepCopy()
|
||||
|
||||
createdSv, err := sut.CreateSv(t.Context(), testutils.CreateSvWithSv(sv))
|
||||
svCopy.UID = createdSv.UID
|
||||
modelErr := model.create(sut.Clock.Now(), svCopy)
|
||||
if err == nil {
|
||||
svCopy.UID = createdSv.UID
|
||||
}
|
||||
_, modelErr := model.Create(sut.Clock.Now(), svCopy)
|
||||
require.ErrorIs(t, err, modelErr)
|
||||
},
|
||||
"createKeeper": func(t *rapid.T) {
|
||||
input := testutils.AnyKeeperGen.Draw(t, "keeper")
|
||||
modelKeeper, modelErr := model.CreateKeeper(input)
|
||||
keeper, err := sut.KeeperMetadataStorage.Create(t.Context(), input, "actor-uid")
|
||||
if err != nil || modelErr != nil {
|
||||
require.ErrorIs(t, err, modelErr)
|
||||
return
|
||||
}
|
||||
require.Equal(t, modelKeeper.Name, keeper.Name)
|
||||
},
|
||||
"setKeeperAsActive": func(t *rapid.T) {
|
||||
namespace := testutils.NamespaceGen.Draw(t, "namespace")
|
||||
var keeper string
|
||||
if rapid.Bool().Draw(t, "systemKeeper") {
|
||||
keeper = contracts.SystemKeeperName
|
||||
} else {
|
||||
keeper = testutils.KeeperNameGen.Draw(t, "keeper")
|
||||
}
|
||||
modelErr := model.SetKeeperAsActive(namespace, keeper)
|
||||
err := sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(namespace), keeper)
|
||||
if err != nil || modelErr != nil {
|
||||
require.ErrorIs(t, err, modelErr)
|
||||
return
|
||||
}
|
||||
},
|
||||
"delete": func(t *rapid.T) {
|
||||
if len(model.items) == 0 {
|
||||
if len(model.SecureValues) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
i := rapid.IntRange(0, len(model.items)-1).Draw(t, "index")
|
||||
sv := model.items[i]
|
||||
modelErr := model.delete(sv.Namespace, sv.Name)
|
||||
i := rapid.IntRange(0, len(model.SecureValues)-1).Draw(t, "index")
|
||||
sv := model.SecureValues[i]
|
||||
_, modelErr := model.Delete(sv.Namespace, sv.Name)
|
||||
_, err := sut.DeleteSv(t.Context(), sv.Namespace, sv.Name)
|
||||
require.ErrorIs(t, err, modelErr)
|
||||
},
|
||||
@@ -153,7 +191,7 @@ func TestProperty(t *testing.T) {
|
||||
// Taken from secureValueMetadataStorage.acquireLeases
|
||||
minAge := 300 * time.Second
|
||||
maxBatchSize := sut.GarbageCollectionWorker.Cfg.SecretsManagement.GCWorkerMaxBatchSize
|
||||
modelDeleted, modelErr := model.cleanupInactiveSecureValues(sut.Clock.Now(), minAge, maxBatchSize)
|
||||
modelDeleted, modelErr := model.CleanupInactiveSecureValues(sut.Clock.Now(), minAge, maxBatchSize)
|
||||
deleted, err := sut.GarbageCollectionWorker.CleanupInactiveSecureValues(t.Context())
|
||||
require.ErrorIs(t, err, modelErr)
|
||||
|
||||
@@ -174,77 +212,3 @@ func TestProperty(t *testing.T) {
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
type model struct {
|
||||
items []*modelSecureValue
|
||||
}
|
||||
|
||||
type modelSecureValue struct {
|
||||
*secretv1beta1.SecureValue
|
||||
active bool
|
||||
created time.Time
|
||||
}
|
||||
|
||||
func newModel() *model {
|
||||
return &model{
|
||||
items: make([]*modelSecureValue, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *model) create(now time.Time, sv *secretv1beta1.SecureValue) error {
|
||||
created := now
|
||||
for _, item := range m.items {
|
||||
if item.active && item.Namespace == sv.Namespace && item.Name == sv.Name {
|
||||
item.active = false
|
||||
created = item.created
|
||||
break
|
||||
}
|
||||
}
|
||||
m.items = append(m.items, &modelSecureValue{SecureValue: sv, active: true, created: created})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *model) delete(ns string, name string) error {
|
||||
for _, sv := range m.items {
|
||||
if sv.active && sv.Namespace == ns && sv.Name == name {
|
||||
sv.active = false
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return contracts.ErrSecureValueNotFound
|
||||
}
|
||||
|
||||
func (m *model) cleanupInactiveSecureValues(now time.Time, minAge time.Duration, maxBatchSize uint16) ([]*modelSecureValue, error) {
|
||||
// Using a slice to allow duplicates
|
||||
toDelete := make([]*modelSecureValue, 0)
|
||||
|
||||
// The implementation query sorts by created time ascending
|
||||
slices.SortFunc(m.items, func(a, b *modelSecureValue) int {
|
||||
if a.created.Before(b.created) {
|
||||
return -1
|
||||
} else if a.created.After(b.created) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
})
|
||||
|
||||
for _, sv := range m.items {
|
||||
if len(toDelete) >= int(maxBatchSize) {
|
||||
break
|
||||
}
|
||||
|
||||
if !sv.active && now.Sub(sv.created) > minAge {
|
||||
toDelete = append(toDelete, sv)
|
||||
}
|
||||
}
|
||||
|
||||
// PERF: The slices are always small
|
||||
m.items = slices.DeleteFunc(m.items, func(v1 *modelSecureValue) bool {
|
||||
return slices.ContainsFunc(toDelete, func(v2 *modelSecureValue) bool {
|
||||
return v2.UID == v1.UID
|
||||
})
|
||||
})
|
||||
|
||||
return toDelete, nil
|
||||
}
|
||||
|
||||
@@ -107,6 +107,10 @@ func (s *SQLKeeper) Expose(ctx context.Context, cfg secretv1beta1.KeeperConfig,
|
||||
return exposedValue, nil
|
||||
}
|
||||
|
||||
func (s *SQLKeeper) RetrieveReference(ctx context.Context, cfg secretv1beta1.KeeperConfig, ref string) (secretv1beta1.ExposedSecureValue, error) {
|
||||
return "", fmt.Errorf("reference is not implemented by the SQLKeeper")
|
||||
}
|
||||
|
||||
func (s *SQLKeeper) Delete(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64) error {
|
||||
ctx, span := s.tracer.Start(ctx, "SQLKeeper.Delete", trace.WithAttributes(
|
||||
attribute.String("namespace", namespace.String()),
|
||||
@@ -125,27 +129,3 @@ func (s *SQLKeeper) Delete(ctx context.Context, cfg secretv1beta1.KeeperConfig,
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SQLKeeper) Update(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64, exposedValueOrRef string) error {
|
||||
ctx, span := s.tracer.Start(ctx, "SQLKeeper.Update", trace.WithAttributes(
|
||||
attribute.String("namespace", namespace.String()),
|
||||
attribute.String("name", name),
|
||||
attribute.Int64("version", version),
|
||||
))
|
||||
defer span.End()
|
||||
|
||||
start := time.Now()
|
||||
encryptedData, err := s.encryptionManager.Encrypt(ctx, namespace, []byte(exposedValueOrRef))
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to encrypt value: %w", err)
|
||||
}
|
||||
|
||||
err = s.store.Update(ctx, namespace, name, version, encryptedData)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update encrypted value: %w", err)
|
||||
}
|
||||
|
||||
s.metrics.UpdateDuration.WithLabelValues(string(cfg.Type())).Observe(time.Since(start).Seconds())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ func Test_SQLKeeperSetup(t *testing.T) {
|
||||
plaintext1 := "very secret string in namespace 1"
|
||||
plaintext2 := "very secret string in namespace 2"
|
||||
|
||||
keeperCfg := &secretv1beta1.SystemKeeperConfig{}
|
||||
keeperCfg := secretv1beta1.NewNamedKeeperConfig("k1", &secretv1beta1.SystemKeeperConfig{})
|
||||
|
||||
t.Run("storing an encrypted value returns no error", func(t *testing.T) {
|
||||
sut := testutils.Setup(t)
|
||||
@@ -123,31 +123,6 @@ func Test_SQLKeeperSetup(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("updating an existent encrypted value returns no error", func(t *testing.T) {
|
||||
sut := testutils.Setup(t)
|
||||
|
||||
_, err := sut.SQLKeeper.Store(t.Context(), keeperCfg, namespace1, name1, version1, plaintext1)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = sut.SQLKeeper.Update(t.Context(), keeperCfg, namespace1, name1, version1, plaintext2)
|
||||
require.NoError(t, err)
|
||||
|
||||
exposedVal, err := sut.SQLKeeper.Expose(t.Context(), keeperCfg, namespace1, name1, version1)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, exposedVal)
|
||||
assert.Equal(t, plaintext2, exposedVal.DangerouslyExposeAndConsumeValue())
|
||||
})
|
||||
|
||||
t.Run("updating a non existent encrypted value returns error", func(t *testing.T) {
|
||||
sut := testutils.Setup(t)
|
||||
|
||||
_, err := sut.SQLKeeper.Store(t.Context(), keeperCfg, namespace1, name1, version1, plaintext1)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = sut.SQLKeeper.Update(t.Context(), nil, namespace1, "non_existing_name", version1, plaintext2)
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("data key migration only runs if both secrets db migrations are enabled", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
@@ -141,7 +141,7 @@ func (s *SecureValueService) Update(ctx context.Context, newSecureValue *secretv
|
||||
return nil, false, fmt.Errorf("fetching keeper config: namespace=%+v keeper: %q %w", newSecureValue.Namespace, currentVersion.Status.Keeper, err)
|
||||
}
|
||||
|
||||
if newSecureValue.Spec.Value == nil {
|
||||
if newSecureValue.Spec.Value == nil && newSecureValue.Spec.Ref == nil {
|
||||
keeper, err := s.keeperService.KeeperForConfig(keeperCfg)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("getting keeper for config: namespace=%+v keeperName=%+v %w", newSecureValue.Namespace, newSecureValue.Status.Keeper, err)
|
||||
@@ -150,7 +150,7 @@ func (s *SecureValueService) Update(ctx context.Context, newSecureValue *secretv
|
||||
|
||||
secret, err := keeper.Expose(ctx, keeperCfg, xkube.Namespace(newSecureValue.Namespace), newSecureValue.Name, currentVersion.Status.Version)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("reading secret value from keeper: %w", err)
|
||||
return nil, false, fmt.Errorf("reading secret value from keeper: %w %w", contracts.ErrSecureValueMissingSecretAndRef, err)
|
||||
}
|
||||
|
||||
newSecureValue.Spec.Value = &secret
|
||||
@@ -174,6 +174,10 @@ func (s *SecureValueService) createNewVersion(ctx context.Context, keeperName st
|
||||
return nil, contracts.NewErrValidateSecureValue(errorList)
|
||||
}
|
||||
|
||||
if sv.Spec.Ref != nil && keeperCfg.Type() == secretv1beta1.SystemKeeperType {
|
||||
return nil, contracts.ErrReferenceWithSystemKeeper
|
||||
}
|
||||
|
||||
createdSv, err := s.secureValueMetadataStorage.Create(ctx, keeperName, sv, actorUID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating secure value: %w", err)
|
||||
@@ -189,18 +193,28 @@ func (s *SecureValueService) createNewVersion(ctx context.Context, keeperName st
|
||||
return nil, fmt.Errorf("getting keeper for config: namespace=%+v keeperName=%+v %w", createdSv.Namespace, keeperName, err)
|
||||
}
|
||||
logging.FromContext(ctx).Debug("retrieved keeper", "namespace", createdSv.Namespace, "type", keeperCfg.Type())
|
||||
|
||||
// TODO: can we stop using external id?
|
||||
// TODO: store uses only the namespace and returns and id. It could be a kv instead.
|
||||
// TODO: check that the encrypted store works with multiple versions
|
||||
externalID, err := keeper.Store(ctx, keeperCfg, xkube.Namespace(createdSv.Namespace), createdSv.Name, createdSv.Status.Version, sv.Spec.Value.DangerouslyExposeAndConsumeValue())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storing secure value in keeper: %w", err)
|
||||
}
|
||||
createdSv.Status.ExternalID = string(externalID)
|
||||
switch {
|
||||
case sv.Spec.Value != nil:
|
||||
externalID, err := keeper.Store(ctx, keeperCfg, xkube.Namespace(createdSv.Namespace), createdSv.Name, createdSv.Status.Version, sv.Spec.Value.DangerouslyExposeAndConsumeValue())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storing secure value in keeper: %w", err)
|
||||
}
|
||||
createdSv.Status.ExternalID = string(externalID)
|
||||
|
||||
if err := s.secureValueMetadataStorage.SetExternalID(ctx, xkube.Namespace(createdSv.Namespace), createdSv.Name, createdSv.Status.Version, externalID); err != nil {
|
||||
return nil, fmt.Errorf("setting secure value external id: %w", err)
|
||||
if err := s.secureValueMetadataStorage.SetExternalID(ctx, xkube.Namespace(createdSv.Namespace), createdSv.Name, createdSv.Status.Version, externalID); err != nil {
|
||||
return nil, fmt.Errorf("setting secure value external id: %w", err)
|
||||
}
|
||||
|
||||
case sv.Spec.Ref != nil:
|
||||
// No-op, there's nothing to store in the keeper since the
|
||||
// secret is already stored in the 3rd party secret store
|
||||
// and it's being referenced.
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("secure value doesn't specify either a secret value or a reference")
|
||||
}
|
||||
|
||||
if err := s.secureValueMetadataStorage.SetVersionToActive(ctx, xkube.Namespace(createdSv.Namespace), createdSv.Name, createdSv.Status.Version); err != nil {
|
||||
@@ -366,3 +380,20 @@ func (s *SecureValueService) Delete(ctx context.Context, namespace xkube.Namespa
|
||||
|
||||
return sv, nil
|
||||
}
|
||||
|
||||
func (s *SecureValueService) SetKeeperAsActive(ctx context.Context, namespace xkube.Namespace, name string) error {
|
||||
// The system keeper is not in the database, so skip checking it exists.
|
||||
// TODO: should the system keeper be in the database?
|
||||
if name != contracts.SystemKeeperName {
|
||||
// Check keeper exists. No need to worry about time of check to time of use
|
||||
// since trying to activate a just deleted keeper will result in all
|
||||
// keepers being inactive and defaulting to the system keeper.
|
||||
if _, err := s.keeperMetadataStorage.Read(ctx, namespace, name, contracts.ReadOpts{}); err != nil {
|
||||
return fmt.Errorf("reading keeper before setting as active: %w", err)
|
||||
}
|
||||
}
|
||||
if err := s.keeperMetadataStorage.SetAsActive(ctx, namespace, name); err != nil {
|
||||
return fmt.Errorf("calling keeper metadata storage to set keeper as active: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/grafana/grafana/pkg/registry/apis/secret/testutils"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/secret/xkube"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
@@ -93,4 +94,149 @@ func TestCrud(t *testing.T) {
|
||||
_, err = sut.SecureValueMetadataStorage.Read(t.Context(), xkube.Namespace(sv1.Namespace), sv1.Name, contracts.ReadOpts{})
|
||||
require.ErrorIs(t, err, contracts.ErrSecureValueNotFound)
|
||||
})
|
||||
|
||||
t.Run("secret can be referenced only when the active keeper is a 3rd party keeper", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sut := testutils.Setup(t)
|
||||
|
||||
ref := "path-to-secret"
|
||||
sv := &secretv1beta1.SecureValue{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "sv1",
|
||||
Namespace: "ns1",
|
||||
},
|
||||
Spec: secretv1beta1.SecureValueSpec{
|
||||
Description: "desc1",
|
||||
Ref: &ref,
|
||||
Decrypters: []string{"decrypter1"},
|
||||
},
|
||||
Status: secretv1beta1.SecureValueStatus{},
|
||||
}
|
||||
|
||||
// Creating a secure value using ref with the system keeper
|
||||
createdSv, err := sut.CreateSv(t.Context(), testutils.CreateSvWithSv(sv))
|
||||
require.NotNil(t, err)
|
||||
require.Nil(t, createdSv)
|
||||
require.Contains(t, err.Error(), "tried to create secure value using reference with system keeper, references can only be used with 3rd party keepers")
|
||||
|
||||
// Create a 3rd party keeper
|
||||
keeper := &secretv1beta1.Keeper{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "k1",
|
||||
Namespace: "ns1",
|
||||
},
|
||||
Spec: secretv1beta1.KeeperSpec{
|
||||
Description: "desc",
|
||||
Aws: &secretv1beta1.KeeperAWSConfig{
|
||||
Region: "us-east-1",
|
||||
AssumeRole: &secretv1beta1.KeeperAWSAssumeRole{
|
||||
AssumeRoleArn: "arn",
|
||||
ExternalID: "id",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Create a 3rd party keeper
|
||||
_, err = sut.KeeperMetadataStorage.Create(t.Context(), keeper, "actor-uid")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set the new keeper as active
|
||||
require.NoError(t, sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(keeper.Namespace), keeper.Name))
|
||||
|
||||
// Create a secure value using a ref
|
||||
createdSv, err = sut.CreateSv(t.Context(), testutils.CreateSvWithSv(sv))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, keeper.Name, createdSv.Status.Keeper)
|
||||
})
|
||||
|
||||
t.Run("creating secure value with reference", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sut := testutils.Setup(t)
|
||||
|
||||
// Create a keeper because references cannot be used with the system keeper
|
||||
keeper, err := sut.KeeperMetadataStorage.Create(t.Context(), &secretv1beta1.Keeper{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "ns",
|
||||
Name: "k1",
|
||||
},
|
||||
Spec: secretv1beta1.KeeperSpec{
|
||||
Aws: &secretv1beta1.KeeperAWSConfig{},
|
||||
},
|
||||
}, "actor-uid")
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(keeper.Namespace), keeper.Name))
|
||||
|
||||
sv, err := sut.CreateSv(t.Context())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sv)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_SetAsActive(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
t.Run("setting the system keeper as the active keeper", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sut := testutils.Setup(t)
|
||||
|
||||
namespace := "ns"
|
||||
|
||||
// Create a new keeper
|
||||
keeper, err := sut.KeeperMetadataStorage.Create(t.Context(), &secretv1beta1.Keeper{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "ns",
|
||||
Name: "k1",
|
||||
},
|
||||
Spec: secretv1beta1.KeeperSpec{
|
||||
Description: "description",
|
||||
Aws: &secretv1beta1.KeeperAWSConfig{},
|
||||
},
|
||||
}, "actor-uid")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set the new keeper as active
|
||||
require.NoError(t, sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(keeper.Namespace), keeper.Name))
|
||||
keeperName, _, err := sut.KeeperMetadataStorage.GetActiveKeeperConfig(t.Context(), namespace)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, keeper.Name, keeperName)
|
||||
|
||||
// Set the system keeper as active
|
||||
require.NoError(t, sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(namespace), contracts.SystemKeeperName))
|
||||
keeperName, _, err = sut.KeeperMetadataStorage.GetActiveKeeperConfig(t.Context(), namespace)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, contracts.SystemKeeperName, keeperName)
|
||||
})
|
||||
|
||||
t.Run("each namespace can have one active keeper", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sut := testutils.Setup(t)
|
||||
|
||||
k1, err := sut.CreateKeeper(t.Context(), func(ckc *testutils.CreateKeeperConfig) {
|
||||
ckc.Keeper.Namespace = "ns1"
|
||||
ckc.Keeper.Name = "k1"
|
||||
})
|
||||
require.NoError(t, err)
|
||||
k2, err := sut.CreateKeeper(t.Context(), func(ckc *testutils.CreateKeeperConfig) {
|
||||
ckc.Keeper.Namespace = "ns2"
|
||||
ckc.Keeper.Name = "k2"
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(k1.Namespace), k1.Name))
|
||||
require.NoError(t, sut.KeeperMetadataStorage.SetAsActive(t.Context(), xkube.Namespace(k2.Namespace), k2.Name))
|
||||
|
||||
keeperName, _, err := sut.KeeperMetadataStorage.GetActiveKeeperConfig(t.Context(), k1.Namespace)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, k1.Name, keeperName)
|
||||
|
||||
keeperName, _, err = sut.KeeperMetadataStorage.GetActiveKeeperConfig(t.Context(), k2.Namespace)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, k2.Name, keeperName)
|
||||
})
|
||||
}
|
||||
|
||||
96
pkg/registry/apis/secret/testutils/generators.go
Normal file
96
pkg/registry/apis/secret/testutils/generators.go
Normal file
@@ -0,0 +1,96 @@
|
||||
package testutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
secretv1beta1 "github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/ptr"
|
||||
"pgregory.net/rapid"
|
||||
)
|
||||
|
||||
var (
|
||||
DecryptersGen = rapid.SampledFrom([]string{"svc1", "svc2", "svc3", "svc4", "svc5"})
|
||||
SecureValueNameGen = rapid.SampledFrom([]string{"n1", "n2", "n3", "n4", "n5"})
|
||||
KeeperNameGen = rapid.SampledFrom([]string{"k1", "k2", "k3", "k4", "k5"})
|
||||
NamespaceGen = rapid.SampledFrom([]string{"ns1", "ns2", "ns3", "ns4", "ns5"})
|
||||
SecretsToRefGen = rapid.SampledFrom([]string{"ref1", "ref2", "ref3", "ref4", "ref5"})
|
||||
// Generator for secure values that specify a secret value
|
||||
AnySecureValueGen = rapid.Custom(func(t *rapid.T) *secretv1beta1.SecureValue {
|
||||
return &secretv1beta1.SecureValue{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: SecureValueNameGen.Draw(t, "name"),
|
||||
Namespace: NamespaceGen.Draw(t, "ns"),
|
||||
},
|
||||
Spec: secretv1beta1.SecureValueSpec{
|
||||
Description: rapid.SampledFrom([]string{"d1", "d2", "d3", "d4", "d5"}).Draw(t, "description"),
|
||||
Value: ptr.To(secretv1beta1.NewExposedSecureValue(rapid.SampledFrom([]string{"v1", "v2", "v3", "v4", "v5"}).Draw(t, "value"))),
|
||||
Decrypters: rapid.SliceOfDistinct(DecryptersGen, func(v string) string { return v }).Draw(t, "decrypters"),
|
||||
},
|
||||
Status: secretv1beta1.SecureValueStatus{},
|
||||
}
|
||||
})
|
||||
// Generator for secure values that reference values from 3rd party stores
|
||||
AnySecureValueWithRefGen = rapid.Custom(func(t *rapid.T) *secretv1beta1.SecureValue {
|
||||
return &secretv1beta1.SecureValue{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: SecureValueNameGen.Draw(t, "name"),
|
||||
Namespace: NamespaceGen.Draw(t, "ns"),
|
||||
},
|
||||
Spec: secretv1beta1.SecureValueSpec{
|
||||
Description: rapid.SampledFrom([]string{"d1", "d2", "d3", "d4", "d5"}).Draw(t, "description"),
|
||||
Ref: ptr.To(SecretsToRefGen.Draw(t, "ref")),
|
||||
Decrypters: rapid.SliceOfDistinct(DecryptersGen, func(v string) string { return v }).Draw(t, "decrypters"),
|
||||
},
|
||||
Status: secretv1beta1.SecureValueStatus{},
|
||||
}
|
||||
})
|
||||
UpdateSecureValueGen = rapid.Custom(func(t *rapid.T) *secretv1beta1.SecureValue {
|
||||
sv := AnySecureValueGen.Draw(t, "sv")
|
||||
// Maybe update the secret value, maybe not
|
||||
if !rapid.Bool().Draw(t, "should_update_value") {
|
||||
sv.Spec.Value = nil
|
||||
}
|
||||
return sv
|
||||
})
|
||||
DecryptGen = rapid.Custom(func(t *rapid.T) DecryptInput {
|
||||
return DecryptInput{
|
||||
Namespace: NamespaceGen.Draw(t, "ns"),
|
||||
Name: SecureValueNameGen.Draw(t, "name"),
|
||||
Decrypter: DecryptersGen.Draw(t, "decrypter"),
|
||||
}
|
||||
})
|
||||
AnyKeeperGen = rapid.Custom(func(t *rapid.T) *secretv1beta1.Keeper {
|
||||
spec := secretv1beta1.KeeperSpec{
|
||||
Description: rapid.String().Draw(t, "description"),
|
||||
}
|
||||
|
||||
keeperType := rapid.SampledFrom([]string{"isAwsKeeper", "isAzureKeeper", "isGcpKeeper", "isVaultKeeper"}).Draw(t, "keeperType")
|
||||
switch keeperType {
|
||||
case "isAwsKeeper":
|
||||
spec.Aws = &secretv1beta1.KeeperAWSConfig{}
|
||||
case "isAzureKeeper":
|
||||
spec.Azure = &secretv1beta1.KeeperAzureConfig{}
|
||||
case "isGcpKeeper":
|
||||
spec.Gcp = &secretv1beta1.KeeperGCPConfig{}
|
||||
case "isVaultKeeper":
|
||||
spec.HashiCorpVault = &secretv1beta1.KeeperHashiCorpConfig{}
|
||||
default:
|
||||
panic(fmt.Sprintf("unhandled keeper type '%+v', did you forget a switch case?", keeperType))
|
||||
}
|
||||
|
||||
return &secretv1beta1.Keeper{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: KeeperNameGen.Draw(t, "name"),
|
||||
Namespace: NamespaceGen.Draw(t, "ns"),
|
||||
},
|
||||
Spec: spec,
|
||||
}
|
||||
})
|
||||
)
|
||||
|
||||
type DecryptInput struct {
|
||||
Namespace string
|
||||
Name string
|
||||
Decrypter string
|
||||
}
|
||||
321
pkg/registry/apis/secret/testutils/model_gsm.go
Normal file
321
pkg/registry/apis/secret/testutils/model_gsm.go
Normal file
@@ -0,0 +1,321 @@
|
||||
package testutils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
secretv1beta1 "github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1"
|
||||
"github.com/grafana/grafana/apps/secret/pkg/decrypt"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/secret/contracts"
|
||||
)
|
||||
|
||||
type ModelSecureValue struct {
|
||||
*secretv1beta1.SecureValue
|
||||
active bool
|
||||
created time.Time
|
||||
leaseCreated time.Time
|
||||
}
|
||||
|
||||
type ModelKeeper struct {
|
||||
namespace string
|
||||
name string
|
||||
active bool
|
||||
keeperType secretv1beta1.KeeperType
|
||||
}
|
||||
|
||||
// A simplified in memoruy model of the grafana secrets manager
|
||||
type ModelGsm struct {
|
||||
SecureValues []*ModelSecureValue
|
||||
Keepers []*ModelKeeper
|
||||
modelSecretsManager *ModelAWSSecretsManager
|
||||
}
|
||||
|
||||
func NewModelGsm(modelSecretsManager *ModelAWSSecretsManager) *ModelGsm {
|
||||
return &ModelGsm{modelSecretsManager: modelSecretsManager}
|
||||
}
|
||||
|
||||
func (m *ModelGsm) getNewVersionNumber(namespace, name string) int64 {
|
||||
latestVersion := int64(0)
|
||||
for _, sv := range m.SecureValues {
|
||||
if sv.Namespace == namespace && sv.Name == name {
|
||||
latestVersion = max(latestVersion, sv.Status.Version)
|
||||
}
|
||||
}
|
||||
return latestVersion + 1
|
||||
}
|
||||
|
||||
func (m *ModelGsm) SetVersionToActive(namespace, name string, version int64) {
|
||||
for _, sv := range m.SecureValues {
|
||||
if sv.Namespace == namespace && sv.Name == name {
|
||||
sv.active = sv.Status.Version == version
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *ModelGsm) SetVersionToInactive(namespace, name string, version int64) {
|
||||
for _, sv := range m.SecureValues {
|
||||
if sv.Namespace == namespace && sv.Name == name && sv.Status.Version == version {
|
||||
sv.active = false
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *ModelGsm) ReadActiveVersion(namespace, name string) *ModelSecureValue {
|
||||
for _, sv := range m.SecureValues {
|
||||
if sv.Namespace == namespace && sv.Name == name && sv.active {
|
||||
return sv
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) Create(now time.Time, sv *secretv1beta1.SecureValue) (*secretv1beta1.SecureValue, error) {
|
||||
keeper := m.getActiveKeeper(sv.Namespace)
|
||||
|
||||
if sv.Spec.Ref != nil && keeper.keeperType == secretv1beta1.SystemKeeperType {
|
||||
return nil, contracts.ErrReferenceWithSystemKeeper
|
||||
}
|
||||
|
||||
sv = sv.DeepCopy()
|
||||
|
||||
// Preserve the original creation time if this secure value already exists
|
||||
created := now
|
||||
if sv := m.ReadActiveVersion(sv.Namespace, sv.Name); sv != nil {
|
||||
created = sv.created
|
||||
}
|
||||
|
||||
modelSv := &ModelSecureValue{SecureValue: sv, active: false, created: created}
|
||||
modelSv.Status.Version = m.getNewVersionNumber(modelSv.Namespace, modelSv.Name)
|
||||
modelSv.Status.ExternalID = fmt.Sprintf("%d", modelSv.Status.Version)
|
||||
modelSv.Status.Keeper = keeper.name
|
||||
m.SecureValues = append(m.SecureValues, modelSv)
|
||||
m.SetVersionToActive(modelSv.Namespace, modelSv.Name, modelSv.Status.Version)
|
||||
return modelSv.SecureValue, nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) getActiveKeeper(namespace string) *ModelKeeper {
|
||||
for _, k := range m.Keepers {
|
||||
if k.namespace == namespace && k.active {
|
||||
return k
|
||||
}
|
||||
}
|
||||
|
||||
// Default to the system keeper when there are no active keepers in the namespace
|
||||
return &ModelKeeper{
|
||||
namespace: namespace,
|
||||
name: contracts.SystemKeeperName,
|
||||
active: true,
|
||||
keeperType: secretv1beta1.SystemKeeperType,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *ModelGsm) keeperExists(namespace, name string) bool {
|
||||
return m.findKeeper(namespace, name) != nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) findKeeper(namespace, name string) *ModelKeeper {
|
||||
// The system keeper is not in the list of keepers
|
||||
if name == contracts.SystemKeeperName {
|
||||
return &ModelKeeper{namespace: namespace, name: contracts.SystemKeeperName, active: true, keeperType: secretv1beta1.SystemKeeperType}
|
||||
}
|
||||
for _, k := range m.Keepers {
|
||||
if k.namespace == namespace && k.name == name {
|
||||
return k
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) CreateKeeper(keeper *secretv1beta1.Keeper) (*secretv1beta1.Keeper, error) {
|
||||
if m.keeperExists(keeper.Namespace, keeper.Name) {
|
||||
return nil, contracts.ErrKeeperAlreadyExists
|
||||
}
|
||||
|
||||
var keeperType secretv1beta1.KeeperType
|
||||
switch {
|
||||
case keeper.Spec.Aws != nil:
|
||||
keeperType = secretv1beta1.AWSKeeperType
|
||||
case keeper.Spec.Gcp != nil:
|
||||
keeperType = secretv1beta1.GCPKeeperType
|
||||
case keeper.Spec.Azure != nil:
|
||||
keeperType = secretv1beta1.AzureKeeperType
|
||||
case keeper.Spec.HashiCorpVault != nil:
|
||||
keeperType = secretv1beta1.HashiCorpKeeperType
|
||||
default:
|
||||
keeperType = secretv1beta1.SystemKeeperType
|
||||
}
|
||||
|
||||
m.Keepers = append(m.Keepers, &ModelKeeper{namespace: keeper.Namespace, name: keeper.Name, keeperType: keeperType})
|
||||
|
||||
return keeper.DeepCopy(), nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) SetKeeperAsActive(namespace, keeperName string) error {
|
||||
// Set every other keeper in the namespace as inactive
|
||||
for _, k := range m.Keepers {
|
||||
if k.namespace == namespace {
|
||||
k.active = k.name == keeperName
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) Update(now time.Time, newSecureValue *secretv1beta1.SecureValue) (*secretv1beta1.SecureValue, bool, error) {
|
||||
sv := m.ReadActiveVersion(newSecureValue.Namespace, newSecureValue.Name)
|
||||
if sv == nil {
|
||||
return nil, false, contracts.ErrSecureValueNotFound
|
||||
}
|
||||
|
||||
// If the keeper doesn't exist, return an error
|
||||
if !m.keeperExists(sv.Namespace, sv.Status.Keeper) {
|
||||
return nil, false, contracts.ErrKeeperNotFound
|
||||
}
|
||||
|
||||
// If the payload doesn't contain a value and it's not using a reference, get the value from current version
|
||||
if newSecureValue.Spec.Value == nil && newSecureValue.Spec.Ref == nil {
|
||||
// Tried to update a secure value without providing a new value or a ref
|
||||
if sv.Spec.Value == nil {
|
||||
return nil, false, contracts.ErrSecureValueMissingSecretAndRef
|
||||
}
|
||||
newSecureValue.Spec.Value = sv.Spec.Value
|
||||
}
|
||||
|
||||
createdSv, err := m.Create(now, newSecureValue)
|
||||
|
||||
return createdSv, true, err
|
||||
}
|
||||
|
||||
func (m *ModelGsm) Delete(namespace, name string) (*secretv1beta1.SecureValue, error) {
|
||||
modelSv := m.ReadActiveVersion(namespace, name)
|
||||
if modelSv == nil {
|
||||
return nil, contracts.ErrSecureValueNotFound
|
||||
}
|
||||
m.SetVersionToInactive(namespace, name, modelSv.Status.Version)
|
||||
return modelSv.SecureValue, nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) List(namespace string) (*secretv1beta1.SecureValueList, error) {
|
||||
out := make([]secretv1beta1.SecureValue, 0)
|
||||
|
||||
for _, v := range m.SecureValues {
|
||||
if v.Namespace == namespace && v.active {
|
||||
out = append(out, *v.SecureValue)
|
||||
}
|
||||
}
|
||||
|
||||
return &secretv1beta1.SecureValueList{Items: out}, nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) Decrypt(ctx context.Context, decrypter, namespace, name string) (map[string]decrypt.DecryptResult, error) {
|
||||
for _, v := range m.SecureValues {
|
||||
if v.Namespace == namespace &&
|
||||
v.Name == name &&
|
||||
v.active {
|
||||
if slices.ContainsFunc(v.Spec.Decrypters, func(d string) bool { return d == decrypter }) {
|
||||
switch {
|
||||
// It's a secure value that specifies the secret
|
||||
case v.Spec.Value != nil:
|
||||
return map[string]decrypt.DecryptResult{
|
||||
name: decrypt.NewDecryptResultValue(v.DeepCopy().Spec.Value),
|
||||
}, nil
|
||||
|
||||
// It's a secure value that references a secret on a 3rd party store
|
||||
case v.Spec.Ref != nil:
|
||||
keeper := m.findKeeper(v.Namespace, v.Status.Keeper)
|
||||
switch keeper.keeperType {
|
||||
case secretv1beta1.AWSKeeperType:
|
||||
exposedValue, err := m.modelSecretsManager.RetrieveReference(ctx, nil, *v.Spec.Ref)
|
||||
if err != nil {
|
||||
return map[string]decrypt.DecryptResult{
|
||||
name: decrypt.NewDecryptResultErr(fmt.Errorf("%w: %w", contracts.ErrDecryptFailed, err)),
|
||||
}, nil
|
||||
}
|
||||
return map[string]decrypt.DecryptResult{
|
||||
name: decrypt.NewDecryptResultValue(&exposedValue),
|
||||
}, nil
|
||||
|
||||
// Other keepers are not implemented so we default to the system keeper
|
||||
default:
|
||||
// The system keeper doesn't implement Reference so decryption always fails
|
||||
return map[string]decrypt.DecryptResult{
|
||||
name: decrypt.NewDecryptResultErr(contracts.ErrDecryptFailed),
|
||||
}, nil
|
||||
}
|
||||
|
||||
default:
|
||||
panic("bug: secure value where Spec.Value and Spec.Ref are nil")
|
||||
}
|
||||
}
|
||||
|
||||
return map[string]decrypt.DecryptResult{
|
||||
name: decrypt.NewDecryptResultErr(contracts.ErrDecryptNotAuthorized),
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
return map[string]decrypt.DecryptResult{
|
||||
name: decrypt.NewDecryptResultErr(contracts.ErrDecryptNotFound),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) Read(namespace, name string) (*secretv1beta1.SecureValue, error) {
|
||||
modelSv := m.ReadActiveVersion(namespace, name)
|
||||
if modelSv == nil {
|
||||
return nil, contracts.ErrSecureValueNotFound
|
||||
}
|
||||
return modelSv.SecureValue, nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) LeaseInactiveSecureValues(now time.Time, minAge, leaseTTL time.Duration, maxBatchSize uint16) ([]*ModelSecureValue, error) {
|
||||
out := make([]*ModelSecureValue, 0)
|
||||
|
||||
for _, sv := range m.SecureValues {
|
||||
if len(out) >= int(maxBatchSize) {
|
||||
break
|
||||
}
|
||||
if !sv.active && now.Sub(sv.created) > minAge && now.Sub(sv.leaseCreated) > leaseTTL {
|
||||
sv.leaseCreated = now
|
||||
out = append(out, sv)
|
||||
}
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (m *ModelGsm) CleanupInactiveSecureValues(now time.Time, minAge time.Duration, maxBatchSize uint16) ([]*ModelSecureValue, error) {
|
||||
// Using a slice to allow duplicates
|
||||
toDelete := make([]*ModelSecureValue, 0)
|
||||
|
||||
// The implementation query sorts by created time ascending
|
||||
slices.SortFunc(m.SecureValues, func(a, b *ModelSecureValue) int {
|
||||
if a.created.Before(b.created) {
|
||||
return -1
|
||||
} else if a.created.After(b.created) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
})
|
||||
|
||||
for _, sv := range m.SecureValues {
|
||||
if len(toDelete) >= int(maxBatchSize) {
|
||||
break
|
||||
}
|
||||
|
||||
if !sv.active && now.Sub(sv.created) > minAge {
|
||||
toDelete = append(toDelete, sv)
|
||||
}
|
||||
}
|
||||
|
||||
// PERF: The slices are always small
|
||||
m.SecureValues = slices.DeleteFunc(m.SecureValues, func(v1 *ModelSecureValue) bool {
|
||||
return slices.ContainsFunc(toDelete, func(v2 *ModelSecureValue) bool {
|
||||
return v2.UID == v1.UID
|
||||
})
|
||||
})
|
||||
|
||||
return toDelete, nil
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package testutils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -143,7 +144,8 @@ func Setup(t *testing.T, opts ...func(*SetupConfig)) Sut {
|
||||
realMigrationExecutor, err := encryptionstorage.ProvideEncryptedValueMigrationExecutor(database, tracer, encryptedValueStorage, globalEncryptedValueStorage)
|
||||
require.NoError(t, err)
|
||||
|
||||
var keeperService contracts.KeeperService = newKeeperServiceWrapper(sqlKeeper)
|
||||
mockAwsKeeper := NewModelSecretsManager()
|
||||
var keeperService contracts.KeeperService = newKeeperServiceWrapper(sqlKeeper, mockAwsKeeper)
|
||||
|
||||
if setupCfg.KeeperService != nil {
|
||||
keeperService = setupCfg.KeeperService
|
||||
@@ -190,6 +192,7 @@ func Setup(t *testing.T, opts ...func(*SetupConfig)) Sut {
|
||||
Clock: clock,
|
||||
KeeperService: keeperService,
|
||||
KeeperMetadataStorage: keeperMetadataStorage,
|
||||
ModelSecretsManager: mockAwsKeeper,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -212,6 +215,8 @@ type Sut struct {
|
||||
Clock *FakeClock
|
||||
KeeperService contracts.KeeperService
|
||||
KeeperMetadataStorage contracts.KeeperMetadataStorage
|
||||
// A mock of AWS secrets manager that implements contracts.Keeper
|
||||
ModelSecretsManager *ModelAWSSecretsManager
|
||||
}
|
||||
|
||||
type CreateSvConfig struct {
|
||||
@@ -260,16 +265,54 @@ func (s *Sut) DeleteSv(ctx context.Context, namespace, name string) (*secretv1be
|
||||
return sv, err
|
||||
}
|
||||
|
||||
type keeperServiceWrapper struct {
|
||||
keeper contracts.Keeper
|
||||
type CreateKeeperConfig struct {
|
||||
// The default keeper payload. Mutate it to change which keeper ends up being created
|
||||
Keeper *secretv1beta1.Keeper
|
||||
}
|
||||
|
||||
func newKeeperServiceWrapper(keeper contracts.Keeper) *keeperServiceWrapper {
|
||||
return &keeperServiceWrapper{keeper: keeper}
|
||||
func (s *Sut) CreateAWSKeeper(ctx context.Context) (*secretv1beta1.Keeper, error) {
|
||||
return s.CreateKeeper(ctx, func(cfg *CreateKeeperConfig) {
|
||||
cfg.Keeper.Spec = secretv1beta1.KeeperSpec{
|
||||
Aws: &secretv1beta1.KeeperAWSConfig{},
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Sut) CreateKeeper(ctx context.Context, opts ...func(*CreateKeeperConfig)) (*secretv1beta1.Keeper, error) {
|
||||
cfg := CreateKeeperConfig{
|
||||
Keeper: &secretv1beta1.Keeper{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "sv1",
|
||||
Namespace: "ns1",
|
||||
},
|
||||
Spec: secretv1beta1.KeeperSpec{
|
||||
Aws: &secretv1beta1.KeeperAWSConfig{},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(&cfg)
|
||||
}
|
||||
|
||||
return s.KeeperMetadataStorage.Create(ctx, cfg.Keeper, "actor-uid")
|
||||
}
|
||||
|
||||
type keeperServiceWrapper struct {
|
||||
sqlKeeper *sqlkeeper.SQLKeeper
|
||||
awsKeeper *ModelAWSSecretsManager
|
||||
}
|
||||
|
||||
func newKeeperServiceWrapper(sqlKeeper *sqlkeeper.SQLKeeper, awsKeeper *ModelAWSSecretsManager) *keeperServiceWrapper {
|
||||
return &keeperServiceWrapper{sqlKeeper: sqlKeeper, awsKeeper: awsKeeper}
|
||||
}
|
||||
|
||||
func (wrapper *keeperServiceWrapper) KeeperForConfig(cfg secretv1beta1.KeeperConfig) (contracts.Keeper, error) {
|
||||
return wrapper.keeper, nil
|
||||
switch cfg.(type) {
|
||||
case *secretv1beta1.NamedKeeperConfig[*secretv1beta1.KeeperAWSConfig]:
|
||||
return wrapper.awsKeeper, nil
|
||||
default:
|
||||
return wrapper.sqlKeeper, nil
|
||||
}
|
||||
}
|
||||
|
||||
func CreateUserAuthContext(ctx context.Context, namespace string, permissions map[string][]string) context.Context {
|
||||
@@ -390,3 +433,113 @@ type NoopMigrationExecutor struct {
|
||||
func (e *NoopMigrationExecutor) Execute(ctx context.Context) (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// A mock of AWS secrets manager, used for testing.
|
||||
type ModelAWSSecretsManager struct {
|
||||
secrets map[string]entry
|
||||
alreadyDeleted map[string]bool
|
||||
}
|
||||
|
||||
type entry struct {
|
||||
exposedValueOrRef string
|
||||
externalID string
|
||||
}
|
||||
|
||||
func NewModelSecretsManager() *ModelAWSSecretsManager {
|
||||
return &ModelAWSSecretsManager{
|
||||
secrets: make(map[string]entry),
|
||||
alreadyDeleted: make(map[string]bool),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *ModelAWSSecretsManager) Store(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64, exposedValueOrRef string) (externalID contracts.ExternalID, err error) {
|
||||
if exposedValueOrRef == "" {
|
||||
return "", fmt.Errorf("failed to satisfy constraint: Member must have length greater than or equal to 1")
|
||||
}
|
||||
|
||||
versionID := buildVersionID(namespace, name, version)
|
||||
if e, ok := m.secrets[versionID]; ok {
|
||||
// Ignore duplicated requests
|
||||
if e.exposedValueOrRef == exposedValueOrRef {
|
||||
return contracts.ExternalID(e.externalID), nil
|
||||
}
|
||||
|
||||
// Tried to create a secret that already exists
|
||||
return "", fmt.Errorf("ResourceExistsException: The operation failed because the secret %+v already exists", versionID)
|
||||
}
|
||||
|
||||
// First time creating the secret
|
||||
entry := entry{
|
||||
exposedValueOrRef: exposedValueOrRef,
|
||||
externalID: "external-id",
|
||||
}
|
||||
m.secrets[versionID] = entry
|
||||
|
||||
return contracts.ExternalID(entry.externalID), nil
|
||||
}
|
||||
|
||||
// Used to simulate the creation of secrets in the 3rd party secret store
|
||||
func (m *ModelAWSSecretsManager) Create(name, value string) {
|
||||
m.secrets[name] = entry{
|
||||
exposedValueOrRef: value,
|
||||
externalID: fmt.Sprintf("external_id_%+v", value),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *ModelAWSSecretsManager) Expose(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64) (exposedValue secretv1beta1.ExposedSecureValue, err error) {
|
||||
versionID := buildVersionID(namespace, name, version)
|
||||
|
||||
if m.deleted(versionID) {
|
||||
return "", fmt.Errorf("InvalidRequestException: You can't perform this operation on the secret because it was marked for deletion")
|
||||
}
|
||||
|
||||
entry, ok := m.secrets[versionID]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("ResourceNotFoundException: Secrets Manager can't find the specified secret")
|
||||
}
|
||||
|
||||
return secretv1beta1.ExposedSecureValue(entry.exposedValueOrRef), nil
|
||||
}
|
||||
|
||||
// TODO: this could be namespaced to make it more realistic
|
||||
func (m *ModelAWSSecretsManager) RetrieveReference(ctx context.Context, _ secretv1beta1.KeeperConfig, ref string) (secretv1beta1.ExposedSecureValue, error) {
|
||||
entry, ok := m.secrets[ref]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("ResourceNotFoundException: Secrets Manager can't find the specified secret")
|
||||
}
|
||||
return secretv1beta1.ExposedSecureValue(entry.exposedValueOrRef), nil
|
||||
}
|
||||
|
||||
func (m *ModelAWSSecretsManager) Delete(ctx context.Context, cfg secretv1beta1.KeeperConfig, namespace xkube.Namespace, name string, version int64) (err error) {
|
||||
versionID := buildVersionID(namespace, name, version)
|
||||
|
||||
// Deleting a secret that existed at some point is idempotent
|
||||
if m.deleted(versionID) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the secret is being deleted for the first time
|
||||
if m.exists(versionID) {
|
||||
m.delete(versionID)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ModelAWSSecretsManager) deleted(versionID string) bool {
|
||||
return m.alreadyDeleted[versionID]
|
||||
}
|
||||
|
||||
func (m *ModelAWSSecretsManager) exists(versionID string) bool {
|
||||
_, ok := m.secrets[versionID]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (m *ModelAWSSecretsManager) delete(versionID string) {
|
||||
m.alreadyDeleted[versionID] = true
|
||||
delete(m.secrets, versionID)
|
||||
}
|
||||
|
||||
func buildVersionID(namespace xkube.Namespace, name string, version int64) string {
|
||||
return fmt.Sprintf("%s/%s/%d", namespace, name, version)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/validation"
|
||||
@@ -9,14 +11,17 @@ import (
|
||||
|
||||
secretv1beta1 "github.com/grafana/grafana/apps/secret/pkg/apis/secret/v1beta1"
|
||||
"github.com/grafana/grafana/pkg/registry/apis/secret/contracts"
|
||||
"github.com/grafana/grafana/pkg/services/featuremgmt"
|
||||
)
|
||||
|
||||
type keeperValidator struct{}
|
||||
type keeperValidator struct {
|
||||
features featuremgmt.FeatureToggles
|
||||
}
|
||||
|
||||
var _ contracts.KeeperValidator = &keeperValidator{}
|
||||
|
||||
func ProvideKeeperValidator() contracts.KeeperValidator {
|
||||
return &keeperValidator{}
|
||||
func ProvideKeeperValidator(features featuremgmt.FeatureToggles) contracts.KeeperValidator {
|
||||
return &keeperValidator{features: features}
|
||||
}
|
||||
|
||||
func (v *keeperValidator) Validate(keeper *secretv1beta1.Keeper, oldKeeper *secretv1beta1.Keeper, operation admission.Operation) field.ErrorList {
|
||||
@@ -57,51 +62,110 @@ func (v *keeperValidator) Validate(keeper *secretv1beta1.Keeper, oldKeeper *secr
|
||||
}
|
||||
|
||||
if keeper.Spec.Aws != nil {
|
||||
if err := validateCredentialValue(field.NewPath("spec", "aws", "accessKeyID"), keeper.Spec.Aws.AccessKeyID); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
if err := validateCredentialValue(field.NewPath("spec", "aws", "secretAccessKey"), keeper.Spec.Aws.SecretAccessKey); err != nil {
|
||||
errs = append(errs, err)
|
||||
//nolint
|
||||
if !v.features.IsEnabled(context.Background(), featuremgmt.FlagSecretsManagementAppPlatformAwsKeeper) {
|
||||
errs = append(errs,
|
||||
field.Forbidden(field.NewPath("spec", "aws"),
|
||||
fmt.Sprintf("enable aws keeper feature toggle to create aws keepers: %s", featuremgmt.FlagSecretsManagementAppPlatformAwsKeeper)))
|
||||
} else {
|
||||
errs = append(errs, validateAws(keeper.Spec.Aws)...)
|
||||
}
|
||||
}
|
||||
|
||||
if keeper.Spec.Azure != nil {
|
||||
if keeper.Spec.Azure.KeyVaultName == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "azure", "keyVaultName"), "a `keyVaultName` is required"))
|
||||
}
|
||||
|
||||
if keeper.Spec.Azure.TenantID == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "azure", "tenantID"), "a `tenantID` is required"))
|
||||
}
|
||||
|
||||
if keeper.Spec.Azure.ClientID == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "azure", "clientID"), "a `clientID` is required"))
|
||||
}
|
||||
|
||||
if err := validateCredentialValue(field.NewPath("spec", "azure", "clientSecret"), keeper.Spec.Azure.ClientSecret); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
errs = append(errs, validateAzure(keeper.Spec.Azure)...)
|
||||
}
|
||||
|
||||
if keeper.Spec.Gcp != nil {
|
||||
if keeper.Spec.Gcp.ProjectID == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "gcp", "projectID"), "a `projectID` is required"))
|
||||
}
|
||||
|
||||
if keeper.Spec.Gcp.CredentialsFile == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "gcp", "credentialsFile"), "a `credentialsFile` is required"))
|
||||
}
|
||||
errs = append(errs, validateGcp(keeper.Spec.Gcp)...)
|
||||
}
|
||||
|
||||
if keeper.Spec.HashiCorpVault != nil {
|
||||
if keeper.Spec.HashiCorpVault.Address == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "hashiCorpVault", "address"), "an `address` is required"))
|
||||
}
|
||||
errs = append(errs, validateHashiCorpVault(keeper.Spec.HashiCorpVault)...)
|
||||
}
|
||||
|
||||
if err := validateCredentialValue(field.NewPath("spec", "hashiCorpVault", "token"), keeper.Spec.HashiCorpVault.Token); err != nil {
|
||||
return errs
|
||||
}
|
||||
|
||||
func validateAws(cfg *secretv1beta1.KeeperAWSConfig) field.ErrorList {
|
||||
errs := make(field.ErrorList, 0)
|
||||
|
||||
if cfg.Region == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "aws", "region"), "region must be present"))
|
||||
}
|
||||
|
||||
switch {
|
||||
case cfg.AccessKey == nil && cfg.AssumeRole == nil:
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "aws"), "one of `accessKey` or `assumeRole` must be present"))
|
||||
|
||||
case cfg.AccessKey != nil && cfg.AssumeRole != nil:
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "aws"), "only one of `accessKey` or `assumeRole` can be present"))
|
||||
|
||||
case cfg.AccessKey != nil:
|
||||
if err := validateCredentialValue(field.NewPath("spec", "aws", "accessKey", "accessKeyID"), cfg.AccessKey.AccessKeyID); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
if err := validateCredentialValue(field.NewPath("spec", "aws", "accessKey", "secretAccessKey"), cfg.AccessKey.SecretAccessKey); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
case cfg.AssumeRole != nil:
|
||||
if cfg.AssumeRole.AssumeRoleArn == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "aws", "assumeRole", "assumeRoleArn"), "arn of the role to assume must be present"))
|
||||
}
|
||||
if cfg.AssumeRole.ExternalID == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "aws", "assumeRole", "externalId"), "externalId must be present"))
|
||||
}
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func validateAzure(cfg *secretv1beta1.KeeperAzureConfig) field.ErrorList {
|
||||
errs := make(field.ErrorList, 0)
|
||||
|
||||
if cfg.KeyVaultName == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "azure", "keyVaultName"), "a `keyVaultName` is required"))
|
||||
}
|
||||
|
||||
if cfg.TenantID == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "azure", "tenantID"), "a `tenantID` is required"))
|
||||
}
|
||||
|
||||
if cfg.ClientID == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "azure", "clientID"), "a `clientID` is required"))
|
||||
}
|
||||
|
||||
if err := validateCredentialValue(field.NewPath("spec", "azure", "clientSecret"), cfg.ClientSecret); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func validateGcp(cfg *secretv1beta1.KeeperGCPConfig) field.ErrorList {
|
||||
errs := make(field.ErrorList, 0)
|
||||
|
||||
if cfg.ProjectID == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "gcp", "projectID"), "a `projectID` is required"))
|
||||
}
|
||||
|
||||
if cfg.CredentialsFile == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "gcp", "credentialsFile"), "a `credentialsFile` is required"))
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func validateHashiCorpVault(cfg *secretv1beta1.KeeperHashiCorpConfig) field.ErrorList {
|
||||
errs := make(field.ErrorList, 0)
|
||||
|
||||
if cfg.Address == "" {
|
||||
errs = append(errs, field.Required(field.NewPath("spec", "hashiCorpVault", "address"), "an `address` is required"))
|
||||
}
|
||||
|
||||
if err := validateCredentialValue(field.NewPath("spec", "hashiCorpVault", "token"), cfg.Token); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
return errs
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user