Compare commits

...

7 Commits

Author SHA1 Message Date
Georges Chaudy
1022f04063 Refactor Watch method to implement batch event processing and authorization 2026-01-09 10:25:50 +01:00
Georges Chaudy
5e7d0392a3 Refactor authorization checks to use batch checks instead of compile checks across multiple services
- Integrated batch authorization checks using the authz package in List functions for IAM and SecureValue services, improving efficiency in permission validation.
- Updated List functions to handle pagination and authorization in a more streamlined manner, reducing redundant checks.
- Enhanced the server's List method to support batch authorization for resource listing, ensuring proper access control.
- Refactored related test cases to validate the new batch authorization logic and ensure comprehensive coverage of various scenarios.
2026-01-09 10:25:36 +01:00
Georges Chaudy
aea8d434c9 point to authlib in branch 2026-01-08 15:33:35 +01:00
Georges Chaudy
cddb1f9fa6 point to authlib in branch 2026-01-06 17:01:09 +01:00
Georges Chaudy
d2c78f5799 Implement BatchCheck method in Authz service with comprehensive unit tests
- Added BatchCheck method to the Authz service, enabling multiple access checks in a single request with optimized batching.
- Implemented request validation, grouping checks by namespace and action to enhance performance.
- Developed extensive unit tests for BatchCheck, covering various scenarios including empty checks, invalid namespaces, and user permission checks.
- Enhanced caching behavior for permissions and integrated folder inheritance checks.
- Updated related test cases to ensure robust validation of the new functionality.
2026-01-06 16:59:24 +01:00
Georges Chaudy
4f3f9ebc04 Add unit tests for BatchCheck method in LegacyAccessClient
- Implemented multiple test cases to validate the behavior of the BatchCheck method, including scenarios for empty checks, unknown resources, admin permissions, unchecked verbs, and scope validation.
- Ensured proper handling of multiple checks with mixed results and the use of a resolver for resource mapping.
- Added tests for caching behavior based on action to optimize performance.
2026-01-06 16:59:24 +01:00
Georges Chaudy
1498970e74 Implement BatchCheck functionality in LegacyAccessClient and update related proto definitions
- Added BatchCheck method to LegacyAccessClient for handling batch authorization checks.
- Updated proto definitions to remove BatchCheckRequest and BatchCheckResponse messages, replacing them with a new structure.
- Adjusted related client and server implementations to align with the new BatchCheck structure.
- Modified tests to validate the new BatchCheck functionality and ensure proper integration with existing authorization logic.
2026-01-06 16:59:23 +01:00
26 changed files with 2842 additions and 1253 deletions

22
go.mod
View File

@@ -89,14 +89,14 @@ require (
github.com/gorilla/mux v1.8.1 // @grafana/grafana-backend-group github.com/gorilla/mux v1.8.1 // @grafana/grafana-backend-group
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // @grafana/grafana-app-platform-squad github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // @grafana/grafana-app-platform-squad
github.com/grafana/alerting v0.0.0-20251231150637-b7821017d69f // @grafana/alerting-backend github.com/grafana/alerting v0.0.0-20251231150637-b7821017d69f // @grafana/alerting-backend
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // @grafana/identity-access-team github.com/grafana/authlib v0.0.0-20260106131612-bb61e476969f // @grafana/identity-access-team
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 // @grafana/identity-access-team github.com/grafana/authlib/types v0.0.0-20260106131612-bb61e476969f // @grafana/identity-access-team
github.com/grafana/dataplane/examples v0.0.1 // @grafana/observability-metrics github.com/grafana/dataplane/examples v0.0.1 // @grafana/observability-metrics
github.com/grafana/dataplane/sdata v0.0.9 // @grafana/observability-metrics github.com/grafana/dataplane/sdata v0.0.9 // @grafana/observability-metrics
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // @grafana/grafana-backend-group github.com/grafana/dskit v0.0.0-20251204003651-27988664e6ff // @grafana/grafana-backend-group
github.com/grafana/e2e v0.1.1 // @grafana-app-platform-squad github.com/grafana/e2e v0.1.1 // @grafana-app-platform-squad
github.com/grafana/gofpdf v0.0.0-20250307124105-3b9c5d35577f // @grafana/sharing-squad github.com/grafana/gofpdf v0.0.0-20250307124105-3b9c5d35577f // @grafana/sharing-squad
github.com/grafana/gomemcache v0.0.0-20250318131618-74242eea118d // @grafana/grafana-operator-experience-squad github.com/grafana/gomemcache v0.0.0-20251127154401-74f93547077b // @grafana/grafana-operator-experience-squad
github.com/grafana/grafana-api-golang-client v0.27.0 // @grafana/alerting-backend github.com/grafana/grafana-api-golang-client v0.27.0 // @grafana/alerting-backend
github.com/grafana/grafana-app-sdk v0.48.7 // @grafana/grafana-app-platform-squad github.com/grafana/grafana-app-sdk v0.48.7 // @grafana/grafana-app-platform-squad
github.com/grafana/grafana-app-sdk/logging v0.48.7 // @grafana/grafana-app-platform-squad github.com/grafana/grafana-app-sdk/logging v0.48.7 // @grafana/grafana-app-platform-squad
@@ -151,7 +151,7 @@ require (
github.com/openfga/api/proto v0.0.0-20250909172242-b4b2a12f5c67 // @grafana/identity-access-team github.com/openfga/api/proto v0.0.0-20250909172242-b4b2a12f5c67 // @grafana/identity-access-team
github.com/openfga/language/pkg/go v0.2.0-beta.2.0.20251027165255-0f8f255e5f6c // @grafana/identity-access-team github.com/openfga/language/pkg/go v0.2.0-beta.2.0.20251027165255-0f8f255e5f6c // @grafana/identity-access-team
github.com/openfga/openfga v1.11.1 // @grafana/identity-access-team github.com/openfga/openfga v1.11.1 // @grafana/identity-access-team
github.com/opentracing-contrib/go-grpc v0.1.1 // @grafana/grafana-search-and-storage github.com/opentracing-contrib/go-grpc v0.1.2 // @grafana/grafana-search-and-storage
github.com/opentracing/opentracing-go v1.2.0 // @grafana/grafana-search-and-storage github.com/opentracing/opentracing-go v1.2.0 // @grafana/grafana-search-and-storage
github.com/openzipkin/zipkin-go v0.4.3 // @grafana/oss-big-tent github.com/openzipkin/zipkin-go v0.4.3 // @grafana/oss-big-tent
github.com/patrickmn/go-cache v2.1.0+incompatible // @grafana/alerting-backend github.com/patrickmn/go-cache v2.1.0+incompatible // @grafana/alerting-backend
@@ -472,7 +472,7 @@ require (
github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/hcl v1.0.1-vault-7 // indirect github.com/hashicorp/hcl v1.0.1-vault-7 // indirect
github.com/hashicorp/memberlist v0.5.2 // indirect github.com/hashicorp/memberlist v0.5.3 // indirect
github.com/hashicorp/serf v0.10.2 // indirect github.com/hashicorp/serf v0.10.2 // indirect
github.com/hashicorp/vault/api v1.20.0 // indirect github.com/hashicorp/vault/api v1.20.0 // indirect
github.com/hashicorp/yamux v0.1.2 // indirect github.com/hashicorp/yamux v0.1.2 // indirect
@@ -517,7 +517,7 @@ require (
github.com/mdlayher/socket v0.4.1 // indirect github.com/mdlayher/socket v0.4.1 // indirect
github.com/mdlayher/vsock v1.2.1 // indirect github.com/mdlayher/vsock v1.2.1 // indirect
github.com/mfridman/interpolate v0.0.2 // indirect github.com/mfridman/interpolate v0.0.2 // indirect
github.com/miekg/dns v1.1.63 // indirect github.com/miekg/dns v1.1.68 // indirect
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect
@@ -550,17 +550,17 @@ require (
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.124.1 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.124.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect github.com/opentracing-contrib/go-stdlib v1.1.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/perimeterx/marshmallow v1.1.5 // indirect github.com/perimeterx/marshmallow v1.1.5 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/pires/go-proxyproto v0.7.0 // indirect github.com/pires/go-proxyproto v0.8.1 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/exporter-toolkit v0.14.0 // indirect github.com/prometheus/exporter-toolkit v0.15.0 // indirect
github.com/prometheus/procfs v0.19.2 // indirect github.com/prometheus/procfs v0.19.2 // indirect
github.com/protocolbuffers/txtpbfmt v0.0.0-20251124094003-fcb97cc64c7b // indirect github.com/protocolbuffers/txtpbfmt v0.0.0-20251124094003-fcb97cc64c7b // indirect
github.com/puzpuzpuz/xsync/v2 v2.5.1 // indirect github.com/puzpuzpuz/xsync/v2 v2.5.1 // indirect
@@ -574,7 +574,7 @@ require (
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/segmentio/asm v1.2.0 // indirect github.com/segmentio/asm v1.2.0 // indirect
github.com/segmentio/encoding v0.5.3 // indirect github.com/segmentio/encoding v0.5.3 // indirect
github.com/sercand/kuberesolver/v6 v6.0.0 // indirect github.com/sercand/kuberesolver/v6 v6.0.1 // indirect
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
github.com/sethvargo/go-retry v0.3.0 // indirect github.com/sethvargo/go-retry v0.3.0 // indirect
github.com/shadowspore/fossil-delta v0.0.0-20241213113458-1d797d70cbe3 // indirect github.com/shadowspore/fossil-delta v0.0.0-20241213113458-1d797d70cbe3 // indirect

14
go.sum
View File

@@ -1629,14 +1629,20 @@ github.com/grafana/alerting v0.0.0-20251231150637-b7821017d69f h1:Br4SaUL3dnVopK
github.com/grafana/alerting v0.0.0-20251231150637-b7821017d69f/go.mod h1:l7v67cgP7x72ajB9UPZlumdrHqNztpKoqQ52cU8T3LU= github.com/grafana/alerting v0.0.0-20251231150637-b7821017d69f/go.mod h1:l7v67cgP7x72ajB9UPZlumdrHqNztpKoqQ52cU8T3LU=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o= github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg= github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib v0.0.0-20260106131612-bb61e476969f h1:OfVtnO3+Ficm7W69dFD5IaZWlMvOLIWBBnppE99dVkU=
github.com/grafana/authlib v0.0.0-20260106131612-bb61e476969f/go.mod h1:KUNx2Qz7mgh2tm2/TJXx0+uq5SkCrquCFI+dHln2Q50=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY= github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/authlib/types v0.0.0-20251203163023-dd5a97c606e3/go.mod h1:CZ5McGzO/q6lnRb8xvTODCC2bJniQoQ+gho0AVZC/zY=
github.com/grafana/authlib/types v0.0.0-20260106131612-bb61e476969f h1:5ZI6e22sGdg36MAIMJkH6PUHtZU/QuwAScNfgWNlK0I=
github.com/grafana/authlib/types v0.0.0-20260106131612-bb61e476969f/go.mod h1:j+YTXmAcD4zCNyl4QSNqYSEe/q9KgrH1btodnhK29hI=
github.com/grafana/dataplane/examples v0.0.1 h1:K9M5glueWyLoL4//H+EtTQq16lXuHLmOhb6DjSCahzA= github.com/grafana/dataplane/examples v0.0.1 h1:K9M5glueWyLoL4//H+EtTQq16lXuHLmOhb6DjSCahzA=
github.com/grafana/dataplane/examples v0.0.1/go.mod h1:h5YwY8s407/17XF5/dS8XrUtsTVV2RnuW8+m1Mp46mg= github.com/grafana/dataplane/examples v0.0.1/go.mod h1:h5YwY8s407/17XF5/dS8XrUtsTVV2RnuW8+m1Mp46mg=
github.com/grafana/dataplane/sdata v0.0.9 h1:AGL1LZnCUG4MnQtnWpBPbQ8ZpptaZs14w6kE/MWfg7s= github.com/grafana/dataplane/sdata v0.0.9 h1:AGL1LZnCUG4MnQtnWpBPbQ8ZpptaZs14w6kE/MWfg7s=
github.com/grafana/dataplane/sdata v0.0.9/go.mod h1:Jvs5ddpGmn6vcxT7tCTWAZ1mgi4sbcdFt9utQx5uMAU= github.com/grafana/dataplane/sdata v0.0.9/go.mod h1:Jvs5ddpGmn6vcxT7tCTWAZ1mgi4sbcdFt9utQx5uMAU=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4= github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4=
github.com/grafana/dskit v0.0.0-20251204003651-27988664e6ff/go.mod h1:/pHIcyeZJBZbtboXOjRtPaMl5KK+2VRdNJbCHDkpDYs=
github.com/grafana/e2e v0.1.1 h1:/b6xcv5BtoBnx8cZnCiey9DbjEc8z7gXHO5edoeRYxc= github.com/grafana/e2e v0.1.1 h1:/b6xcv5BtoBnx8cZnCiey9DbjEc8z7gXHO5edoeRYxc=
github.com/grafana/e2e v0.1.1/go.mod h1:RpNLgae5VT+BUHvPE+/zSypmOXKwEu4t+tnEMS1ATaE= github.com/grafana/e2e v0.1.1/go.mod h1:RpNLgae5VT+BUHvPE+/zSypmOXKwEu4t+tnEMS1ATaE=
github.com/grafana/go-mysql-server v0.20.1-grafana1 h1:yA4Mzt+tTdIlQutBUaiPnepULPQ7CS4hMu2GOpHqT6s= github.com/grafana/go-mysql-server v0.20.1-grafana1 h1:yA4Mzt+tTdIlQutBUaiPnepULPQ7CS4hMu2GOpHqT6s=
@@ -1645,6 +1651,7 @@ github.com/grafana/gofpdf v0.0.0-20250307124105-3b9c5d35577f h1:5xkjl5Y/j2QefJKO
github.com/grafana/gofpdf v0.0.0-20250307124105-3b9c5d35577f/go.mod h1:+O5QxOwwgP10jedZHapzXY+IPKTnzHBtIs5UUb9G+kI= github.com/grafana/gofpdf v0.0.0-20250307124105-3b9c5d35577f/go.mod h1:+O5QxOwwgP10jedZHapzXY+IPKTnzHBtIs5UUb9G+kI=
github.com/grafana/gomemcache v0.0.0-20250318131618-74242eea118d h1:oXRJlb9UjVsl6LhqBdbyAQ9YFhExwsj4bjh5vwMNRZY= github.com/grafana/gomemcache v0.0.0-20250318131618-74242eea118d h1:oXRJlb9UjVsl6LhqBdbyAQ9YFhExwsj4bjh5vwMNRZY=
github.com/grafana/gomemcache v0.0.0-20250318131618-74242eea118d/go.mod h1:j/s0jkda4UXTemDs7Pgw/vMT06alWc42CHisvYac0qw= github.com/grafana/gomemcache v0.0.0-20250318131618-74242eea118d/go.mod h1:j/s0jkda4UXTemDs7Pgw/vMT06alWc42CHisvYac0qw=
github.com/grafana/gomemcache v0.0.0-20251127154401-74f93547077b/go.mod h1:j/s0jkda4UXTemDs7Pgw/vMT06alWc42CHisvYac0qw=
github.com/grafana/grafana-api-golang-client v0.27.0 h1:zIwMXcbCB4n588i3O2N6HfNcQogCNTd/vPkEXTr7zX8= github.com/grafana/grafana-api-golang-client v0.27.0 h1:zIwMXcbCB4n588i3O2N6HfNcQogCNTd/vPkEXTr7zX8=
github.com/grafana/grafana-api-golang-client v0.27.0/go.mod h1:uNLZEmgKtTjHBtCQMwNn3qsx2mpMb8zU+7T4Xv3NR9Y= github.com/grafana/grafana-api-golang-client v0.27.0/go.mod h1:uNLZEmgKtTjHBtCQMwNn3qsx2mpMb8zU+7T4Xv3NR9Y=
github.com/grafana/grafana-app-sdk v0.48.7 h1:9mF7nqkqP0QUYYDlznoOt+GIyjzj45wGfUHB32u2ZMo= github.com/grafana/grafana-app-sdk v0.48.7 h1:9mF7nqkqP0QUYYDlznoOt+GIyjzj45wGfUHB32u2ZMo=
@@ -1797,6 +1804,7 @@ github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/memberlist v0.5.2 h1:rJoNPWZ0juJBgqn48gjy59K5H4rNgvUoM1kUD7bXiuI= github.com/hashicorp/memberlist v0.5.2 h1:rJoNPWZ0juJBgqn48gjy59K5H4rNgvUoM1kUD7bXiuI=
github.com/hashicorp/memberlist v0.5.2/go.mod h1:Ri9p/tRShbjYnpNf4FFPXG7wxEGY4Nrcn6E7jrVa//4= github.com/hashicorp/memberlist v0.5.2/go.mod h1:Ri9p/tRShbjYnpNf4FFPXG7wxEGY4Nrcn6E7jrVa//4=
github.com/hashicorp/memberlist v0.5.3/go.mod h1:h60o12SZn/ua/j0B6iKAZezA4eDaGsIuPO70eOaJ6WE=
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977VrmffaCX/OBm17dEVJUcWn5dW+eqs3aIJ/A= github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977VrmffaCX/OBm17dEVJUcWn5dW+eqs3aIJ/A=
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
@@ -2051,6 +2059,7 @@ github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY= github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs= github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
github.com/migueleliasweb/go-github-mock v1.1.0 h1:GKaOBPsrPGkAKgtfuWY8MclS1xR6MInkx1SexJucMwE= github.com/migueleliasweb/go-github-mock v1.1.0 h1:GKaOBPsrPGkAKgtfuWY8MclS1xR6MInkx1SexJucMwE=
github.com/migueleliasweb/go-github-mock v1.1.0/go.mod h1:pYe/XlGs4BGMfRY4vmeixVsODHnVDDhJ9zoi0qzSMHc= github.com/migueleliasweb/go-github-mock v1.1.0/go.mod h1:pYe/XlGs4BGMfRY4vmeixVsODHnVDDhJ9zoi0qzSMHc=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
@@ -2205,9 +2214,11 @@ github.com/openfga/openfga v1.11.1 h1:+cJBPi/J+RWPRg+cXOjwWEwjauiW8rdE3kEzcFy1ME
github.com/openfga/openfga v1.11.1/go.mod h1:MuTGr/ghY7t2sEGwS/59pq9SkqO0QY1kQLIe8Upt+G8= github.com/openfga/openfga v1.11.1/go.mod h1:MuTGr/ghY7t2sEGwS/59pq9SkqO0QY1kQLIe8Upt+G8=
github.com/opentracing-contrib/go-grpc v0.1.1 h1:Ws7IN1zyiL1DFqKQPhRXuKe5pLYzMfdxnC1qtajE2PE= github.com/opentracing-contrib/go-grpc v0.1.1 h1:Ws7IN1zyiL1DFqKQPhRXuKe5pLYzMfdxnC1qtajE2PE=
github.com/opentracing-contrib/go-grpc v0.1.1/go.mod h1:Nu6sz+4zzgxXu8rvKfnwjBEmHsuhTigxRwV2RhELrS8= github.com/opentracing-contrib/go-grpc v0.1.1/go.mod h1:Nu6sz+4zzgxXu8rvKfnwjBEmHsuhTigxRwV2RhELrS8=
github.com/opentracing-contrib/go-grpc v0.1.2/go.mod h1:glU6rl1Fhfp9aXUHkE36K2mR4ht8vih0ekOVlWKEUHM=
github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w=
github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w=
github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
github.com/opentracing-contrib/go-stdlib v1.1.0/go.mod h1:S0p+X9p6dcBkoMTL+Qq2VOvxKs9ys5PpYWXWqlCS0bQ=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@@ -2241,6 +2252,7 @@ github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwyq0Hs= github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwyq0Hs=
github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4= github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4=
github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
@@ -2319,6 +2331,7 @@ github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57J
github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q= github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q=
github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg= github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg=
github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA= github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA=
github.com/prometheus/exporter-toolkit v0.15.0/go.mod h1:OyRWd2iTo6Xge9Kedvv0IhCrJSBu36JCfJ2yVniRIYk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -2403,6 +2416,7 @@ github.com/segmentio/encoding v0.5.3 h1:OjMgICtcSFuNvQCdwqMCv9Tg7lEOXGwm1J5RPQcc
github.com/segmentio/encoding v0.5.3/go.mod h1:HS1ZKa3kSN32ZHVZ7ZLPLXWvOVIiZtyJnO1gPH1sKt0= github.com/segmentio/encoding v0.5.3/go.mod h1:HS1ZKa3kSN32ZHVZ7ZLPLXWvOVIiZtyJnO1gPH1sKt0=
github.com/sercand/kuberesolver/v6 v6.0.0 h1:ScvS2Ga9snVkpOahln/BCLySr3/iBAHJf25u66DweZ0= github.com/sercand/kuberesolver/v6 v6.0.0 h1:ScvS2Ga9snVkpOahln/BCLySr3/iBAHJf25u66DweZ0=
github.com/sercand/kuberesolver/v6 v6.0.0/go.mod h1:Dxkqms3OJadP5zirIBPLi9FV8Qpys3T3w40XPEcVsu0= github.com/sercand/kuberesolver/v6 v6.0.0/go.mod h1:Dxkqms3OJadP5zirIBPLi9FV8Qpys3T3w40XPEcVsu0=
github.com/sercand/kuberesolver/v6 v6.0.1/go.mod h1:C0tsTuRMONSY+Xf7pv7RMW1/JlewY1+wS8SZE+1lf1s=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE=

View File

@@ -317,6 +317,7 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.29.
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/propagator v0.53.0 h1:RAHqDHJmNMLe6JvDoRIlXmb72w+62Ue/k5p/qP9yfAg= github.com/GoogleCloudPlatform/opentelemetry-operations-go/propagator v0.53.0 h1:RAHqDHJmNMLe6JvDoRIlXmb72w+62Ue/k5p/qP9yfAg=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/propagator v0.53.0/go.mod h1:dtCRwgvytbGKWdlrjMOg9geBoRwRpCYWIOM/JhVsDIc= github.com/GoogleCloudPlatform/opentelemetry-operations-go/propagator v0.53.0/go.mod h1:dtCRwgvytbGKWdlrjMOg9geBoRwRpCYWIOM/JhVsDIc=
github.com/HdrHistogram/hdrhistogram-go v1.2.0/go.mod h1:CiIeGiHSd06zjX+FypuEJ5EQ07KKtxZ+8J6hszwVQig=
github.com/IBM/go-sdk-core/v5 v5.17.4 h1:VGb9+mRrnS2HpHZFM5hy4J6ppIWnwNrw0G+tLSgcJLc= github.com/IBM/go-sdk-core/v5 v5.17.4 h1:VGb9+mRrnS2HpHZFM5hy4J6ppIWnwNrw0G+tLSgcJLc=
github.com/IBM/go-sdk-core/v5 v5.17.4/go.mod h1:KsAAI7eStAWwQa4F96MLy+whYSh39JzNjklZRbN/8ns= github.com/IBM/go-sdk-core/v5 v5.17.4/go.mod h1:KsAAI7eStAWwQa4F96MLy+whYSh39JzNjklZRbN/8ns=
github.com/IBM/ibm-cos-sdk-go v1.11.0 h1:Jp55NLN3OvBwucMGpP5wNybyjncsmTZ9+GPHai/1cE8= github.com/IBM/ibm-cos-sdk-go v1.11.0 h1:Jp55NLN3OvBwucMGpP5wNybyjncsmTZ9+GPHai/1cE8=
@@ -879,7 +880,6 @@ github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQ
github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo= github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo=
github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0= github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0=
github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w= github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w=
github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdyt4fX9IeJ0=
github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY=
@@ -890,19 +890,23 @@ github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/alerting v0.0.0-20250729175202-b4b881b7b263/go.mod h1:VKxaR93Gff0ZlO2sPcdPVob1a/UzArFEW5zx3Bpyhls= github.com/grafana/alerting v0.0.0-20250729175202-b4b881b7b263/go.mod h1:VKxaR93Gff0ZlO2sPcdPVob1a/UzArFEW5zx3Bpyhls=
github.com/grafana/alerting v0.0.0-20251009192429-9427c24835ae/go.mod h1:VGjS5gDwWEADPP6pF/drqLxEImgeuHlEW5u8E5EfIrM= github.com/grafana/alerting v0.0.0-20251009192429-9427c24835ae/go.mod h1:VGjS5gDwWEADPP6pF/drqLxEImgeuHlEW5u8E5EfIrM=
github.com/grafana/alerting v0.0.0-20251223160021-926c74910196/go.mod h1:l7v67cgP7x72ajB9UPZlumdrHqNztpKoqQ52cU8T3LU=
github.com/grafana/authlib v0.0.0-20250710201142-9542f2f28d43/go.mod h1:1fWkOiL+m32NBgRHZtlZGz2ji868tPZACYbqP3nBRJI= github.com/grafana/authlib v0.0.0-20250710201142-9542f2f28d43/go.mod h1:1fWkOiL+m32NBgRHZtlZGz2ji868tPZACYbqP3nBRJI=
github.com/grafana/authlib/types v0.0.0-20250710201142-9542f2f28d43/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/authlib/types v0.0.0-20250710201142-9542f2f28d43/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw= github.com/grafana/authlib/types v0.0.0-20250926065801-df98203cff37/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/authlib/types v0.0.0-20251203163023-dd5a97c606e3 h1:T4AMrL8ZB1U25m/+FOmkqWPnz0X7u/Oqj1ISg4OrS2c=
github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4Vp68H0tp/0iN17DM2ehRo1rLEdOFe/gB8I= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4Vp68H0tp/0iN17DM2ehRo1rLEdOFe/gB8I=
github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw=
github.com/grafana/codejen v0.0.4-0.20230321061741-77f656893a3d/go.mod h1:zmwwM/DRyQB7pfuBjTWII3CWtxcXh8LTwAYGfDfpR6s= github.com/grafana/codejen v0.0.4-0.20230321061741-77f656893a3d/go.mod h1:zmwwM/DRyQB7pfuBjTWII3CWtxcXh8LTwAYGfDfpR6s=
github.com/grafana/cog v0.0.43/go.mod h1:TDunc7TYF7EfzjwFOlC5AkMe3To/U2KqyyG3QVvrF38= github.com/grafana/cog v0.0.43/go.mod h1:TDunc7TYF7EfzjwFOlC5AkMe3To/U2KqyyG3QVvrF38=
github.com/grafana/dskit v0.0.0-20250611075409-46f51e1ce914/go.mod h1:OiN4P4aC6LwLzLbEupH3Ue83VfQoNMfG48rsna8jI/E= github.com/grafana/dskit v0.0.0-20250611075409-46f51e1ce914/go.mod h1:OiN4P4aC6LwLzLbEupH3Ue83VfQoNMfG48rsna8jI/E=
github.com/grafana/dskit v0.0.0-20250818234656-8ff9c6532e85/go.mod h1:kImsvJ1xnmeT9Z6StK+RdEKLzlpzBsKwJbEQfmBJdFs= github.com/grafana/dskit v0.0.0-20250818234656-8ff9c6532e85/go.mod h1:kImsvJ1xnmeT9Z6StK+RdEKLzlpzBsKwJbEQfmBJdFs=
github.com/grafana/dskit v0.0.0-20251204003651-27988664e6ff h1:eDbrQsfY1Y3vMfuy5suGI2DRNC1DFBcZMFMlNbPrdiE=
github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak= github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak=
github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90= github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90=
github.com/grafana/go-mysql-server v0.20.1-0.20251027172658-317a8d46ffa4/go.mod h1:EeYR0apo+8j2Dyxmn2ghkPlirO2S5mT1xHBrA+Efys8= github.com/grafana/go-mysql-server v0.20.1-0.20251027172658-317a8d46ffa4/go.mod h1:EeYR0apo+8j2Dyxmn2ghkPlirO2S5mT1xHBrA+Efys8=
github.com/grafana/gomemcache v0.0.0-20250228145437-da7b95fd2ac1/go.mod h1:j/s0jkda4UXTemDs7Pgw/vMT06alWc42CHisvYac0qw= github.com/grafana/gomemcache v0.0.0-20250228145437-da7b95fd2ac1/go.mod h1:j/s0jkda4UXTemDs7Pgw/vMT06alWc42CHisvYac0qw=
github.com/grafana/gomemcache v0.0.0-20251127154401-74f93547077b h1:5qp8/5YPt/Z2RW5QHsxvwE05+LWQYIXydP2MwOkMfb8=
github.com/grafana/grafana-app-sdk v0.40.1/go.mod h1:4P8h7VB6KcDjX9bAoBQc6IP8iNylxe6bSXLR9gA39gM= github.com/grafana/grafana-app-sdk v0.40.1/go.mod h1:4P8h7VB6KcDjX9bAoBQc6IP8iNylxe6bSXLR9gA39gM=
github.com/grafana/grafana-app-sdk v0.40.2/go.mod h1:BbNXPNki3mtbkWxYqJsyA1Cj9AShSyaY33z8WkyfVv0= github.com/grafana/grafana-app-sdk v0.40.2/go.mod h1:BbNXPNki3mtbkWxYqJsyA1Cj9AShSyaY33z8WkyfVv0=
github.com/grafana/grafana-app-sdk v0.41.0 h1:SYHN3U7B1myRKY3UZZDkFsue9TDmAOap0UrQVTqtYBU= github.com/grafana/grafana-app-sdk v0.41.0 h1:SYHN3U7B1myRKY3UZZDkFsue9TDmAOap0UrQVTqtYBU=
@@ -1024,6 +1028,7 @@ github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI
github.com/hashicorp/mdns v1.0.5 h1:1M5hW1cunYeoXOqHwEb/GBDDHAFo0Yqb/uz/beC6LbE= github.com/hashicorp/mdns v1.0.5 h1:1M5hW1cunYeoXOqHwEb/GBDDHAFo0Yqb/uz/beC6LbE=
github.com/hashicorp/mdns v1.0.5/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/mdns v1.0.5/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/memberlist v0.5.3 h1:tQ1jOCypD0WvMemw/ZhhtH+PWpzcftQvgCorLu0hndk=
github.com/hashicorp/raft v1.7.0 h1:4u24Qn6lQ6uwziM++UgsyiT64Q8GyRn43CV41qPiz1o= github.com/hashicorp/raft v1.7.0 h1:4u24Qn6lQ6uwziM++UgsyiT64Q8GyRn43CV41qPiz1o=
github.com/hashicorp/raft v1.7.0/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0= github.com/hashicorp/raft v1.7.0/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0=
github.com/hashicorp/raft-wal v0.4.1 h1:aU8XZ6x8R9BAIB/83Z1dTDtXvDVmv9YVYeXxd/1QBSA= github.com/hashicorp/raft-wal v0.4.1 h1:aU8XZ6x8R9BAIB/83Z1dTDtXvDVmv9YVYeXxd/1QBSA=
@@ -1184,6 +1189,7 @@ github.com/mfridman/xflag v0.1.0/go.mod h1:/483ywM5ZO5SuMVjrIGquYNE5CzLrj5Ux/LxW
github.com/microcosm-cc/bluemonday v1.0.25 h1:4NEwSfiJ+Wva0VxN5B8OwMicaJvD8r9tlJWm9rtloEg= github.com/microcosm-cc/bluemonday v1.0.25 h1:4NEwSfiJ+Wva0VxN5B8OwMicaJvD8r9tlJWm9rtloEg=
github.com/microcosm-cc/bluemonday v1.0.25/go.mod h1:ZIOjCQp1OrzBBPIJmfX4qDYFuhU02nx4bn030ixfHLE= github.com/microcosm-cc/bluemonday v1.0.25/go.mod h1:ZIOjCQp1OrzBBPIJmfX4qDYFuhU02nx4bn030ixfHLE=
github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY=
github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/minio/minio-go/v7 v7.0.75/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8= github.com/minio/minio-go/v7 v7.0.75/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8=
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
@@ -1343,6 +1349,8 @@ github.com/openfga/api/proto v0.0.0-20250127102726-f9709139a369/go.mod h1:m74TNg
github.com/openfga/language/pkg/go v0.2.0-beta.2.0.20250428093642-7aeebe78bbfe/go.mod h1:5Z0pbTT7Jz/oQFLfadb+C5t5NwHrduAO7j7L07Ec1GM= github.com/openfga/language/pkg/go v0.2.0-beta.2.0.20250428093642-7aeebe78bbfe/go.mod h1:5Z0pbTT7Jz/oQFLfadb+C5t5NwHrduAO7j7L07Ec1GM=
github.com/openfga/openfga v1.10.0/go.mod h1:6/m4GTwQsqECsGYQVD3t5sCX97rh3smnmxbMa3YAtJk= github.com/openfga/openfga v1.10.0/go.mod h1:6/m4GTwQsqECsGYQVD3t5sCX97rh3smnmxbMa3YAtJk=
github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo=
github.com/opentracing-contrib/go-grpc v0.1.2 h1:MP16Ozc59kqqwn1v18aQxpeGZhsBanJ2iurZYaQSZ+g=
github.com/opentracing-contrib/go-stdlib v1.1.0 h1:cZBWc4pA4e65tqTJddbflK435S0tDImj6c9BMvkdUH0=
github.com/oschwald/geoip2-golang v1.11.0 h1:hNENhCn1Uyzhf9PTmquXENiWS6AlxAEnBII6r8krA3w= github.com/oschwald/geoip2-golang v1.11.0 h1:hNENhCn1Uyzhf9PTmquXENiWS6AlxAEnBII6r8krA3w=
github.com/oschwald/geoip2-golang v1.11.0/go.mod h1:P9zG+54KPEFOliZ29i7SeYZ/GM6tfEL+rgSn03hYuUo= github.com/oschwald/geoip2-golang v1.11.0/go.mod h1:P9zG+54KPEFOliZ29i7SeYZ/GM6tfEL+rgSn03hYuUo=
github.com/oschwald/maxminddb-golang v1.13.0 h1:R8xBorY71s84yO06NgTmQvqvTvlS/bnYZrrWX1MElnU= github.com/oschwald/maxminddb-golang v1.13.0 h1:R8xBorY71s84yO06NgTmQvqvTvlS/bnYZrrWX1MElnU=
@@ -1365,6 +1373,7 @@ github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2
github.com/phpdave11/gofpdf v1.4.2 h1:KPKiIbfwbvC/wOncwhrpRdXVj2CZTCFlw4wnoyjtHfQ= github.com/phpdave11/gofpdf v1.4.2 h1:KPKiIbfwbvC/wOncwhrpRdXVj2CZTCFlw4wnoyjtHfQ=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
github.com/pkg/sftp v1.13.1 h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs= github.com/pkg/sftp v1.13.1 h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs=
@@ -1389,6 +1398,7 @@ github.com/prometheus/common v0.67.1/go.mod h1:RpmT9v35q2Y+lsieQsdOh5sXZ6ajUGC8N
github.com/prometheus/common v0.67.2/go.mod h1:63W3KZb1JOKgcjlIr64WW/LvFGAqKPj0atm+knVGEko= github.com/prometheus/common v0.67.2/go.mod h1:63W3KZb1JOKgcjlIr64WW/LvFGAqKPj0atm+knVGEko=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97/go.mod h1:LoBCZeRh+5hX+fSULNyFnagYlQG/gBsyA/deNzROkq8= github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97/go.mod h1:LoBCZeRh+5hX+fSULNyFnagYlQG/gBsyA/deNzROkq8=
github.com/prometheus/exporter-toolkit v0.15.0 h1:Pcle5sSViwR1x0gdPd0wtYrPQENBieQAM7TmT0qtb2U=
github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ=
github.com/prometheus/statsd_exporter v0.26.1 h1:ucbIAdPmwAUcA+dU+Opok8Qt81Aw8HanlO+2N/Wjv7w= github.com/prometheus/statsd_exporter v0.26.1 h1:ucbIAdPmwAUcA+dU+Opok8Qt81Aw8HanlO+2N/Wjv7w=
@@ -1450,6 +1460,7 @@ github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcV
github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ=
github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY=
github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ=
github.com/sercand/kuberesolver/v6 v6.0.1 h1:XZUTA0gy/lgDYp/UhEwv7Js24F1j8NJ833QrWv0Xux4=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
@@ -1946,7 +1957,9 @@ golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc= golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ= golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ=
@@ -1959,6 +1972,7 @@ golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5N
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ= golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ=
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/exp v0.0.0-20250811191247-51f88131bc50/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg= golang.org/x/exp v0.0.0-20250811191247-51f88131bc50/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg=
golang.org/x/exp v0.0.0-20251125195548-87e1e737ad39/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM= golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM=
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.25.0 h1:Y6uW6rH1y5y/LK1J8BPWZtr6yZ7hrsy6hFrXjgsc2fQ= golang.org/x/image v0.25.0 h1:Y6uW6rH1y5y/LK1J8BPWZtr6yZ7hrsy6hFrXjgsc2fQ=
@@ -2130,6 +2144,7 @@ google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c/go.
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk= google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk=
google.golang.org/genproto/googleapis/api v0.0.0-20251111163417-95abcf5c77ba/go.mod h1:G5IanEx8/PgI9w6CFcYQf7jMtHQhZruvfM1i3qOqk5U= google.golang.org/genproto/googleapis/api v0.0.0-20251111163417-95abcf5c77ba/go.mod h1:G5IanEx8/PgI9w6CFcYQf7jMtHQhZruvfM1i3qOqk5U=
google.golang.org/genproto/googleapis/api v0.0.0-20251124214823-79d6a2a48846/go.mod h1:Fk4kyraUvqD7i5H6S43sj2W98fbZa75lpZz/eUyhfO0=
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
google.golang.org/genproto/googleapis/bytestream v0.0.0-20250603155806-513f23925822 h1:zWFRixYR5QlotL+Uv3YfsPRENIrQFXiGs+iwqel6fOQ= google.golang.org/genproto/googleapis/bytestream v0.0.0-20250603155806-513f23925822 h1:zWFRixYR5QlotL+Uv3YfsPRENIrQFXiGs+iwqel6fOQ=
google.golang.org/genproto/googleapis/bytestream v0.0.0-20250603155806-513f23925822/go.mod h1:h6yxum/C2qRb4txaZRLDHK8RyS0H/o2oEDeKY4onY/Y= google.golang.org/genproto/googleapis/bytestream v0.0.0-20250603155806-513f23925822/go.mod h1:h6yxum/C2qRb4txaZRLDHK8RyS0H/o2oEDeKY4onY/Y=

View File

@@ -4,10 +4,10 @@ import (
"context" "context"
"strconv" "strconv"
"github.com/grafana/authlib/authz"
authlib "github.com/grafana/authlib/types" authlib "github.com/grafana/authlib/types"
iamv0alpha1 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1" iamv0alpha1 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/apimachinery/utils"
legacyiamv0 "github.com/grafana/grafana/pkg/apis/iam/v0alpha1" legacyiamv0 "github.com/grafana/grafana/pkg/apis/iam/v0alpha1"
"github.com/grafana/grafana/pkg/services/apiserver/endpoints/request" "github.com/grafana/grafana/pkg/services/apiserver/endpoints/request"
@@ -54,10 +54,10 @@ type ListResponse[T Resource] struct {
type ListFunc[T Resource] func(ctx context.Context, ns authlib.NamespaceInfo, p Pagination) (*ListResponse[T], error) type ListFunc[T Resource] func(ctx context.Context, ns authlib.NamespaceInfo, p Pagination) (*ListResponse[T], error)
// List is a helper function that will perform access check on resources if // List is a helper function that will perform access check on resources if
// prvovided with a authlib.AccessClient. // provided with a authlib.AccessClient.
func List[T Resource]( func List[T Resource](
ctx context.Context, ctx context.Context,
resource utils.ResourceInfo, resourceInfo utils.ResourceInfo,
ac authlib.AccessClient, ac authlib.AccessClient,
p Pagination, p Pagination,
fn ListFunc[T], fn ListFunc[T],
@@ -67,63 +67,86 @@ func List[T Resource](
return nil, err return nil, err
} }
ident, err := identity.GetRequester(ctx)
if err != nil {
return nil, err
}
check := func(_, _ string) bool { return true }
if ac != nil {
var err error
check, _, err = ac.Compile(ctx, ident, authlib.ListRequest{
Resource: resource.GroupResource().Resource,
Group: resource.GroupResource().Group,
Verb: "list",
Namespace: ns.Value,
})
if err != nil {
return nil, err
}
}
res := &ListResponse[T]{Items: make([]T, 0, p.Limit)} res := &ListResponse[T]{Items: make([]T, 0, p.Limit)}
first, err := fn(ctx, ns, p) first, err := fn(ctx, ns, p)
if err != nil { if err != nil {
return nil, err return nil, err
} }
for _, item := range first.Items {
if !check(item.AuthID(), "") {
continue
}
res.Items = append(res.Items, item)
}
res.Continue = first.Continue res.Continue = first.Continue
res.RV = first.RV res.RV = first.RV
// If no access client, skip authorization
if ac == nil {
res.Items = append(res.Items, first.Items...)
for len(res.Items) < int(p.Limit) && res.Continue != 0 {
r, err := fn(ctx, ns, Pagination{Limit: p.Limit - int64(len(res.Items)), Continue: res.Continue})
if err != nil {
return nil, err
}
res.Items = append(res.Items, r.Items...)
res.Continue = r.Continue
}
return res, nil
}
// Use FilterAuthorized to batch authorize items
extractFn := func(item T) authz.BatchCheckItem {
return authz.BatchCheckItem{
Name: item.AuthID(),
Folder: "",
Verb: "list",
Group: resourceInfo.GroupResource().Group,
Resource: resourceInfo.GroupResource().Resource,
Namespace: ns.Value,
}
}
// Convert first batch to iter.Seq and filter
firstCandidates := func(yield func(T) bool) {
for _, item := range first.Items {
if !yield(item) {
return
}
}
}
for item, err := range authz.FilterAuthorized(ctx, ac, firstCandidates, extractFn).Items {
if err != nil {
return nil, err
}
res.Items = append(res.Items, item)
}
outer: outer:
for len(res.Items) < int(p.Limit) && res.Continue != 0 { for len(res.Items) < int(p.Limit) && res.Continue != 0 {
// FIXME: it is not optimal to reduce the amout we look for here but it is the easiest way to // FIXME: it is not optimal to reduce the amount we look for here but it is the easiest way to
// correctly handle pagination and continue tokens // correctly handle pagination and continue tokens
r, err := fn(ctx, ns, Pagination{Limit: p.Limit - int64(len(res.Items)), Continue: res.Continue}) r, err := fn(ctx, ns, Pagination{Limit: p.Limit - int64(len(res.Items)), Continue: res.Continue})
if err != nil { if err != nil {
return nil, err return nil, err
} }
for _, item := range r.Items { candidates := func(yield func(T) bool) {
if len(res.Items) == int(p.Limit) { for _, item := range r.Items {
if !yield(item) {
return
}
}
}
for item, authErr := range authz.FilterAuthorized(ctx, ac, candidates, extractFn).Items {
if authErr != nil {
return nil, authErr
}
if len(res.Items) >= int(p.Limit) {
res.Continue = r.Continue res.Continue = r.Continue
break outer break outer
} }
if !check(item.AuthID(), "") {
continue
}
res.Items = append(res.Items, item) res.Items = append(res.Items, item)
} }
res.Continue = r.Continue
} }
return res, nil return res, nil

View File

@@ -6,6 +6,7 @@ import (
"strconv" "strconv"
"time" "time"
"github.com/grafana/authlib/authz"
claims "github.com/grafana/authlib/types" claims "github.com/grafana/authlib/types"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
@@ -297,35 +298,38 @@ func (s *SecureValueService) List(ctx context.Context, namespace xkube.Namespace
s.metrics.SecureValueListDuration.WithLabelValues(strconv.FormatBool(success)).Observe(time.Since(start).Seconds()) s.metrics.SecureValueListDuration.WithLabelValues(strconv.FormatBool(success)).Observe(time.Since(start).Seconds())
}() }()
user, ok := claims.AuthInfoFrom(ctx)
if !ok {
return nil, fmt.Errorf("missing auth info in context")
}
hasPermissionFor, _, err := s.accessClient.Compile(ctx, user, claims.ListRequest{
Group: secretv1beta1.APIGroup,
Resource: secretv1beta1.SecureValuesResourceInfo.GetName(),
Namespace: namespace.String(),
Verb: utils.VerbGet, // Why not VerbList?
})
if err != nil {
return nil, fmt.Errorf("failed to compile checker: %w", err)
}
secureValuesMetadata, err := s.secureValueMetadataStorage.List(ctx, namespace) secureValuesMetadata, err := s.secureValueMetadataStorage.List(ctx, namespace)
if err != nil { if err != nil {
return nil, fmt.Errorf("fetching secure values from storage: %+w", err) return nil, fmt.Errorf("fetching secure values from storage: %+w", err)
} }
// Convert slice to iter.Seq
candidates := func(yield func(secretv1beta1.SecureValue) bool) {
for _, m := range secureValuesMetadata {
if !yield(m) {
return
}
}
}
extractFn := func(sv secretv1beta1.SecureValue) authz.BatchCheckItem {
return authz.BatchCheckItem{
Name: sv.Name,
Folder: "",
Verb: utils.VerbGet, // Why not VerbList?
Group: secretv1beta1.APIGroup,
Resource: secretv1beta1.SecureValuesResourceInfo.GetName(),
Namespace: namespace.String(),
}
}
out := make([]secretv1beta1.SecureValue, 0) out := make([]secretv1beta1.SecureValue, 0)
for _, metadata := range secureValuesMetadata { for item, err := range authz.FilterAuthorized(ctx, s.accessClient, candidates, extractFn).Items {
// Check whether the user has permission to access this specific SecureValue in the namespace. if err != nil {
if !hasPermissionFor(metadata.Name, "") { return nil, fmt.Errorf("failed to check authorization: %w", err)
continue
} }
out = append(out, item)
out = append(out, metadata)
} }
return &secretv1beta1.SecureValueList{ return &secretv1beta1.SecureValueList{

View File

@@ -167,3 +167,99 @@ func (c *LegacyAccessClient) Compile(ctx context.Context, id claims.AuthInfo, re
return check(fmt.Sprintf("%s:%s:%s", opts.Resource, opts.Attr, name)) return check(fmt.Sprintf("%s:%s:%s", opts.Resource, opts.Attr, name))
}, claims.NoopZookie{}, nil }, claims.NoopZookie{}, nil
} }
func (c *LegacyAccessClient) BatchCheck(ctx context.Context, id claims.AuthInfo, req claims.BatchCheckRequest) (claims.BatchCheckResponse, error) {
ident, ok := id.(identity.Requester)
if !ok {
return claims.BatchCheckResponse{}, errors.New("expected identity.Requester for legacy access control")
}
results := make(map[string]claims.BatchCheckResult, len(req.Checks))
// Cache checkers by action to avoid recreating them for each check
checkerCache := make(map[string]func(scopes ...string) bool)
for _, check := range req.Checks {
opts, ok := c.opts[check.Resource]
if !ok {
// For now w fallback to grafana admin if no options are found for resource.
if ident.GetIsGrafanaAdmin() {
results[check.CorrelationID] = claims.BatchCheckResult{Allowed: true}
} else {
results[check.CorrelationID] = claims.BatchCheckResult{Allowed: false}
}
continue
}
// Check if verb should be skipped
if opts.Unchecked[check.Verb] {
results[check.CorrelationID] = claims.BatchCheckResult{Allowed: true}
continue
}
action, ok := opts.Mapping[check.Verb]
if !ok {
results[check.CorrelationID] = claims.BatchCheckResult{
Allowed: false,
Error: fmt.Errorf("missing action for %s %s", check.Verb, check.Resource),
}
continue
}
// Get or create cached checker for this action
checker, ok := checkerCache[action]
if !ok {
checker = Checker(ident, action)
checkerCache[action] = checker
}
// Handle list and create verbs (no specific name)
// TODO: Should we allow list/create without name in a BatchCheck request?
if check.Name == "" {
if check.Verb == utils.VerbList || check.Verb == utils.VerbCreate {
// For list/create without name, check if user has the action at all
// TODO: Is this correct for Create?
results[check.CorrelationID] = claims.BatchCheckResult{
Allowed: len(ident.GetPermissions()[action]) > 0,
}
} else {
results[check.CorrelationID] = claims.BatchCheckResult{
Allowed: false,
Error: fmt.Errorf("unhandled authorization: %s %s", check.Group, check.Verb),
}
}
continue
}
// Check with resolver or direct scope
var allowed bool
if opts.Resolver != nil {
ns, err := claims.ParseNamespace(check.Namespace)
if err != nil {
results[check.CorrelationID] = claims.BatchCheckResult{
Allowed: false,
Error: err,
}
continue
}
scopes, err := opts.Resolver.Resolve(ctx, ns, check.Name)
if err != nil {
results[check.CorrelationID] = claims.BatchCheckResult{
Allowed: false,
Error: err,
}
continue
}
allowed = checker(scopes...)
} else {
allowed = checker(fmt.Sprintf("%s:%s:%s", opts.Resource, opts.Attr, check.Name))
}
results[check.CorrelationID] = claims.BatchCheckResult{Allowed: allowed}
}
return claims.BatchCheckResponse{
Results: results,
Zookie: claims.NoopZookie{},
}, nil
}

View File

@@ -136,6 +136,220 @@ func TestLegacyAccessClient_Check(t *testing.T) {
}) })
} }
func TestLegacyAccessClient_BatchCheck(t *testing.T) {
ac := acimpl.ProvideAccessControl(featuremgmt.WithFeatures())
t.Run("should return empty results for empty checks", func(t *testing.T) {
a := accesscontrol.NewLegacyAccessClient(ac)
res, err := a.BatchCheck(context.Background(), &identity.StaticRequester{}, authlib.BatchCheckRequest{
Checks: []authlib.BatchCheckItem{},
})
assert.NoError(t, err)
assert.Empty(t, res.Results)
})
t.Run("should reject unknown resource for non-admin", func(t *testing.T) {
a := accesscontrol.NewLegacyAccessClient(ac)
res, err := a.BatchCheck(context.Background(), &identity.StaticRequester{}, authlib.BatchCheckRequest{
Checks: []authlib.BatchCheckItem{
{CorrelationID: "check-1", Verb: "get", Resource: "unknown", Name: "1"},
},
})
assert.NoError(t, err)
assert.False(t, res.Results["check-1"].Allowed)
})
t.Run("should allow unknown resource for grafana admin", func(t *testing.T) {
a := accesscontrol.NewLegacyAccessClient(ac)
res, err := a.BatchCheck(context.Background(), &identity.StaticRequester{IsGrafanaAdmin: true}, authlib.BatchCheckRequest{
Checks: []authlib.BatchCheckItem{
{CorrelationID: "check-1", Verb: "get", Resource: "unknown", Name: "1"},
},
})
assert.NoError(t, err)
assert.True(t, res.Results["check-1"].Allowed)
})
t.Run("should allow unchecked verbs", func(t *testing.T) {
a := accesscontrol.NewLegacyAccessClient(ac, accesscontrol.ResourceAuthorizerOptions{
Resource: "dashboards",
Attr: "uid",
Unchecked: map[string]bool{"get": true},
})
res, err := a.BatchCheck(context.Background(), &identity.StaticRequester{}, authlib.BatchCheckRequest{
Checks: []authlib.BatchCheckItem{
{CorrelationID: "check-1", Verb: "get", Resource: "dashboards", Name: "1"},
},
})
assert.NoError(t, err)
assert.True(t, res.Results["check-1"].Allowed)
})
t.Run("should return error for missing action mapping", func(t *testing.T) {
a := accesscontrol.NewLegacyAccessClient(ac, accesscontrol.ResourceAuthorizerOptions{
Resource: "dashboards",
Attr: "uid",
Mapping: map[string]string{}, // Empty mapping
})
res, err := a.BatchCheck(context.Background(), &identity.StaticRequester{}, authlib.BatchCheckRequest{
Checks: []authlib.BatchCheckItem{
{CorrelationID: "check-1", Verb: "get", Resource: "dashboards", Name: "1"},
},
})
assert.NoError(t, err)
assert.False(t, res.Results["check-1"].Allowed)
assert.Error(t, res.Results["check-1"].Error)
})
t.Run("should allow when user has correct scope", func(t *testing.T) {
a := accesscontrol.NewLegacyAccessClient(ac, accesscontrol.ResourceAuthorizerOptions{
Resource: "dashboards",
Attr: "uid",
Mapping: map[string]string{"get": "dashboards:read"},
})
ident := newIdent(accesscontrol.Permission{Action: "dashboards:read", Scope: "dashboards:uid:1"})
res, err := a.BatchCheck(context.Background(), ident, authlib.BatchCheckRequest{
Checks: []authlib.BatchCheckItem{
{CorrelationID: "check-1", Verb: "get", Resource: "dashboards", Name: "1"},
},
})
assert.NoError(t, err)
assert.True(t, res.Results["check-1"].Allowed)
})
t.Run("should reject when user has wrong scope", func(t *testing.T) {
a := accesscontrol.NewLegacyAccessClient(ac, accesscontrol.ResourceAuthorizerOptions{
Resource: "dashboards",
Attr: "uid",
Mapping: map[string]string{"get": "dashboards:read"},
})
ident := newIdent(accesscontrol.Permission{Action: "dashboards:read", Scope: "dashboards:uid:2"})
res, err := a.BatchCheck(context.Background(), ident, authlib.BatchCheckRequest{
Checks: []authlib.BatchCheckItem{
{CorrelationID: "check-1", Verb: "get", Resource: "dashboards", Name: "1"},
},
})
assert.NoError(t, err)
assert.False(t, res.Results["check-1"].Allowed)
})
t.Run("should handle list without name", func(t *testing.T) {
a := accesscontrol.NewLegacyAccessClient(ac, accesscontrol.ResourceAuthorizerOptions{
Resource: "dashboards",
Attr: "uid",
Mapping: map[string]string{"list": "dashboards:read"},
})
ident := newIdent(accesscontrol.Permission{Action: "dashboards:read", Scope: "dashboards:uid:*"})
res, err := a.BatchCheck(context.Background(), ident, authlib.BatchCheckRequest{
Checks: []authlib.BatchCheckItem{
{CorrelationID: "check-1", Verb: "list", Resource: "dashboards", Name: ""},
},
})
assert.NoError(t, err)
assert.True(t, res.Results["check-1"].Allowed)
})
t.Run("should handle multiple checks with mixed results", func(t *testing.T) {
a := accesscontrol.NewLegacyAccessClient(ac, accesscontrol.ResourceAuthorizerOptions{
Resource: "dashboards",
Attr: "uid",
Mapping: map[string]string{"get": "dashboards:read"},
})
ident := newIdent(
accesscontrol.Permission{Action: "dashboards:read", Scope: "dashboards:uid:1"},
accesscontrol.Permission{Action: "dashboards:read", Scope: "dashboards:uid:3"},
)
res, err := a.BatchCheck(context.Background(), ident, authlib.BatchCheckRequest{
Checks: []authlib.BatchCheckItem{
{CorrelationID: "check-1", Verb: "get", Resource: "dashboards", Name: "1"},
{CorrelationID: "check-2", Verb: "get", Resource: "dashboards", Name: "2"},
{CorrelationID: "check-3", Verb: "get", Resource: "dashboards", Name: "3"},
},
})
assert.NoError(t, err)
assert.True(t, res.Results["check-1"].Allowed)
assert.False(t, res.Results["check-2"].Allowed)
assert.True(t, res.Results["check-3"].Allowed)
})
t.Run("should use resolver when provided", func(t *testing.T) {
resolver := accesscontrol.ResourceResolverFunc(func(ctx context.Context, ns authlib.NamespaceInfo, name string) ([]string, error) {
// Resolve dashboard name to folder scope
return []string{"folders:uid:folder-a"}, nil
})
a := accesscontrol.NewLegacyAccessClient(ac, accesscontrol.ResourceAuthorizerOptions{
Resource: "dashboards",
Attr: "uid",
Mapping: map[string]string{"get": "dashboards:read"},
Resolver: resolver,
})
ident := newIdent(accesscontrol.Permission{Action: "dashboards:read", Scope: "folders:uid:folder-a"})
res, err := a.BatchCheck(context.Background(), ident, authlib.BatchCheckRequest{
Checks: []authlib.BatchCheckItem{
{CorrelationID: "check-1", Verb: "get", Resource: "dashboards", Name: "1", Namespace: "default"},
},
})
assert.NoError(t, err)
assert.True(t, res.Results["check-1"].Allowed)
})
t.Run("should cache checker by action", func(t *testing.T) {
a := accesscontrol.NewLegacyAccessClient(ac, accesscontrol.ResourceAuthorizerOptions{
Resource: "dashboards",
Attr: "uid",
Mapping: map[string]string{"get": "dashboards:read", "update": "dashboards:write"},
})
ident := newIdent(
accesscontrol.Permission{Action: "dashboards:read", Scope: "dashboards:uid:*"},
accesscontrol.Permission{Action: "dashboards:write", Scope: "dashboards:uid:1"},
)
res, err := a.BatchCheck(context.Background(), ident, authlib.BatchCheckRequest{
Checks: []authlib.BatchCheckItem{
{CorrelationID: "read-1", Verb: "get", Resource: "dashboards", Name: "1"},
{CorrelationID: "read-2", Verb: "get", Resource: "dashboards", Name: "2"},
{CorrelationID: "write-1", Verb: "update", Resource: "dashboards", Name: "1"},
{CorrelationID: "write-2", Verb: "update", Resource: "dashboards", Name: "2"},
},
})
assert.NoError(t, err)
// Read with wildcard scope should allow all
assert.True(t, res.Results["read-1"].Allowed)
assert.True(t, res.Results["read-2"].Allowed)
// Write only has scope for uid:1
assert.True(t, res.Results["write-1"].Allowed)
assert.False(t, res.Results["write-2"].Allowed)
})
}
func newIdent(permissions ...accesscontrol.Permission) *identity.StaticRequester { func newIdent(permissions ...accesscontrol.Permission) *identity.StaticRequester {
pmap := map[string][]string{} pmap := map[string][]string{}
for _, p := range permissions { for _, p := range permissions {

File diff suppressed because it is too large Load Diff

View File

@@ -9,8 +9,6 @@ import "google/protobuf/timestamp.proto";
import "google/protobuf/wrappers.proto"; import "google/protobuf/wrappers.proto";
service AuthzExtentionService { service AuthzExtentionService {
rpc BatchCheck(BatchCheckRequest) returns (BatchCheckResponse);
rpc Read(ReadRequest) returns (ReadResponse); rpc Read(ReadRequest) returns (ReadResponse);
rpc Write(WriteRequest) returns (WriteResponse); rpc Write(WriteRequest) returns (WriteResponse);
@@ -231,29 +229,6 @@ message WriteRequest {
message WriteResponse {} message WriteResponse {}
message BatchCheckRequest {
string subject = 1;
string namespace = 2;
repeated BatchCheckItem items = 3;
}
message BatchCheckItem {
string verb = 1;
string group = 2;
string resource = 3;
string name = 4;
string subresource = 5;
string folder = 6;
}
message BatchCheckResponse {
map<string, BatchCheckGroupResource> groups = 1;
}
message BatchCheckGroupResource {
map<string, bool> items = 1;
}
message QueryRequest { message QueryRequest {
string namespace = 1; string namespace = 1;
QueryOperation operation = 2; QueryOperation operation = 2;

View File

@@ -19,18 +19,16 @@ import (
const _ = grpc.SupportPackageIsVersion8 const _ = grpc.SupportPackageIsVersion8
const ( const (
AuthzExtentionService_BatchCheck_FullMethodName = "/authz.extention.v1.AuthzExtentionService/BatchCheck" AuthzExtentionService_Read_FullMethodName = "/authz.extention.v1.AuthzExtentionService/Read"
AuthzExtentionService_Read_FullMethodName = "/authz.extention.v1.AuthzExtentionService/Read" AuthzExtentionService_Write_FullMethodName = "/authz.extention.v1.AuthzExtentionService/Write"
AuthzExtentionService_Write_FullMethodName = "/authz.extention.v1.AuthzExtentionService/Write" AuthzExtentionService_Mutate_FullMethodName = "/authz.extention.v1.AuthzExtentionService/Mutate"
AuthzExtentionService_Mutate_FullMethodName = "/authz.extention.v1.AuthzExtentionService/Mutate" AuthzExtentionService_Query_FullMethodName = "/authz.extention.v1.AuthzExtentionService/Query"
AuthzExtentionService_Query_FullMethodName = "/authz.extention.v1.AuthzExtentionService/Query"
) )
// AuthzExtentionServiceClient is the client API for AuthzExtentionService service. // AuthzExtentionServiceClient is the client API for AuthzExtentionService service.
// //
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type AuthzExtentionServiceClient interface { type AuthzExtentionServiceClient interface {
BatchCheck(ctx context.Context, in *BatchCheckRequest, opts ...grpc.CallOption) (*BatchCheckResponse, error)
Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ReadResponse, error) Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ReadResponse, error)
Write(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) Write(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error)
Mutate(ctx context.Context, in *MutateRequest, opts ...grpc.CallOption) (*MutateResponse, error) Mutate(ctx context.Context, in *MutateRequest, opts ...grpc.CallOption) (*MutateResponse, error)
@@ -45,16 +43,6 @@ func NewAuthzExtentionServiceClient(cc grpc.ClientConnInterface) AuthzExtentionS
return &authzExtentionServiceClient{cc} return &authzExtentionServiceClient{cc}
} }
func (c *authzExtentionServiceClient) BatchCheck(ctx context.Context, in *BatchCheckRequest, opts ...grpc.CallOption) (*BatchCheckResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(BatchCheckResponse)
err := c.cc.Invoke(ctx, AuthzExtentionService_BatchCheck_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *authzExtentionServiceClient) Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ReadResponse, error) { func (c *authzExtentionServiceClient) Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ReadResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ReadResponse) out := new(ReadResponse)
@@ -99,7 +87,6 @@ func (c *authzExtentionServiceClient) Query(ctx context.Context, in *QueryReques
// All implementations should embed UnimplementedAuthzExtentionServiceServer // All implementations should embed UnimplementedAuthzExtentionServiceServer
// for forward compatibility // for forward compatibility
type AuthzExtentionServiceServer interface { type AuthzExtentionServiceServer interface {
BatchCheck(context.Context, *BatchCheckRequest) (*BatchCheckResponse, error)
Read(context.Context, *ReadRequest) (*ReadResponse, error) Read(context.Context, *ReadRequest) (*ReadResponse, error)
Write(context.Context, *WriteRequest) (*WriteResponse, error) Write(context.Context, *WriteRequest) (*WriteResponse, error)
Mutate(context.Context, *MutateRequest) (*MutateResponse, error) Mutate(context.Context, *MutateRequest) (*MutateResponse, error)
@@ -110,9 +97,6 @@ type AuthzExtentionServiceServer interface {
type UnimplementedAuthzExtentionServiceServer struct { type UnimplementedAuthzExtentionServiceServer struct {
} }
func (UnimplementedAuthzExtentionServiceServer) BatchCheck(context.Context, *BatchCheckRequest) (*BatchCheckResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method BatchCheck not implemented")
}
func (UnimplementedAuthzExtentionServiceServer) Read(context.Context, *ReadRequest) (*ReadResponse, error) { func (UnimplementedAuthzExtentionServiceServer) Read(context.Context, *ReadRequest) (*ReadResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Read not implemented") return nil, status.Errorf(codes.Unimplemented, "method Read not implemented")
} }
@@ -137,24 +121,6 @@ func RegisterAuthzExtentionServiceServer(s grpc.ServiceRegistrar, srv AuthzExten
s.RegisterService(&AuthzExtentionService_ServiceDesc, srv) s.RegisterService(&AuthzExtentionService_ServiceDesc, srv)
} }
func _AuthzExtentionService_BatchCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(BatchCheckRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AuthzExtentionServiceServer).BatchCheck(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: AuthzExtentionService_BatchCheck_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AuthzExtentionServiceServer).BatchCheck(ctx, req.(*BatchCheckRequest))
}
return interceptor(ctx, in, info, handler)
}
func _AuthzExtentionService_Read_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { func _AuthzExtentionService_Read_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReadRequest) in := new(ReadRequest)
if err := dec(in); err != nil { if err := dec(in); err != nil {
@@ -234,10 +200,6 @@ var AuthzExtentionService_ServiceDesc = grpc.ServiceDesc{
ServiceName: "authz.extention.v1.AuthzExtentionService", ServiceName: "authz.extention.v1.AuthzExtentionService",
HandlerType: (*AuthzExtentionServiceServer)(nil), HandlerType: (*AuthzExtentionServiceServer)(nil),
Methods: []grpc.MethodDesc{ Methods: []grpc.MethodDesc{
{
MethodName: "BatchCheck",
Handler: _AuthzExtentionService_BatchCheck_Handler,
},
{ {
MethodName: "Read", MethodName: "Read",
Handler: _AuthzExtentionService_Read_Handler, Handler: _AuthzExtentionService_Read_Handler,

View File

@@ -186,6 +186,150 @@ func (s *Service) Check(ctx context.Context, req *authzv1.CheckRequest) (*authzv
return &authzv1.CheckResponse{Allowed: allowed}, nil return &authzv1.CheckResponse{Allowed: allowed}, nil
} }
// BatchCheck implements authzv1.AuthzServiceServer.BatchCheck
// This performs multiple access checks in a single request with optimized batching.
// 1. Validates the subject once
// 2. Groups checks by (namespace, action) to load permissions once per group
// 3. Reuses the folder tree across checks
func (s *Service) BatchCheck(ctx context.Context, req *authzv1.BatchCheckRequest) (*authzv1.BatchCheckResponse, error) {
ctx, span := s.tracer.Start(ctx, "authz_direct_db.service.BatchCheck")
defer span.End()
checks := req.GetChecks()
span.SetAttributes(attribute.Int("check_count", len(checks)))
ctxLogger := s.logger.FromContext(ctx).New(
"subject", req.GetSubject(),
"check_count", len(checks),
)
defer func(start time.Time) {
ctxLogger.Debug("BatchCheck execution time", "duration", time.Since(start).Milliseconds())
}(time.Now())
// Early check for auth info - required for namespace validation
if _, has := types.AuthInfoFrom(ctx); !has {
return nil, status.Error(codes.Internal, "could not get auth info from context")
}
if len(checks) == 0 {
return &authzv1.BatchCheckResponse{
Results: make(map[string]*authzv1.BatchCheckResult),
Zookie: &authzv1.Zookie{Timestamp: time.Now().UnixMilli()},
}, nil
}
// Validate subject once for all checks
userUID, idType, err := s.validateSubject(ctx, req.GetSubject())
if err != nil {
ctxLogger.Error("invalid subject", "error", err)
// Return all checks as denied with the same error
results := make(map[string]*authzv1.BatchCheckResult, len(checks))
for _, item := range checks {
results[item.GetCorrelationId()] = &authzv1.BatchCheckResult{
Allowed: false,
Error: err.Error(),
}
}
return &authzv1.BatchCheckResponse{Results: results, Zookie: &authzv1.Zookie{Timestamp: time.Now().UnixMilli()}}, nil
}
results := make(map[string]*authzv1.BatchCheckResult, len(checks))
// Group checks by (namespace, action) to batch permission lookups
type checkGroup struct {
namespace types.NamespaceInfo
action string
actionSets []string
items []*authzv1.BatchCheckItem
checkReqs []*checkRequest
}
groups := make(map[string]*checkGroup)
// First pass: validate and group checks
for _, item := range checks {
ns, err := validateNamespace(ctx, item.GetNamespace())
if err != nil {
results[item.GetCorrelationId()] = &authzv1.BatchCheckResult{Allowed: false, Error: err.Error()}
continue
}
action, actionSets, err := s.validateAction(ctx, item.GetGroup(), item.GetResource(), item.GetVerb())
if err != nil {
results[item.GetCorrelationId()] = &authzv1.BatchCheckResult{Allowed: false, Error: err.Error()}
continue
}
// Create the internal check request
checkReq := &checkRequest{
Namespace: ns,
UserUID: userUID,
IdentityType: idType,
Action: action,
ActionSets: actionSets,
Group: item.GetGroup(),
Resource: item.GetResource(),
Verb: item.GetVerb(),
Name: item.GetName(),
ParentFolder: item.GetFolder(),
}
// Group by namespace + action
groupKey := ns.Value + ":" + action
if g, ok := groups[groupKey]; ok {
g.items = append(g.items, item)
g.checkReqs = append(g.checkReqs, checkReq)
} else {
groups[groupKey] = &checkGroup{
namespace: ns,
action: action,
actionSets: actionSets,
items: []*authzv1.BatchCheckItem{item},
checkReqs: []*checkRequest{checkReq},
}
}
}
// Second pass: process each group with shared permissions
for _, group := range groups {
// Set namespace in context for this group (required by store methods)
groupCtx := request.WithNamespace(ctx, group.namespace.Value)
// Try to get cached permissions first, then fall back to store
permissions, err := s.getCachedIdentityPermissions(groupCtx, group.namespace, idType, userUID, group.action)
if err != nil {
// Cache miss - fetch from store
permissions, err = s.getIdentityPermissions(groupCtx, group.namespace, idType, userUID, group.action, group.actionSets)
if err != nil {
ctxLogger.Error("could not get permissions", "namespace", group.namespace.Value, "action", group.action, "error", err)
for _, item := range group.items {
results[item.GetCorrelationId()] = &authzv1.BatchCheckResult{Allowed: false, Error: err.Error()}
}
continue
}
}
// Check each item in the group using the shared permissions
for i, item := range group.items {
checkReq := group.checkReqs[i]
allowed, err := s.checkPermission(groupCtx, permissions, checkReq)
if err != nil {
results[item.GetCorrelationId()] = &authzv1.BatchCheckResult{Allowed: false, Error: err.Error()}
continue
}
results[item.GetCorrelationId()] = &authzv1.BatchCheckResult{Allowed: allowed}
}
}
span.SetAttributes(attribute.Int("groups_processed", len(groups)))
return &authzv1.BatchCheckResponse{
Results: results,
Zookie: &authzv1.Zookie{Timestamp: time.Now().UnixMilli()},
}, nil
}
func (s *Service) List(ctx context.Context, req *authzv1.ListRequest) (*authzv1.ListResponse, error) { func (s *Service) List(ctx context.Context, req *authzv1.ListRequest) (*authzv1.ListResponse, error) {
ctx, span := s.tracer.Start(ctx, "authz_direct_db.service.List") ctx, span := s.tracer.Start(ctx, "authz_direct_db.service.List")
defer span.End() defer span.End()

View File

@@ -1829,6 +1829,613 @@ func TestService_CacheList(t *testing.T) {
}) })
} }
func TestService_BatchCheck(t *testing.T) {
callingService := authn.NewAccessTokenAuthInfo(authn.Claims[authn.AccessTokenClaims]{
Claims: jwt.Claims{
Subject: types.NewTypeID(types.TypeAccessPolicy, "some-service"),
Audience: []string{"authzservice"},
},
Rest: authn.AccessTokenClaims{Namespace: "org-12"},
})
t.Run("Require auth info", func(t *testing.T) {
s := setupService()
ctx := context.Background()
_, err := s.BatchCheck(ctx, &authzv1.BatchCheckRequest{
Subject: "user:test-uid",
Checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
},
})
require.Error(t, err)
require.Contains(t, err.Error(), "could not get auth info")
})
t.Run("Empty checks returns empty results", func(t *testing.T) {
s := setupService()
ctx := types.WithAuthInfo(context.Background(), callingService)
resp, err := s.BatchCheck(ctx, &authzv1.BatchCheckRequest{
Subject: "user:test-uid",
Checks: []*authzv1.BatchCheckItem{},
})
require.NoError(t, err)
require.NotNil(t, resp)
require.Empty(t, resp.Results)
})
type batchCheckTestCase struct {
name string
checks []*authzv1.BatchCheckItem
permissions []accesscontrol.Permission
folders []store.Folder
expectedResults map[string]bool
expectedErrors map[string]bool // true if error expected for this correlation ID
expectGlobalError bool
}
t.Run("Request validation", func(t *testing.T) {
testCases := []batchCheckTestCase{
{
name: "should return error for invalid namespace",
checks: []*authzv1.BatchCheckItem{
{
Namespace: "",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
},
expectedResults: map[string]bool{"check1": false},
expectedErrors: map[string]bool{"check1": true},
},
{
name: "should return error for namespace mismatch",
checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-13",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
},
expectedResults: map[string]bool{"check1": false},
expectedErrors: map[string]bool{"check1": true},
},
{
name: "should return error for unknown group",
checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "unknown.grafana.app",
Resource: "unknown",
Verb: "get",
Name: "u1",
CorrelationId: "check1",
},
},
expectedResults: map[string]bool{"check1": false},
expectedErrors: map[string]bool{"check1": true},
},
{
name: "should return error for unknown verb",
checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "unknown",
Name: "dash1",
CorrelationId: "check1",
},
},
expectedResults: map[string]bool{"check1": false},
expectedErrors: map[string]bool{"check1": true},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
s := setupService()
ctx := types.WithAuthInfo(context.Background(), callingService)
userID := &store.UserIdentifiers{UID: "test-uid", ID: 1}
store := &fakeStore{
userID: userID,
userPermissions: tc.permissions,
}
s.store = store
s.permissionStore = store
s.identityStore = &fakeIdentityStore{}
resp, err := s.BatchCheck(ctx, &authzv1.BatchCheckRequest{
Subject: "user:test-uid",
Checks: tc.checks,
})
require.NoError(t, err)
require.NotNil(t, resp)
for corrID, expectedAllowed := range tc.expectedResults {
result, ok := resp.Results[corrID]
require.True(t, ok, "result for %s not found", corrID)
require.Equal(t, expectedAllowed, result.Allowed, "unexpected allowed for %s", corrID)
if tc.expectedErrors[corrID] {
require.NotEmpty(t, result.Error, "expected error for %s", corrID)
}
}
})
}
})
t.Run("User permission checks", func(t *testing.T) {
testCases := []batchCheckTestCase{
{
name: "should allow user with permission on single resource",
checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
},
permissions: []accesscontrol.Permission{{Action: "dashboards:read", Scope: "dashboards:uid:dash1"}},
expectedResults: map[string]bool{"check1": true},
},
{
name: "should deny user without permission",
checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
},
permissions: []accesscontrol.Permission{{Action: "dashboards:read", Scope: "dashboards:uid:dash2"}},
expectedResults: map[string]bool{"check1": false},
},
{
name: "should handle multiple checks with mixed results",
checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash2",
CorrelationId: "check2",
},
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash3",
CorrelationId: "check3",
},
},
permissions: []accesscontrol.Permission{
{Action: "dashboards:read", Scope: "dashboards:uid:dash1"},
{Action: "dashboards:read", Scope: "dashboards:uid:dash3"},
},
expectedResults: map[string]bool{
"check1": true,
"check2": false,
"check3": true,
},
},
{
name: "should handle wildcard permission",
checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash2",
CorrelationId: "check2",
},
},
permissions: []accesscontrol.Permission{{Action: "dashboards:read", Scope: "*", Kind: "*"}},
expectedResults: map[string]bool{"check1": true, "check2": true},
},
{
name: "should handle folder inheritance",
checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
Folder: "child",
CorrelationId: "check1",
},
},
permissions: []accesscontrol.Permission{
{Action: "dashboards:read", Scope: "folders:uid:parent", Kind: "folders", Attribute: "uid", Identifier: "parent"},
},
folders: []store.Folder{
{UID: "parent"},
{UID: "child", ParentUID: strPtr("parent")},
},
expectedResults: map[string]bool{"check1": true},
},
{
name: "should handle action sets",
checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
},
permissions: []accesscontrol.Permission{{Action: "dashboards:admin", Scope: "dashboards:uid:dash1"}},
expectedResults: map[string]bool{"check1": true},
},
{
name: "should handle checks across different resources",
checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
{
Namespace: "org-12",
Group: "folder.grafana.app",
Resource: "folders",
Verb: "get",
Name: "fold1",
CorrelationId: "check2",
},
},
permissions: []accesscontrol.Permission{
{Action: "dashboards:read", Scope: "dashboards:uid:dash1"},
{Action: "folders:read", Scope: "folders:uid:fold1"},
},
expectedResults: map[string]bool{"check1": true, "check2": true},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
s := setupService()
ctx := types.WithAuthInfo(context.Background(), callingService)
userID := &store.UserIdentifiers{UID: "test-uid", ID: 1}
store := &fakeStore{
userID: userID,
userPermissions: tc.permissions,
folders: tc.folders,
}
s.store = store
s.permissionStore = store
s.folderStore = store
s.identityStore = &fakeIdentityStore{}
if tc.folders != nil {
s.folderCache.Set(ctx, folderCacheKey("org-12"), newFolderTree(tc.folders))
}
resp, err := s.BatchCheck(ctx, &authzv1.BatchCheckRequest{
Subject: "user:test-uid",
Checks: tc.checks,
})
require.NoError(t, err)
require.NotNil(t, resp)
require.Len(t, resp.Results, len(tc.expectedResults))
for corrID, expectedAllowed := range tc.expectedResults {
result, ok := resp.Results[corrID]
require.True(t, ok, "result for %s not found", corrID)
require.Equal(t, expectedAllowed, result.Allowed, "unexpected allowed for %s", corrID)
}
})
}
})
t.Run("Anonymous permission checks", func(t *testing.T) {
testCases := []batchCheckTestCase{
{
name: "should allow anonymous with permission",
checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
},
permissions: []accesscontrol.Permission{{Action: "dashboards:read", Scope: "dashboards:uid:dash1"}},
expectedResults: map[string]bool{"check1": true},
},
{
name: "should deny anonymous without permission",
checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
},
permissions: []accesscontrol.Permission{{Action: "dashboards:read", Scope: "dashboards:uid:dash2"}},
expectedResults: map[string]bool{"check1": false},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
s := setupService()
ctx := types.WithAuthInfo(context.Background(), callingService)
store := &fakeStore{userPermissions: tc.permissions}
s.store = store
s.permissionStore = store
s.identityStore = &fakeIdentityStore{}
resp, err := s.BatchCheck(ctx, &authzv1.BatchCheckRequest{
Subject: "anonymous:0",
Checks: tc.checks,
})
require.NoError(t, err)
require.NotNil(t, resp)
for corrID, expectedAllowed := range tc.expectedResults {
result, ok := resp.Results[corrID]
require.True(t, ok, "result for %s not found", corrID)
require.Equal(t, expectedAllowed, result.Allowed, "unexpected allowed for %s", corrID)
}
})
}
})
t.Run("Rendering permission checks", func(t *testing.T) {
t.Run("should allow rendering with permission", func(t *testing.T) {
s := setupService()
ctx := types.WithAuthInfo(context.Background(), callingService)
resp, err := s.BatchCheck(ctx, &authzv1.BatchCheckRequest{
Subject: "render:0",
Checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
},
})
require.NoError(t, err)
require.NotNil(t, resp)
require.True(t, resp.Results["check1"].Allowed)
})
t.Run("should deny rendering access to another app resources", func(t *testing.T) {
s := setupService()
ctx := types.WithAuthInfo(context.Background(), callingService)
resp, err := s.BatchCheck(ctx, &authzv1.BatchCheckRequest{
Subject: "render:0",
Checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "another.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
},
})
require.NoError(t, err)
require.NotNil(t, resp)
require.False(t, resp.Results["check1"].Allowed)
require.NotEmpty(t, resp.Results["check1"].Error)
})
})
t.Run("Invalid subject returns errors for all checks", func(t *testing.T) {
s := setupService()
ctx := types.WithAuthInfo(context.Background(), callingService)
store := &fakeStore{}
s.store = store
s.permissionStore = store
s.identityStore = &fakeIdentityStore{}
resp, err := s.BatchCheck(ctx, &authzv1.BatchCheckRequest{
Subject: "invalid:12",
Checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash2",
CorrelationId: "check2",
},
},
})
require.NoError(t, err)
require.NotNil(t, resp)
require.Len(t, resp.Results, 2)
for _, result := range resp.Results {
require.False(t, result.Allowed)
require.NotEmpty(t, result.Error)
}
})
t.Run("Grouping optimization", func(t *testing.T) {
t.Run("should batch permission lookups for same action", func(t *testing.T) {
s := setupService()
ctx := types.WithAuthInfo(context.Background(), callingService)
userID := &store.UserIdentifiers{UID: "test-uid", ID: 1}
fStore := &fakeStore{
userID: userID,
userPermissions: []accesscontrol.Permission{
{Action: "dashboards:read", Scope: "dashboards:uid:dash1"},
{Action: "dashboards:read", Scope: "dashboards:uid:dash2"},
},
}
s.store = fStore
s.permissionStore = fStore
s.identityStore = &fakeIdentityStore{}
resp, err := s.BatchCheck(ctx, &authzv1.BatchCheckRequest{
Subject: "user:test-uid",
Checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash2",
CorrelationId: "check2",
},
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash3",
CorrelationId: "check3",
},
},
})
require.NoError(t, err)
require.NotNil(t, resp)
require.True(t, resp.Results["check1"].Allowed)
require.True(t, resp.Results["check2"].Allowed)
require.False(t, resp.Results["check3"].Allowed)
// Verify permissions were fetched only once (1 call for userID + 1 call for basicRole + 1 call for permissions)
require.Equal(t, 3, fStore.calls)
})
})
}
func TestService_CacheBatchCheck(t *testing.T) {
callingService := authn.NewAccessTokenAuthInfo(authn.Claims[authn.AccessTokenClaims]{
Claims: jwt.Claims{
Subject: types.NewTypeID(types.TypeAccessPolicy, "some-service"),
Audience: []string{"authzservice"},
},
Rest: authn.AccessTokenClaims{Namespace: "org-12"},
})
ctx := types.WithAuthInfo(context.Background(), callingService)
userID := &store.UserIdentifiers{UID: "test-uid", ID: 1}
t.Run("Allow based on cached permissions", func(t *testing.T) {
s := setupService()
s.idCache.Set(ctx, userIdentifierCacheKey("org-12", "test-uid"), *userID)
s.permCache.Set(ctx, userPermCacheKey("org-12", "test-uid", "dashboards:read"), map[string]bool{"dashboards:uid:dash1": true})
resp, err := s.BatchCheck(ctx, &authzv1.BatchCheckRequest{
Subject: "user:test-uid",
Checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash1",
CorrelationId: "check1",
},
},
})
require.NoError(t, err)
require.True(t, resp.Results["check1"].Allowed)
})
t.Run("Fallback to database on cache miss", func(t *testing.T) {
s := setupService()
// Populate database but not cache
fStore := &fakeStore{
userID: userID,
userPermissions: []accesscontrol.Permission{{Action: "dashboards:read", Scope: "dashboards:uid:dash2"}},
}
s.store = fStore
s.permissionStore = fStore
s.identityStore = &fakeIdentityStore{}
s.idCache.Set(ctx, userIdentifierCacheKey("org-12", "test-uid"), *userID)
resp, err := s.BatchCheck(ctx, &authzv1.BatchCheckRequest{
Subject: "user:test-uid",
Checks: []*authzv1.BatchCheckItem{
{
Namespace: "org-12",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: "get",
Name: "dash2",
CorrelationId: "check1",
},
},
})
require.NoError(t, err)
require.True(t, resp.Results["check1"].Allowed)
})
}
func setupService() *Service { func setupService() *Service {
cache := cache.NewLocalCache(cache.Config{Expiry: 5 * time.Minute, CleanupInterval: 5 * time.Minute}) cache := cache.NewLocalCache(cache.Config{Expiry: 5 * time.Minute, CleanupInterval: 5 * time.Minute})
logger := log.New("authz-rbac-service") logger := log.New("authz-rbac-service")

View File

@@ -13,7 +13,6 @@ type Client interface {
authlib.AccessClient authlib.AccessClient
Read(ctx context.Context, req *authzextv1.ReadRequest) (*authzextv1.ReadResponse, error) Read(ctx context.Context, req *authzextv1.ReadRequest) (*authzextv1.ReadResponse, error)
Write(ctx context.Context, req *authzextv1.WriteRequest) error Write(ctx context.Context, req *authzextv1.WriteRequest) error
BatchCheck(ctx context.Context, req *authzextv1.BatchCheckRequest) (*authzextv1.BatchCheckResponse, error)
Mutate(ctx context.Context, req *authzextv1.MutateRequest) error Mutate(ctx context.Context, req *authzextv1.MutateRequest) error
Query(ctx context.Context, req *authzextv1.QueryRequest) (*authzextv1.QueryResponse, error) Query(ctx context.Context, req *authzextv1.QueryRequest) (*authzextv1.QueryResponse, error)

View File

@@ -68,11 +68,11 @@ func (c *Client) Write(ctx context.Context, req *authzextv1.WriteRequest) error
return err return err
} }
func (c *Client) BatchCheck(ctx context.Context, req *authzextv1.BatchCheckRequest) (*authzextv1.BatchCheckResponse, error) { func (c *Client) BatchCheck(ctx context.Context, id authlib.AuthInfo, req authlib.BatchCheckRequest) (authlib.BatchCheckResponse, error) {
ctx, span := tracer.Start(ctx, "authlib.zanzana.client.Check") ctx, span := tracer.Start(ctx, "authlib.zanzana.client.BatchCheck")
defer span.End() defer span.End()
return c.authzext.BatchCheck(ctx, req) return c.authzlibclient.BatchCheck(ctx, id, req)
} }
func (c *Client) WriteNew(ctx context.Context, req *authzextv1.WriteRequest) error { func (c *Client) WriteNew(ctx context.Context, req *authzextv1.WriteRequest) error {

View File

@@ -34,8 +34,11 @@ func (nc NoopClient) Write(ctx context.Context, req *authzextv1.WriteRequest) er
return nil return nil
} }
func (nc NoopClient) BatchCheck(ctx context.Context, req *authzextv1.BatchCheckRequest) (*authzextv1.BatchCheckResponse, error) { func (nc NoopClient) BatchCheck(ctx context.Context, id authlib.AuthInfo, req authlib.BatchCheckRequest) (authlib.BatchCheckResponse, error) {
return nil, nil return authlib.BatchCheckResponse{
Results: make(map[string]authlib.BatchCheckResult),
Zookie: authlib.NoopZookie{},
}, nil
} }
func (nc NoopClient) Mutate(ctx context.Context, req *authzextv1.MutateRequest) error { func (nc NoopClient) Mutate(ctx context.Context, req *authzextv1.MutateRequest) error {

View File

@@ -132,3 +132,54 @@ func (c *ShadowClient) Compile(ctx context.Context, id authlib.AuthInfo, req aut
return shadowItemChecker, authlib.NoopZookie{}, err return shadowItemChecker, authlib.NoopZookie{}, err
} }
func (c *ShadowClient) BatchCheck(ctx context.Context, id authlib.AuthInfo, req authlib.BatchCheckRequest) (authlib.BatchCheckResponse, error) {
acResChan := make(chan authlib.BatchCheckResponse, 1)
acErrChan := make(chan error, 1)
go func() {
if c.zanzanaClient == nil {
return
}
zanzanaCtx := context.WithoutCancel(ctx)
zanzanaCtxTimeout, cancel := context.WithTimeout(zanzanaCtx, zanzanaTimeout)
defer cancel()
timer := prometheus.NewTimer(c.metrics.evaluationsSeconds.WithLabelValues("zanzana"))
res, err := c.zanzanaClient.BatchCheck(zanzanaCtxTimeout, id, req)
if err != nil {
c.logger.Error("Failed to run zanzana batch check", "error", err)
}
timer.ObserveDuration()
acRes := <-acResChan
acErr := <-acErrChan
if acErr == nil {
// Compare results for each correlation ID
for corrID, acResult := range acRes.Results {
zanzanaResult, exists := res.Results[corrID]
if !exists {
c.metrics.evaluationStatusTotal.WithLabelValues("error").Inc()
c.logger.Warn("Zanzana batch check missing result", "correlationId", corrID, "user", id.GetUID())
continue
}
if zanzanaResult.Allowed != acResult.Allowed {
c.metrics.evaluationStatusTotal.WithLabelValues("error").Inc()
c.logger.Warn("Zanzana batch check result does not match", "expected", acResult.Allowed, "actual", zanzanaResult.Allowed, "correlationId", corrID, "user", id.GetUID())
} else {
c.metrics.evaluationStatusTotal.WithLabelValues("success").Inc()
}
}
}
}()
timer := prometheus.NewTimer(c.metrics.evaluationsSeconds.WithLabelValues("rbac"))
res, err := c.accessClient.BatchCheck(ctx, id, req)
timer.ObserveDuration()
acResChan <- res
acErrChan <- err
return res, err
}

View File

@@ -10,7 +10,6 @@ import (
iamv0alpha1 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1" iamv0alpha1 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
"github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/grafana/grafana/pkg/services/accesscontrol" "github.com/grafana/grafana/pkg/services/accesscontrol"
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
) )
type typeInfo struct { type typeInfo struct {
@@ -73,7 +72,7 @@ func NewResourceInfoFromCheck(r *authzv1.CheckRequest) ResourceInfo {
return resource return resource
} }
func NewResourceInfoFromBatchItem(i *authzextv1.BatchCheckItem) ResourceInfo { func NewResourceInfoFromBatchItem(i *authzv1.BatchCheckItem) ResourceInfo {
typ, relations := getTypeAndRelations(i.GetGroup(), i.GetResource()) typ, relations := getTypeAndRelations(i.GetGroup(), i.GetResource())
return newResource( return newResource(
typ, typ,

View File

@@ -2,97 +2,463 @@ package server
import ( import (
"context" "context"
"fmt"
"time"
authzv1 "github.com/grafana/authlib/authz/proto/v1" authzv1 "github.com/grafana/authlib/authz/proto/v1"
openfgav1 "github.com/openfga/api/proto/openfga/v1" openfgav1 "github.com/openfga/api/proto/openfga/v1"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/codes"
"google.golang.org/protobuf/types/known/structpb"
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
"github.com/grafana/grafana/pkg/services/authz/zanzana/common" "github.com/grafana/grafana/pkg/services/authz/zanzana/common"
) )
func (s *Server) BatchCheck(ctx context.Context, r *authzextv1.BatchCheckRequest) (*authzextv1.BatchCheckResponse, error) { // checkKey represents a unique check to be performed
type checkKey struct {
relation string
object string
}
// batchCheckBuilder encapsulates state for building OpenFGA batch checks
type batchCheckBuilder struct {
subject string
contextuals *openfgav1.ContextualTupleKeys
checks []*openfgav1.BatchCheckItem
checksSeen map[checkKey]bool
checkMapping map[string]checkKey
counter int
}
func newBatchCheckBuilder(subject string, contextuals *openfgav1.ContextualTupleKeys) *batchCheckBuilder {
return &batchCheckBuilder{
subject: subject,
contextuals: contextuals,
checks: make([]*openfgav1.BatchCheckItem, 0),
checksSeen: make(map[checkKey]bool),
checkMapping: make(map[string]checkKey),
counter: 0,
}
}
func (b *batchCheckBuilder) addCheck(relation, object string, context *structpb.Struct) {
if object == "" {
return
}
key := checkKey{relation: relation, object: object}
if b.checksSeen[key] {
return
}
b.checksSeen[key] = true
correlationID := fmt.Sprintf("c%d", b.counter)
b.counter++
b.checks = append(b.checks, &openfgav1.BatchCheckItem{
TupleKey: &openfgav1.CheckRequestTupleKey{
User: b.subject,
Relation: relation,
Object: object,
},
ContextualTuples: b.contextuals,
Context: context,
CorrelationId: correlationID,
})
b.checkMapping[correlationID] = key
}
// BatchCheck implements authzv1.AuthzServiceServer.BatchCheck
// This performs multiple access checks in a single request using OpenFGA's native BatchCheck API.
func (s *Server) BatchCheck(ctx context.Context, r *authzv1.BatchCheckRequest) (*authzv1.BatchCheckResponse, error) {
ctx, span := s.tracer.Start(ctx, "server.BatchCheck") ctx, span := s.tracer.Start(ctx, "server.BatchCheck")
defer span.End() defer span.End()
if err := authorize(ctx, r.GetNamespace(), s.cfg); err != nil { span.SetAttributes(attribute.Int("check_count", len(r.GetChecks())))
span.RecordError(err)
span.SetStatus(codes.Error, err.Error())
return nil, err
}
batchRes := &authzextv1.BatchCheckResponse{ defer func(t time.Time) {
Groups: make(map[string]*authzextv1.BatchCheckGroupResource), s.metrics.requestDurationSeconds.WithLabelValues("server.BatchCheck", "").Observe(time.Since(t).Seconds())
} }(time.Now())
store, err := s.getStoreInfo(ctx, r.GetNamespace()) res, err := s.batchCheck(ctx, r)
if err != nil { if err != nil {
span.RecordError(err) span.RecordError(err)
span.SetStatus(codes.Error, err.Error()) span.SetStatus(codes.Error, err.Error())
return nil, err s.logger.Error("failed to perform batch check request", "error", err)
return nil, fmt.Errorf("failed to perform batch check request: %w", err)
}
return res, nil
}
func (s *Server) batchCheck(ctx context.Context, r *authzv1.BatchCheckRequest) (*authzv1.BatchCheckResponse, error) {
items := r.GetChecks()
if len(items) == 0 {
return &authzv1.BatchCheckResponse{
Results: make(map[string]*authzv1.BatchCheckResult),
}, nil
}
// Group items by namespace
itemsByNamespace := make(map[string][]*authzv1.BatchCheckItem)
for _, item := range items {
ns := item.GetNamespace()
itemsByNamespace[ns] = append(itemsByNamespace[ns], item)
}
// Authorize and get store info for each namespace
stores := make(map[string]*storeInfo)
for namespace := range itemsByNamespace {
if err := authorize(ctx, namespace, s.cfg); err != nil {
return nil, err
}
store, err := s.getStoreInfo(ctx, namespace)
if err != nil {
return nil, err
}
stores[namespace] = store
} }
contextuals, err := s.getContextuals(r.GetSubject()) contextuals, err := s.getContextuals(r.GetSubject())
if err != nil { if err != nil {
span.RecordError(err)
span.SetStatus(codes.Error, err.Error())
return nil, err return nil, err
} }
groupResourceAccess := make(map[string]bool) results := make(map[string]*authzv1.BatchCheckResult, len(items))
subject := r.GetSubject()
for _, item := range r.GetItems() { // Process each namespace separately
res, err := s.batchCheckItem(ctx, r, item, contextuals, store, groupResourceAccess) for namespace, nsItems := range itemsByNamespace {
if err != nil { store := stores[namespace]
span.RecordError(err)
span.SetStatus(codes.Error, err.Error()) // Phase 1: Check GroupResource access (broadest permissions)
return nil, err // Example: user has "get" on "dashboards" group_resource → all dashboards allowed
s.runGroupResourcePhase(ctx, store, subject, nsItems, contextuals, results)
// Phase 2: Check folder permission inheritance (can_get, can_create, etc. on parent folder)
// Example: user has "can_get" on folder-A → all dashboards in folder-A allowed
s.runFolderPermissionPhase(ctx, store, subject, nsItems, contextuals, results)
// Phase 3: Check folder subresource access (folder_get, folder_create, etc.)
// Example: user has "folder_get" on folder-A → dashboards in folder-A allowed via subresource
s.runFolderSubresourcePhase(ctx, store, subject, nsItems, contextuals, results)
// Phase 4: Check direct resource access
// Example: user has "get" directly on dashboard-123
s.runDirectResourcePhase(ctx, store, subject, nsItems, contextuals, results)
}
// Mark any remaining unresolved items as denied
for _, item := range items {
if _, resolved := results[item.GetCorrelationId()]; !resolved {
results[item.GetCorrelationId()] = &authzv1.BatchCheckResult{Allowed: false}
}
}
return s.buildResponse(results), nil
}
func (s *Server) buildResponse(results map[string]*authzv1.BatchCheckResult) *authzv1.BatchCheckResponse {
return &authzv1.BatchCheckResponse{
Results: results,
Zookie: &authzv1.Zookie{Timestamp: time.Now().UnixMilli()},
}
}
// runGroupResourcePhase checks if the user has GroupResource-level access.
// This is the broadest permission - if allowed, all items in that group are allowed.
func (s *Server) runGroupResourcePhase(
ctx context.Context,
store *storeInfo,
subject string,
items []*authzv1.BatchCheckItem,
contextuals *openfgav1.ContextualTupleKeys,
results map[string]*authzv1.BatchCheckResult,
) {
// Group items by their GroupResource
type grInfo struct {
relation string
grIdent string
items []string // correlation IDs
}
groupedItems := make(map[string]*grInfo) // groupResource -> info
for _, item := range items {
relation := common.VerbMapping[item.GetVerb()]
if !common.IsGroupResourceRelation(relation) {
continue
} }
groupResource := common.FormatGroupResource(item.GetGroup(), item.GetResource(), item.GetSubresource()) resource := common.NewResourceInfoFromBatchItem(item)
if _, ok := batchRes.Groups[groupResource]; !ok { gr := resource.GroupResource()
batchRes.Groups[groupResource] = &authzextv1.BatchCheckGroupResource{
Items: make(map[string]bool), if _, exists := groupedItems[gr]; !exists {
groupedItems[gr] = &grInfo{
relation: relation,
grIdent: resource.GroupResourceIdent(),
items: make([]string, 0),
} }
} }
batchRes.Groups[groupResource].Items[item.GetName()] = res.GetAllowed() groupedItems[gr].items = append(groupedItems[gr].items, item.GetCorrelationId())
} }
return batchRes, nil if len(groupedItems) == 0 {
return
}
// Build batch check for unique GroupResources
builder := newBatchCheckBuilder(subject, contextuals)
grCheckMapping := make(map[string]string) // OpenFGA correlationID -> groupResource
for gr, info := range groupedItems {
correlationID := fmt.Sprintf("gr%d", builder.counter)
builder.counter++
builder.checks = append(builder.checks, &openfgav1.BatchCheckItem{
TupleKey: &openfgav1.CheckRequestTupleKey{
User: subject,
Relation: info.relation,
Object: info.grIdent,
},
ContextualTuples: contextuals,
CorrelationId: correlationID,
})
grCheckMapping[correlationID] = gr
}
openfgaRes, err := s.openfgaClient.BatchCheck(ctx, &openfgav1.BatchCheckRequest{
StoreId: store.ID,
AuthorizationModelId: store.ModelID,
Checks: builder.checks,
})
if err != nil {
s.logger.Warn("Failed to check group resource access", "error", err)
return
}
// Mark all items in allowed GroupResources
for correlationID, result := range openfgaRes.GetResult() {
gr := grCheckMapping[correlationID]
if allowed, ok := result.GetCheckResult().(*openfgav1.BatchCheckSingleResult_Allowed); ok && allowed.Allowed {
for _, itemCorrelationID := range groupedItems[gr].items {
results[itemCorrelationID] = &authzv1.BatchCheckResult{Allowed: true}
}
}
}
} }
func (s *Server) batchCheckItem( // runFolderPermissionPhase checks folder permission inheritance (can_get, can_create, etc.).
// This applies to folder-based resources like dashboards, panels, etc.
func (s *Server) runFolderPermissionPhase(
ctx context.Context, ctx context.Context,
r *authzextv1.BatchCheckRequest,
item *authzextv1.BatchCheckItem,
contextuals *openfgav1.ContextualTupleKeys,
store *storeInfo, store *storeInfo,
groupResourceAccess map[string]bool, subject string,
) (*authzv1.CheckResponse, error) { items []*authzv1.BatchCheckItem,
var ( contextuals *openfgav1.ContextualTupleKeys,
relation = common.VerbMapping[item.GetVerb()] results map[string]*authzv1.BatchCheckResult,
resource = common.NewResourceInfoFromBatchItem(item) ) {
groupResource = resource.GroupResource() builder := newBatchCheckBuilder(subject, contextuals)
) checkToItems := make(map[checkKey][]string) // checkKey -> correlation IDs
allowed, ok := groupResourceAccess[groupResource] for _, item := range items {
if !ok { if _, resolved := results[item.GetCorrelationId()]; resolved {
res, err := s.checkGroupResource(ctx, r.GetSubject(), relation, resource, contextuals, store) continue
if err != nil {
return nil, err
} }
allowed = res.GetAllowed() resource := common.NewResourceInfoFromBatchItem(item)
groupResourceAccess[groupResource] = res.GetAllowed() folderIdent := resource.FolderIdent()
// Only folder-based generic resources use folder permission inheritance
if !resource.IsGeneric() || folderIdent == "" || !isFolderPermissionBasedResource(resource.GroupResource()) {
continue
}
relation := common.VerbMapping[item.GetVerb()]
rel := common.FolderPermissionRelation(relation)
key := checkKey{relation: rel, object: folderIdent}
checkToItems[key] = append(checkToItems[key], item.GetCorrelationId())
builder.addCheck(rel, folderIdent, resource.Context())
} }
if allowed { if len(builder.checks) == 0 {
return &authzv1.CheckResponse{Allowed: true}, nil return
} }
if resource.IsGeneric() { checkResults, err := s.executeOpenFGABatchChecks(ctx, store, builder)
return s.checkGeneric(ctx, r.GetSubject(), relation, resource, contextuals, store) if err != nil {
s.logger.Warn("Failed folder permission phase", "error", err)
return
} }
return s.checkTyped(ctx, r.GetSubject(), relation, resource, contextuals, store) // Mark items allowed by folder permissions
for key, allowed := range checkResults {
if allowed {
for _, correlationID := range checkToItems[key] {
results[correlationID] = &authzv1.BatchCheckResult{Allowed: true}
}
}
}
}
// runFolderSubresourcePhase checks folder subresource access (folder_get, folder_create, etc.).
func (s *Server) runFolderSubresourcePhase(
ctx context.Context,
store *storeInfo,
subject string,
items []*authzv1.BatchCheckItem,
contextuals *openfgav1.ContextualTupleKeys,
results map[string]*authzv1.BatchCheckResult,
) {
builder := newBatchCheckBuilder(subject, contextuals)
checkToItems := make(map[checkKey][]string)
for _, item := range items {
if _, resolved := results[item.GetCorrelationId()]; resolved {
continue
}
resource := common.NewResourceInfoFromBatchItem(item)
relation := common.VerbMapping[item.GetVerb()]
var objectIdent string
var subresRel string
if resource.IsGeneric() {
// Generic resources: check subresource on folder
folderIdent := resource.FolderIdent()
if folderIdent == "" {
continue
}
subresRel = common.SubresourceRelation(relation)
if !common.IsSubresourceRelation(subresRel) {
continue
}
objectIdent = folderIdent
} else {
// Typed resources: check subresource on the resource itself
if !resource.HasSubresource() || !resource.IsValidRelation(relation) {
continue
}
objectIdent = resource.ResourceIdent()
if objectIdent == "" {
continue
}
subresRel = common.SubresourceRelation(relation)
}
key := checkKey{relation: subresRel, object: objectIdent}
checkToItems[key] = append(checkToItems[key], item.GetCorrelationId())
builder.addCheck(subresRel, objectIdent, resource.Context())
}
if len(builder.checks) == 0 {
return
}
checkResults, err := s.executeOpenFGABatchChecks(ctx, store, builder)
if err != nil {
s.logger.Warn("Failed folder subresource phase", "error", err)
return
}
for key, allowed := range checkResults {
if allowed {
for _, correlationID := range checkToItems[key] {
results[correlationID] = &authzv1.BatchCheckResult{Allowed: true}
}
}
}
}
// runDirectResourcePhase checks direct resource access.
func (s *Server) runDirectResourcePhase(
ctx context.Context,
store *storeInfo,
subject string,
items []*authzv1.BatchCheckItem,
contextuals *openfgav1.ContextualTupleKeys,
results map[string]*authzv1.BatchCheckResult,
) {
builder := newBatchCheckBuilder(subject, contextuals)
checkToItems := make(map[checkKey][]string)
for _, item := range items {
if _, resolved := results[item.GetCorrelationId()]; resolved {
continue
}
resource := common.NewResourceInfoFromBatchItem(item)
relation := common.VerbMapping[item.GetVerb()]
if !resource.IsValidRelation(relation) {
continue
}
resourceIdent := resource.ResourceIdent()
if resourceIdent == "" {
continue
}
// For folders, use the computed permission relation
checkRelation := relation
if resource.Type() == common.TypeFolder {
checkRelation = common.FolderPermissionRelation(relation)
}
key := checkKey{relation: checkRelation, object: resourceIdent}
checkToItems[key] = append(checkToItems[key], item.GetCorrelationId())
builder.addCheck(checkRelation, resourceIdent, resource.Context())
}
if len(builder.checks) == 0 {
return
}
checkResults, err := s.executeOpenFGABatchChecks(ctx, store, builder)
if err != nil {
s.logger.Warn("Failed direct resource phase", "error", err)
return
}
for key, allowed := range checkResults {
if allowed {
for _, correlationID := range checkToItems[key] {
results[correlationID] = &authzv1.BatchCheckResult{Allowed: true}
}
}
}
}
// executeOpenFGABatchChecks executes the OpenFGA batch checks in chunks and returns results
func (s *Server) executeOpenFGABatchChecks(ctx context.Context, store *storeInfo, builder *batchCheckBuilder) (map[checkKey]bool, error) {
const maxChecksPerBatch = 50
checkResults := make(map[checkKey]bool)
for i := 0; i < len(builder.checks); i += maxChecksPerBatch {
end := i + maxChecksPerBatch
if end > len(builder.checks) {
end = len(builder.checks)
}
openfgaRes, err := s.openfgaClient.BatchCheck(ctx, &openfgav1.BatchCheckRequest{
StoreId: store.ID,
AuthorizationModelId: store.ModelID,
Checks: builder.checks[i:end],
})
if err != nil {
return nil, fmt.Errorf("failed to perform OpenFGA batch check: %w", err)
}
// Process results
for correlationID, result := range openfgaRes.GetResult() {
key, ok := builder.checkMapping[correlationID]
if !ok {
continue
}
if allowed, ok := result.GetCheckResult().(*openfgav1.BatchCheckSingleResult_Allowed); ok {
checkResults[key] = allowed.Allowed
}
}
}
return checkResults, nil
} }

View File

@@ -1,193 +1,302 @@
package server package server
import ( import (
"fmt"
"testing" "testing"
authzv1 "github.com/grafana/authlib/authz/proto/v1"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/apimachinery/utils"
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
"github.com/grafana/grafana/pkg/services/authz/zanzana/common"
) )
func testBatchCheck(t *testing.T, server *Server) { func testBatchCheck(t *testing.T, server *Server) {
newReq := func(subject, verb, group, resource, subresource string, items []*authzextv1.BatchCheckItem) *authzextv1.BatchCheckRequest { // Helper to create a batch check request
for i, item := range items { newReq := func(subject string, items []*authzv1.BatchCheckItem) *authzv1.BatchCheckRequest {
items[i] = &authzextv1.BatchCheckItem{ return &authzv1.BatchCheckRequest{
Verb: verb, Subject: subject,
Group: group, Checks: items,
Resource: resource,
Subresource: subresource,
Name: item.GetName(),
Folder: item.GetFolder(),
}
} }
}
return &authzextv1.BatchCheckRequest{ // Helper to create a batch check item with correlation ID (uses default namespace)
Namespace: namespace, newItem := func(verb, group, resource, subresource, folder, name string) *authzv1.BatchCheckItem {
Subject: subject, correlationID := fmt.Sprintf("%s-%s-%s-%s", group, resource, folder, name)
Items: items, return &authzv1.BatchCheckItem{
Namespace: namespace,
Verb: verb,
Group: group,
Resource: resource,
Subresource: subresource,
Name: name,
Folder: folder,
CorrelationId: correlationID,
} }
} }
t.Run("user:1 should only be able to read resource:dashboard.grafana.app/dashboards/1", func(t *testing.T) { t.Run("user:1 should only be able to read resource:dashboard.grafana.app/dashboards/1", func(t *testing.T) {
groupResource := common.FormatGroupResource(dashboardGroup, dashboardResource, "") res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:1", []*authzv1.BatchCheckItem{
res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:1", utils.VerbGet, dashboardGroup, dashboardResource, "", []*authzextv1.BatchCheckItem{ newItem(utils.VerbGet, dashboardGroup, dashboardResource, "", "1", "1"),
{Name: "1", Folder: "1"}, newItem(utils.VerbGet, dashboardGroup, dashboardResource, "", "2", "2"),
{Name: "2", Folder: "2"},
})) }))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, res.Groups[groupResource].Items, 2) require.Len(t, res.Results, 2)
assert.True(t, res.Groups[groupResource].Items["1"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "1", "1")].Allowed)
assert.False(t, res.Groups[groupResource].Items["2"]) assert.False(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "2", "2")].Allowed)
}) })
t.Run("user:2 should be able to read resource:dashboard.grafana.app/dashboards/{1,2} through group_resource", func(t *testing.T) { t.Run("user:2 should be able to read resource:dashboard.grafana.app/dashboards/{1,2} through group_resource", func(t *testing.T) {
groupResource := common.FormatGroupResource(dashboardGroup, dashboardResource, "") res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:2", []*authzv1.BatchCheckItem{
res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:2", utils.VerbGet, dashboardGroup, dashboardResource, "", []*authzextv1.BatchCheckItem{ newItem(utils.VerbGet, dashboardGroup, dashboardResource, "", "1", "1"),
{Name: "1", Folder: "1"}, newItem(utils.VerbGet, dashboardGroup, dashboardResource, "", "2", "2"),
{Name: "2", Folder: "2"},
})) }))
require.NoError(t, err) require.NoError(t, err)
assert.Len(t, res.Groups[groupResource].Items, 2) require.Len(t, res.Results, 2)
// user:2 has group_resource access, so both should be allowed
assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "1", "1")].Allowed)
assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "2", "2")].Allowed)
}) })
t.Run("user:3 should be able to read resource:dashboard.grafana.app/dashboards/1 with set relation", func(t *testing.T) { t.Run("user:3 should be able to read resource:dashboard.grafana.app/dashboards/1 with set relation", func(t *testing.T) {
groupResource := common.FormatGroupResource(dashboardGroup, dashboardResource, "") res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:3", []*authzv1.BatchCheckItem{
res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:3", utils.VerbGet, dashboardGroup, dashboardResource, "", []*authzextv1.BatchCheckItem{ newItem(utils.VerbGet, dashboardGroup, dashboardResource, "", "1", "1"),
{Name: "1", Folder: "1"}, newItem(utils.VerbGet, dashboardGroup, dashboardResource, "", "2", "2"),
{Name: "2", Folder: "2"},
})) }))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, res.Groups[groupResource].Items, 2) require.Len(t, res.Results, 2)
assert.True(t, res.Groups[groupResource].Items["1"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "1", "1")].Allowed)
assert.False(t, res.Groups[groupResource].Items["2"]) assert.False(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "2", "2")].Allowed)
}) })
t.Run("user:4 should be able to read all dashboard.grafana.app/dashboards in folder 1 and 3", func(t *testing.T) { t.Run("user:4 should be able to read all dashboard.grafana.app/dashboards in folder 1 and 3", func(t *testing.T) {
groupResource := common.FormatGroupResource(dashboardGroup, dashboardResource, "") res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:4", []*authzv1.BatchCheckItem{
res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:4", utils.VerbGet, dashboardGroup, dashboardResource, "", []*authzextv1.BatchCheckItem{ newItem(utils.VerbGet, dashboardGroup, dashboardResource, "", "1", "1"),
{Name: "1", Folder: "1"}, newItem(utils.VerbGet, dashboardGroup, dashboardResource, "", "3", "2"),
{Name: "2", Folder: "3"}, newItem(utils.VerbGet, dashboardGroup, dashboardResource, "", "2", "3"),
{Name: "3", Folder: "2"},
})) }))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, res.Groups[groupResource].Items, 3) require.Len(t, res.Results, 3)
assert.True(t, res.Groups[groupResource].Items["1"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "1", "1")].Allowed)
assert.True(t, res.Groups[groupResource].Items["2"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "3", "2")].Allowed)
assert.False(t, res.Groups[groupResource].Items["3"]) assert.False(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "2", "3")].Allowed)
}) })
t.Run("user:5 should be able to read resource:dashboard.grafana.app/dashboards/1 through folder with set relation", func(t *testing.T) { t.Run("user:5 should be able to read resource:dashboard.grafana.app/dashboards/1 through folder with set relation", func(t *testing.T) {
groupResource := common.FormatGroupResource(dashboardGroup, dashboardResource, "") res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:5", []*authzv1.BatchCheckItem{
res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:5", utils.VerbGet, dashboardGroup, dashboardResource, "", []*authzextv1.BatchCheckItem{ newItem(utils.VerbGet, dashboardGroup, dashboardResource, "", "1", "1"),
{Name: "1", Folder: "1"}, newItem(utils.VerbGet, dashboardGroup, dashboardResource, "", "2", "2"),
{Name: "2", Folder: "2"},
})) }))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, res.Groups[groupResource].Items, 2) require.Len(t, res.Results, 2)
assert.True(t, res.Groups[groupResource].Items["1"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "1", "1")].Allowed)
assert.False(t, res.Groups[groupResource].Items["2"]) assert.False(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "2", "2")].Allowed)
}) })
t.Run("user:6 should be able to read folder 1", func(t *testing.T) { t.Run("user:6 should be able to read folder 1", func(t *testing.T) {
groupResource := common.FormatGroupResource(folderGroup, folderResource, "") res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:6", []*authzv1.BatchCheckItem{
res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:6", utils.VerbGet, folderGroup, folderResource, "", []*authzextv1.BatchCheckItem{ newItem(utils.VerbGet, folderGroup, folderResource, "", "", "1"),
{Name: "1"}, newItem(utils.VerbGet, folderGroup, folderResource, "", "", "2"),
{Name: "2"},
})) }))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, res.Groups[groupResource].Items, 2) require.Len(t, res.Results, 2)
assert.True(t, res.Groups[groupResource].Items["1"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", folderGroup, folderResource, "", "1")].Allowed)
assert.False(t, res.Groups[groupResource].Items["2"]) assert.False(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", folderGroup, folderResource, "", "2")].Allowed)
}) })
t.Run("user:7 should be able to read folder {1,2} through group_resource access", func(t *testing.T) { t.Run("user:7 should be able to read folder {1,2} through group_resource access", func(t *testing.T) {
groupResource := common.FormatGroupResource(folderGroup, folderResource, "") res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:7", []*authzv1.BatchCheckItem{
res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:7", utils.VerbGet, folderGroup, folderResource, "", []*authzextv1.BatchCheckItem{ newItem(utils.VerbGet, folderGroup, folderResource, "", "", "1"),
{Name: "1"}, newItem(utils.VerbGet, folderGroup, folderResource, "", "", "2"),
{Name: "2"},
})) }))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, res.Groups[groupResource].Items, 2) require.Len(t, res.Results, 2)
require.True(t, res.Groups[groupResource].Items["1"])
require.True(t, res.Groups[groupResource].Items["2"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", folderGroup, folderResource, "", "1")].Allowed)
assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", folderGroup, folderResource, "", "2")].Allowed)
}) })
t.Run("user:8 should be able to read all resoruce:dashboard.grafana.app/dashboards in folder 6 through folder 5", func(t *testing.T) { t.Run("user:8 should be able to read all resource:dashboard.grafana.app/dashboards in folder 6 through folder 5", func(t *testing.T) {
groupResource := common.FormatGroupResource(dashboardGroup, dashboardResource, "") res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:8", []*authzv1.BatchCheckItem{
res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:8", utils.VerbGet, dashboardGroup, dashboardResource, "", []*authzextv1.BatchCheckItem{ newItem(utils.VerbGet, dashboardGroup, dashboardResource, "", "6", "10"),
{Name: "10", Folder: "6"}, newItem(utils.VerbGet, dashboardGroup, dashboardResource, "", "6", "20"),
{Name: "20", Folder: "6"},
})) }))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, res.Groups[groupResource].Items, 2) require.Len(t, res.Results, 2)
require.True(t, res.Groups[groupResource].Items["10"])
require.True(t, res.Groups[groupResource].Items["20"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "6", "10")].Allowed)
assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "6", "20")].Allowed)
}) })
t.Run("user:9 should be able to create dashboards in folder 6 through folder 5", func(t *testing.T) { t.Run("user:9 should be able to create dashboards in folder 6 through folder 5", func(t *testing.T) {
groupResource := common.FormatGroupResource(dashboardGroup, dashboardResource, "") res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:9", []*authzv1.BatchCheckItem{
res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:9", utils.VerbCreate, dashboardGroup, dashboardResource, "", []*authzextv1.BatchCheckItem{ newItem(utils.VerbCreate, dashboardGroup, dashboardResource, "", "6", "10"),
{Name: "10", Folder: "6"}, newItem(utils.VerbCreate, dashboardGroup, dashboardResource, "", "6", "20"),
{Name: "20", Folder: "6"},
})) }))
require.NoError(t, err) require.NoError(t, err)
t.Log(res.Groups) require.Len(t, res.Results, 2)
require.Len(t, res.Groups[groupResource].Items, 2)
require.True(t, res.Groups[groupResource].Items["10"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "6", "10")].Allowed)
require.True(t, res.Groups[groupResource].Items["20"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "6", "20")].Allowed)
}) })
t.Run("user:10 should be able to get dashboard status for 10 and 11", func(t *testing.T) { t.Run("user:10 should be able to get dashboard status for 10 and 11", func(t *testing.T) {
groupResource := common.FormatGroupResource(dashboardGroup, dashboardResource, statusSubresource) res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:10", []*authzv1.BatchCheckItem{
res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:10", utils.VerbGet, dashboardGroup, dashboardResource, statusSubresource, []*authzextv1.BatchCheckItem{ newItem(utils.VerbGet, dashboardGroup, dashboardResource, statusSubresource, "6", "10"),
{Name: "10", Folder: "6"}, newItem(utils.VerbGet, dashboardGroup, dashboardResource, statusSubresource, "6", "11"),
{Name: "11", Folder: "6"}, newItem(utils.VerbGet, dashboardGroup, dashboardResource, statusSubresource, "6", "12"),
{Name: "12", Folder: "6"},
})) }))
require.NoError(t, err) require.NoError(t, err)
t.Log(res.Groups) require.Len(t, res.Results, 3)
require.Len(t, res.Groups[groupResource].Items, 3)
require.True(t, res.Groups[groupResource].Items["10"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "6", "10")].Allowed)
require.True(t, res.Groups[groupResource].Items["11"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "6", "11")].Allowed)
require.False(t, res.Groups[groupResource].Items["12"]) assert.False(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "6", "12")].Allowed)
}) })
t.Run("user:11 should be able to get dashboard status for 10, 11 and 12 through group_resource", func(t *testing.T) { t.Run("user:11 should be able to get dashboard status for 10, 11 and 12 through group_resource", func(t *testing.T) {
groupResource := common.FormatGroupResource(dashboardGroup, dashboardResource, statusSubresource) res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:11", []*authzv1.BatchCheckItem{
res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:11", utils.VerbGet, dashboardGroup, dashboardResource, statusSubresource, []*authzextv1.BatchCheckItem{ newItem(utils.VerbGet, dashboardGroup, dashboardResource, statusSubresource, "6", "10"),
{Name: "10", Folder: "6"}, newItem(utils.VerbGet, dashboardGroup, dashboardResource, statusSubresource, "6", "11"),
{Name: "11", Folder: "6"}, newItem(utils.VerbGet, dashboardGroup, dashboardResource, statusSubresource, "6", "12"),
{Name: "12", Folder: "6"},
})) }))
require.NoError(t, err) require.NoError(t, err)
t.Log(res.Groups) require.Len(t, res.Results, 3)
require.Len(t, res.Groups[groupResource].Items, 3)
require.True(t, res.Groups[groupResource].Items["10"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "6", "10")].Allowed)
require.True(t, res.Groups[groupResource].Items["11"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "6", "11")].Allowed)
require.True(t, res.Groups[groupResource].Items["12"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "6", "12")].Allowed)
}) })
t.Run("user:12 should be able to get dashboard status in folder 5 and 6", func(t *testing.T) { t.Run("user:12 should be able to get dashboard status in folder 5 and 6", func(t *testing.T) {
groupResource := common.FormatGroupResource(dashboardGroup, dashboardResource, statusSubresource) res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:12", []*authzv1.BatchCheckItem{
res, err := server.BatchCheck(newContextWithNamespace(), newReq("user:12", utils.VerbGet, dashboardGroup, dashboardResource, statusSubresource, []*authzextv1.BatchCheckItem{ newItem(utils.VerbGet, dashboardGroup, dashboardResource, statusSubresource, "5", "10"),
{Name: "10", Folder: "5"}, newItem(utils.VerbGet, dashboardGroup, dashboardResource, statusSubresource, "6", "11"),
{Name: "11", Folder: "6"}, newItem(utils.VerbGet, dashboardGroup, dashboardResource, statusSubresource, "6", "12"),
{Name: "12", Folder: "6"}, newItem(utils.VerbGet, dashboardGroup, dashboardResource, statusSubresource, "1", "13"),
{Name: "13", Folder: "1"},
})) }))
require.NoError(t, err) require.NoError(t, err)
require.Len(t, res.Groups[groupResource].Items, 4) require.Len(t, res.Results, 4)
require.True(t, res.Groups[groupResource].Items["10"])
require.True(t, res.Groups[groupResource].Items["11"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "5", "10")].Allowed)
require.True(t, res.Groups[groupResource].Items["12"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "6", "11")].Allowed)
require.False(t, res.Groups[groupResource].Items["13"]) assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "6", "12")].Allowed)
assert.False(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "1", "13")].Allowed)
})
// Cross-namespace tests
t.Run("cross-namespace: items with explicit namespace should be authorized against their own namespace", func(t *testing.T) {
// Helper to create item with explicit namespace
newItemWithNamespace := func(ns, verb, group, resource, subresource, folder, name string) *authzv1.BatchCheckItem {
correlationID := fmt.Sprintf("%s-%s-%s-%s-%s", ns, group, resource, folder, name)
return &authzv1.BatchCheckItem{
Namespace: ns,
Verb: verb,
Group: group,
Resource: resource,
Subresource: subresource,
Name: name,
Folder: folder,
CorrelationId: correlationID,
}
}
// user:1 has access to dashboard 1 in folder 1 in "default" namespace
// Both items use explicit namespace
res, err := server.BatchCheck(newContextWithNamespace(), &authzv1.BatchCheckRequest{
Subject: "user:1",
Checks: []*authzv1.BatchCheckItem{
// Item in default namespace (should be allowed - user:1 has access)
newItemWithNamespace(namespace, utils.VerbGet, dashboardGroup, dashboardResource, "", "1", "1"),
// Another item in default namespace with different correlation ID
newItem(utils.VerbGet, dashboardGroup, dashboardResource, "", "1", "1"),
},
})
require.NoError(t, err)
require.Len(t, res.Results, 2)
assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s-%s", namespace, dashboardGroup, dashboardResource, "1", "1")].Allowed)
assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s", dashboardGroup, dashboardResource, "1", "1")].Allowed)
})
t.Run("cross-namespace: items from different namespaces in same batch", func(t *testing.T) {
newItemWithNamespace := func(ns, verb, group, resource, subresource, folder, name string) *authzv1.BatchCheckItem {
correlationID := fmt.Sprintf("%s-%s-%s-%s-%s", ns, group, resource, folder, name)
return &authzv1.BatchCheckItem{
Namespace: ns,
Verb: verb,
Group: group,
Resource: resource,
Subresource: subresource,
Name: name,
Folder: folder,
CorrelationId: correlationID,
}
}
// user:2 has group_resource access in "default" namespace
// They should have access in default but not in other-namespace (no tuples there)
res, err := server.BatchCheck(newContextWithNamespace(), &authzv1.BatchCheckRequest{
Subject: "user:2",
Checks: []*authzv1.BatchCheckItem{
// Items in default namespace (should be allowed - user:2 has group_resource access)
newItemWithNamespace(namespace, utils.VerbGet, dashboardGroup, dashboardResource, "", "1", "1"),
newItemWithNamespace(namespace, utils.VerbGet, dashboardGroup, dashboardResource, "", "2", "2"),
// Items in other-namespace (should be denied - no tuples in other-namespace)
newItemWithNamespace("other-namespace", utils.VerbGet, dashboardGroup, dashboardResource, "", "1", "1"),
newItemWithNamespace("other-namespace", utils.VerbGet, dashboardGroup, dashboardResource, "", "2", "2"),
},
})
require.NoError(t, err)
require.Len(t, res.Results, 4)
// Default namespace items should be allowed
assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s-%s", namespace, dashboardGroup, dashboardResource, "1", "1")].Allowed)
assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s-%s", namespace, dashboardGroup, dashboardResource, "2", "2")].Allowed)
// Other namespace items should be denied (no permissions in that namespace)
assert.False(t, res.Results[fmt.Sprintf("%s-%s-%s-%s-%s", "other-namespace", dashboardGroup, dashboardResource, "1", "1")].Allowed)
assert.False(t, res.Results[fmt.Sprintf("%s-%s-%s-%s-%s", "other-namespace", dashboardGroup, dashboardResource, "2", "2")].Allowed)
})
t.Run("cross-namespace: mixed results across multiple namespaces", func(t *testing.T) {
newItemWithNamespace := func(ns, verb, group, resource, subresource, folder, name string) *authzv1.BatchCheckItem {
correlationID := fmt.Sprintf("%s-%s-%s-%s-%s", ns, group, resource, folder, name)
return &authzv1.BatchCheckItem{
Namespace: ns,
Verb: verb,
Group: group,
Resource: resource,
Subresource: subresource,
Name: name,
Folder: folder,
CorrelationId: correlationID,
}
}
// user:1 has specific access to dashboard 1 in folder 1
// user:2 would have broader access, but we're testing user:1
res, err := server.BatchCheck(newContextWithNamespace(), &authzv1.BatchCheckRequest{
Subject: "user:1",
Checks: []*authzv1.BatchCheckItem{
// Allowed in default namespace
newItemWithNamespace(namespace, utils.VerbGet, dashboardGroup, dashboardResource, "", "1", "1"),
// Denied in default namespace (user:1 doesn't have access to dashboard 2)
newItemWithNamespace(namespace, utils.VerbGet, dashboardGroup, dashboardResource, "", "2", "2"),
// Denied in other-namespace (no tuples)
newItemWithNamespace("other-namespace", utils.VerbGet, dashboardGroup, dashboardResource, "", "1", "1"),
},
})
require.NoError(t, err)
require.Len(t, res.Results, 3)
assert.True(t, res.Results[fmt.Sprintf("%s-%s-%s-%s-%s", namespace, dashboardGroup, dashboardResource, "1", "1")].Allowed)
assert.False(t, res.Results[fmt.Sprintf("%s-%s-%s-%s-%s", namespace, dashboardGroup, dashboardResource, "2", "2")].Allowed)
assert.False(t, res.Results[fmt.Sprintf("%s-%s-%s-%s-%s", "other-namespace", dashboardGroup, dashboardResource, "1", "1")].Allowed)
}) })
} }

View File

@@ -15,7 +15,6 @@ import (
"github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/grafana/grafana/pkg/infra/log" "github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing" "github.com/grafana/grafana/pkg/infra/tracing"
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
"github.com/grafana/grafana/pkg/services/authz/zanzana/common" "github.com/grafana/grafana/pkg/services/authz/zanzana/common"
"github.com/grafana/grafana/pkg/services/authz/zanzana/store" "github.com/grafana/grafana/pkg/services/authz/zanzana/store"
"github.com/grafana/grafana/pkg/services/sqlstore" "github.com/grafana/grafana/pkg/services/sqlstore"
@@ -37,14 +36,14 @@ const (
// Timeout for List operations // Timeout for List operations
listTimeout = 30 * time.Second listTimeout = 30 * time.Second
// BenchmarkBatchCheck measures the performance of BatchCheck requests with 50 items per batch.
batchCheckSize = 50
// Resource type constants for benchmarks // Resource type constants for benchmarks
benchDashboardGroup = "dashboard.grafana.app" benchDashboardGroup = "dashboard.grafana.app"
benchDashboardResource = "dashboards" benchDashboardResource = "dashboards"
benchFolderGroup = "folder.grafana.app" benchFolderGroup = "folder.grafana.app"
benchFolderResource = "folders" benchFolderResource = "folders"
// BenchmarkBatchCheck measures the performance of BatchCheck requests with 50 items per batch.
batchCheckSize = 50
) )
// benchmarkData holds all the generated test data for benchmarks // benchmarkData holds all the generated test data for benchmarks
@@ -338,6 +337,14 @@ func setupBenchmarkServer(b *testing.B) (*Server, *benchmarkData) {
} }
cfg := setting.NewCfg() cfg := setting.NewCfg()
cfg.ZanzanaServer.CacheSettings.CheckCacheLimit = 100000 // Cache check results
cfg.ZanzanaServer.CacheSettings.CheckQueryCacheEnabled = true // Cache check subproblems
cfg.ZanzanaServer.CacheSettings.CheckIteratorCacheEnabled = true // Cache DB iterators for checks
cfg.ZanzanaServer.CacheSettings.CheckIteratorCacheMaxResults = 10000 // Max results per iterator
cfg.ZanzanaServer.CacheSettings.SharedIteratorEnabled = true // Share iterators across concurrent checks
cfg.ZanzanaServer.CacheSettings.SharedIteratorLimit = 10000 // Max shared iterators
testStore := sqlstore.NewTestStore(b, sqlstore.WithCfg(cfg)) testStore := sqlstore.NewTestStore(b, sqlstore.WithCfg(cfg))
openFGAStore, err := store.NewEmbeddedStore(cfg, testStore, log.NewNopLogger()) openFGAStore, err := store.NewEmbeddedStore(cfg, testStore, log.NewNopLogger())
@@ -573,58 +580,64 @@ func BenchmarkCheck(b *testing.B) {
}) })
} }
// BenchmarkBatchCheck measures the performance of BatchCheck requests
func BenchmarkBatchCheck(b *testing.B) { func BenchmarkBatchCheck(b *testing.B) {
srv, data := setupBenchmarkServer(b) srv, data := setupBenchmarkServer(b)
ctx := newContextWithNamespace() ctx := newContextWithNamespace()
// Helper to create batch check requests // Helper to create batch check requests using the new authzv1 API
newBatchCheckReq := func(subject string, items []*authzextv1.BatchCheckItem) *authzextv1.BatchCheckRequest { newBatchCheckReq := func(subject string, items []*authzv1.BatchCheckItem) *authzv1.BatchCheckRequest {
return &authzextv1.BatchCheckRequest{ return &authzv1.BatchCheckRequest{
Namespace: benchNamespace, Subject: subject,
Subject: subject, Checks: items,
Items: items,
} }
} }
// Helper to create batch items for resources in folders // Helper to create batch items for resources in folders
createBatchItems := func(resources []string, resourceFolders map[string]string) []*authzextv1.BatchCheckItem { createBatchItems := func(resources []string, resourceFolders map[string]string) []*authzv1.BatchCheckItem {
items := make([]*authzextv1.BatchCheckItem, 0, batchCheckSize) items := make([]*authzv1.BatchCheckItem, 0, batchCheckSize)
for i := 0; i < batchCheckSize && i < len(resources); i++ { for i := 0; i < batchCheckSize && i < len(resources); i++ {
resource := resources[i] resource := resources[i]
items = append(items, &authzextv1.BatchCheckItem{ items = append(items, &authzv1.BatchCheckItem{
Verb: utils.VerbGet, Namespace: benchNamespace,
Group: benchDashboardGroup, Verb: utils.VerbGet,
Resource: benchDashboardResource, Group: benchDashboardGroup,
Name: resource, Resource: benchDashboardResource,
Folder: resourceFolders[resource], Name: resource,
Folder: resourceFolders[resource],
CorrelationId: fmt.Sprintf("item-%d", i),
}) })
} }
return items return items
} }
// Helper to create batch items for folders at a specific depth // Helper to create batch items for folders at a specific depth
createFolderBatchItems := func(folders []string, depth int, folderDepths map[string]int) []*authzextv1.BatchCheckItem { createFolderBatchItems := func(folders []string, depth int, folderDepths map[string]int) []*authzv1.BatchCheckItem {
items := make([]*authzextv1.BatchCheckItem, 0, batchCheckSize) items := make([]*authzv1.BatchCheckItem, 0, batchCheckSize)
for _, folder := range folders { for _, folder := range folders {
if folderDepths[folder] == depth && len(items) < batchCheckSize { if folderDepths[folder] == depth && len(items) < batchCheckSize {
items = append(items, &authzextv1.BatchCheckItem{ items = append(items, &authzv1.BatchCheckItem{
Verb: utils.VerbGet, Namespace: benchNamespace,
Group: benchDashboardGroup, Verb: utils.VerbGet,
Resource: benchDashboardResource, Group: benchDashboardGroup,
Name: fmt.Sprintf("resource-in-%s", folder), Resource: benchDashboardResource,
Folder: folder, Name: fmt.Sprintf("resource-in-%s", folder),
Folder: folder,
CorrelationId: fmt.Sprintf("item-%d", len(items)),
}) })
} }
} }
// Fill remaining slots if needed // Fill remaining slots if needed
for len(items) < batchCheckSize && len(folders) > 0 { for len(items) < batchCheckSize && len(folders) > 0 {
folder := folders[len(items)%len(folders)] folder := folders[len(items)%len(folders)]
items = append(items, &authzextv1.BatchCheckItem{ items = append(items, &authzv1.BatchCheckItem{
Verb: utils.VerbGet, Namespace: benchNamespace,
Group: benchDashboardGroup, Verb: utils.VerbGet,
Resource: benchDashboardResource, Group: benchDashboardGroup,
Name: fmt.Sprintf("resource-%d", len(items)), Resource: benchDashboardResource,
Folder: folder, Name: fmt.Sprintf("resource-%d", len(items)),
Folder: folder,
CorrelationId: fmt.Sprintf("item-%d", len(items)),
}) })
} }
return items return items
@@ -636,6 +649,7 @@ func BenchmarkBatchCheck(b *testing.B) {
// User with group_resource permission - should have access to everything // User with group_resource permission - should have access to everything
user := data.users[0] user := data.users[0]
items := createBatchItems(data.resources, data.resourceFolders) items := createBatchItems(data.resources, data.resourceFolders)
b.Logf("Testing BatchCheck with %d items, user has group_resource permission (all access)", len(items))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@@ -643,7 +657,7 @@ func BenchmarkBatchCheck(b *testing.B) {
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
_ = res.Groups _ = res.Results
} }
}) })
@@ -651,6 +665,7 @@ func BenchmarkBatchCheck(b *testing.B) {
// User with folder permission on shallow folder // User with folder permission on shallow folder
user := data.users[usersPerPattern] user := data.users[usersPerPattern]
items := createFolderBatchItems(data.folders, 1, data.folderDepths) items := createFolderBatchItems(data.folders, 1, data.folderDepths)
b.Logf("Testing BatchCheck with %d items at depth 1", len(items))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@@ -658,7 +673,7 @@ func BenchmarkBatchCheck(b *testing.B) {
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
_ = res.Groups _ = res.Results
} }
}) })
@@ -666,6 +681,7 @@ func BenchmarkBatchCheck(b *testing.B) {
// User with folder permission on mid-depth folder // User with folder permission on mid-depth folder
user := data.users[2*usersPerPattern] user := data.users[2*usersPerPattern]
items := createFolderBatchItems(data.folders, 4, data.folderDepths) items := createFolderBatchItems(data.folders, 4, data.folderDepths)
b.Logf("Testing BatchCheck with %d items at depth 4", len(items))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@@ -673,22 +689,7 @@ func BenchmarkBatchCheck(b *testing.B) {
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
_ = res.Groups _ = res.Results
}
})
b.Run("FolderInheritance/Depth7", func(b *testing.B) {
// Check access on deepest folders (worst case for inheritance traversal)
user := data.users[usersPerPattern]
items := createFolderBatchItems(data.folders, data.maxDepth, data.folderDepths)
b.ResetTimer()
for i := 0; i < b.N; i++ {
res, err := srv.BatchCheck(ctx, newBatchCheckReq(user, items))
if err != nil {
b.Fatal(err)
}
_ = res.Groups
} }
}) })
@@ -696,6 +697,7 @@ func BenchmarkBatchCheck(b *testing.B) {
// User with direct resource permission // User with direct resource permission
user := data.users[4*usersPerPattern] user := data.users[4*usersPerPattern]
items := createBatchItems(data.resources, data.resourceFolders) items := createBatchItems(data.resources, data.resourceFolders)
b.Logf("Testing BatchCheck with %d items, user has direct resource permission", len(items))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@@ -703,22 +705,7 @@ func BenchmarkBatchCheck(b *testing.B) {
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
_ = res.Groups _ = res.Results
}
})
b.Run("TeamMembership", func(b *testing.B) {
// User who is a team member, team has folder permission
user := data.users[5*usersPerPattern]
items := createBatchItems(data.resources, data.resourceFolders)
b.ResetTimer()
for i := 0; i < b.N; i++ {
res, err := srv.BatchCheck(ctx, newBatchCheckReq(user, items))
if err != nil {
b.Fatal(err)
}
_ = res.Groups
} }
}) })
@@ -726,6 +713,7 @@ func BenchmarkBatchCheck(b *testing.B) {
// User with no permissions - tests denial path // User with no permissions - tests denial path
user := data.users[len(data.users)-1] user := data.users[len(data.users)-1]
items := createBatchItems(data.resources, data.resourceFolders) items := createBatchItems(data.resources, data.resourceFolders)
b.Logf("Testing BatchCheck with %d items, user has NO permissions (denial case)", len(items))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@@ -733,24 +721,29 @@ func BenchmarkBatchCheck(b *testing.B) {
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
_ = res.Groups _ = res.Results
} }
}) })
b.Run("MixedFolders", func(b *testing.B) { b.Run("MixedAccess", func(b *testing.B) {
// Batch of items across different folder depths // Create items from different folders - user has access to some but not all
user := data.users[usersPerPattern] user := data.users[3*usersPerPattern] // folder-scoped resource permission
items := make([]*authzextv1.BatchCheckItem, 0, batchCheckSize) items := make([]*authzv1.BatchCheckItem, 0, batchCheckSize)
// Mix of accessible and inaccessible resources
for i := 0; i < batchCheckSize; i++ { for i := 0; i < batchCheckSize; i++ {
folder := data.folders[i%len(data.folders)] folder := data.folders[i%len(data.folders)]
items = append(items, &authzextv1.BatchCheckItem{ items = append(items, &authzv1.BatchCheckItem{
Verb: utils.VerbGet, Namespace: benchNamespace,
Group: benchDashboardGroup, Verb: utils.VerbGet,
Resource: benchDashboardResource, Group: benchDashboardGroup,
Name: fmt.Sprintf("resource-%d", i), Resource: benchDashboardResource,
Folder: folder, Name: fmt.Sprintf("resource-%d", i),
Folder: folder,
CorrelationId: fmt.Sprintf("item-%d", i),
}) })
} }
b.Logf("Testing BatchCheck with %d items, user has mixed access (some allowed, some denied)", len(items))
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
@@ -758,9 +751,31 @@ func BenchmarkBatchCheck(b *testing.B) {
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
_ = res.Groups _ = res.Results
} }
}) })
// Test BatchCheck at various folder depths
for depth := 0; depth <= data.maxDepth; depth++ {
depth := depth // capture for closure
if len(data.foldersByDepth[depth]) == 0 {
continue
}
b.Run(fmt.Sprintf("ByDepth/Depth%d", depth), func(b *testing.B) {
user := fmt.Sprintf("user:depth-%d-access", depth)
items := createFolderBatchItems(data.folders, depth, data.folderDepths)
b.Logf("Testing BatchCheck with %d items at depth %d", len(items), depth)
b.ResetTimer()
for i := 0; i < b.N; i++ {
res, err := srv.BatchCheck(ctx, newBatchCheckReq(user, items))
if err != nil {
b.Fatal(err)
}
_ = res.Results
}
})
}
} }
// BenchmarkList measures the performance of List requests (Compile equivalent) // BenchmarkList measures the performance of List requests (Compile equivalent)

View File

@@ -152,6 +152,67 @@ func (c authzLimitedClient) Check(ctx context.Context, id claims.AuthInfo, req c
return resp, nil return resp, nil
} }
// BatchCheck implements claims.AccessClient.
func (c authzLimitedClient) BatchCheck(ctx context.Context, id claims.AuthInfo, req claims.BatchCheckRequest) (claims.BatchCheckResponse, error) {
ctx, span := tracer.Start(ctx, "resource.authzLimitedClient.BatchCheck", trace.WithAttributes(
attribute.Int("num_checks", len(req.Checks)),
attribute.Bool("fallback_used", FallbackUsed(ctx)),
))
defer span.End()
if FallbackUsed(ctx) {
span.SetStatus(codes.Error, "BatchCheck not supported with fallback")
return claims.BatchCheckResponse{}, fmt.Errorf("BatchCheck not supported when fallback is used")
}
// Filter checks to only those that require RBAC and validate namespace
rbacChecks := make([]claims.BatchCheckItem, 0, len(req.Checks))
allowedByDefault := make(map[string]bool, len(req.Checks))
for _, check := range req.Checks {
if !claims.NamespaceMatches(id.GetNamespace(), check.Namespace) {
span.SetStatus(codes.Error, "Namespace mismatch")
span.RecordError(claims.ErrNamespaceMismatch)
return claims.BatchCheckResponse{}, claims.ErrNamespaceMismatch
}
if c.IsCompatibleWithRBAC(check.Group, check.Resource) {
rbacChecks = append(rbacChecks, check)
} else {
allowedByDefault[check.CorrelationID] = true
}
}
// If all checks are allowed by default, return early
if len(rbacChecks) == 0 {
results := make(map[string]claims.BatchCheckResult, len(req.Checks))
for _, check := range req.Checks {
results[check.CorrelationID] = claims.BatchCheckResult{
Allowed: true,
}
}
return claims.BatchCheckResponse{Results: results}, nil
}
// Call the underlying client with RBAC checks
resp, err := c.client.BatchCheck(ctx, id, claims.BatchCheckRequest{Checks: rbacChecks})
if err != nil {
c.logger.FromContext(ctx).Error("BatchCheck failed", "error", err, "num_checks", len(rbacChecks))
span.SetStatus(codes.Error, fmt.Sprintf("batch check failed: %v", err))
span.RecordError(err)
return resp, err
}
// Merge results with allowed-by-default checks
for correlationID := range allowedByDefault {
resp.Results[correlationID] = claims.BatchCheckResult{
Allowed: true,
}
}
return resp, nil
}
// Compile implements claims.AccessClient. // Compile implements claims.AccessClient.
func (c authzLimitedClient) Compile(ctx context.Context, id claims.AuthInfo, req claims.ListRequest) (claims.ItemChecker, claims.Zookie, error) { func (c authzLimitedClient) Compile(ctx context.Context, id claims.AuthInfo, req claims.ListRequest) (claims.ItemChecker, claims.Zookie, error) {
t := time.Now() t := time.Now()

View File

@@ -159,6 +159,97 @@ func TestNamespaceMatching(t *testing.T) {
} }
} }
func TestAuthzLimitedClient_BatchCheck(t *testing.T) {
mockClient := authlib.FixedAccessClient(true)
client := NewAuthzLimitedClient(mockClient, AuthzOptions{})
t.Run("returns error when fallback is used", func(t *testing.T) {
ctx := WithFallback(context.Background())
req := authlib.BatchCheckRequest{
Checks: []authlib.BatchCheckItem{
{
CorrelationID: "0",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: utils.VerbGet,
Namespace: "stacks-1",
Name: "test-dashboard",
},
},
}
_, err := client.BatchCheck(ctx, &identity.StaticRequester{Namespace: "stacks-1"}, req)
require.Error(t, err)
assert.Contains(t, err.Error(), "fallback")
})
t.Run("works normally without fallback", func(t *testing.T) {
ctx := context.Background()
req := authlib.BatchCheckRequest{
Checks: []authlib.BatchCheckItem{
{
CorrelationID: "0",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: utils.VerbGet,
Namespace: "stacks-1",
Name: "test-dashboard",
},
},
}
resp, err := client.BatchCheck(ctx, &identity.StaticRequester{Namespace: "stacks-1"}, req)
require.NoError(t, err)
require.Len(t, resp.Results, 1)
assert.True(t, resp.Results["0"].Allowed)
})
t.Run("returns error on namespace mismatch", func(t *testing.T) {
ctx := context.Background()
req := authlib.BatchCheckRequest{
Checks: []authlib.BatchCheckItem{
{
CorrelationID: "0",
Group: "dashboard.grafana.app",
Resource: "dashboards",
Verb: utils.VerbGet,
Namespace: "stacks-2", // Different namespace
Name: "test-dashboard",
},
},
}
_, err := client.BatchCheck(ctx, &identity.StaticRequester{Namespace: "stacks-1"}, req)
require.Error(t, err)
assert.ErrorIs(t, err, authlib.ErrNamespaceMismatch)
})
t.Run("allows non-RBAC resources by default", func(t *testing.T) {
// Use a client that would deny if checked
denyClient := authlib.FixedAccessClient(false)
client := NewAuthzLimitedClient(denyClient, AuthzOptions{})
ctx := context.Background()
req := authlib.BatchCheckRequest{
Checks: []authlib.BatchCheckItem{
{
CorrelationID: "0",
Group: "unknown.group",
Resource: "unknown.resource",
Verb: utils.VerbGet,
Namespace: "stacks-1",
Name: "test",
},
},
}
resp, err := client.BatchCheck(ctx, &identity.StaticRequester{Namespace: "stacks-1"}, req)
require.NoError(t, err)
require.Len(t, resp.Results, 1)
assert.True(t, resp.Results["0"].Allowed, "non-RBAC resources should be allowed by default")
})
}
// TestNamespaceMatchingFallback tests namespace matching in Check and Compile methods when fallback is used // TestNamespaceMatchingFallback tests namespace matching in Check and Compile methods when fallback is used
func TestNamespaceMatchingFallback(t *testing.T) { func TestNamespaceMatchingFallback(t *testing.T) {
// Create a mock client that always returns allowed=true // Create a mock client that always returns allowed=true

View File

@@ -20,6 +20,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/grafana/authlib/authz"
claims "github.com/grafana/authlib/types" claims "github.com/grafana/authlib/types"
"github.com/grafana/dskit/backoff" "github.com/grafana/dskit/backoff"
@@ -1051,78 +1052,93 @@ func (s *server) List(ctx context.Context, req *resourcepb.ListRequest) (*resour
rsp := &resourcepb.ListResponse{} rsp := &resourcepb.ListResponse{}
key := req.Options.Key key := req.Options.Key
checker, _, err := s.access.Compile(ctx, user, claims.ListRequest{
Group: key.Group, // Determine verb for authorization
Resource: key.Resource, verb := utils.VerbGet
Namespace: key.Namespace,
Verb: utils.VerbGet,
})
var trashChecker claims.ItemChecker // only for trash
if req.Source == resourcepb.ListRequest_TRASH { if req.Source == resourcepb.ListRequest_TRASH {
trashChecker, _, err = s.access.Compile(ctx, user, claims.ListRequest{ verb = utils.VerbSetPermissions // Basically Admin for trash
Group: key.Group,
Resource: key.Resource,
Namespace: key.Namespace,
Verb: utils.VerbSetPermissions, // Basically Admin
})
if err != nil {
return &resourcepb.ListResponse{Error: AsErrorResult(err)}, nil
}
}
if err != nil {
return &resourcepb.ListResponse{Error: AsErrorResult(err)}, nil
}
if checker == nil {
return &resourcepb.ListResponse{Error: &resourcepb.ErrorResult{
Code: http.StatusForbidden,
}}, nil
} }
// Candidate item for batch authorization
type candidateItem struct {
name string
folder string
resourceVersion int64
value []byte
continueToken string
}
var nextToken string
var iterErr error
// Process items in batches within the iterator
iterFunc := func(iter ListIterator) error { iterFunc := func(iter ListIterator) error {
for iter.Next() { // Convert ListIterator to iter.Seq
if err := iter.Error(); err != nil { candidates := func(yield func(candidateItem) bool) {
for iter.Next() {
if !yield(candidateItem{
name: iter.Name(),
folder: iter.Folder(),
resourceVersion: iter.ResourceVersion(),
value: iter.Value(),
continueToken: iter.ContinueToken(),
}) {
return
}
}
}
extractFn := func(c candidateItem) authz.BatchCheckItem {
return authz.BatchCheckItem{
Name: c.name,
Folder: c.folder,
Verb: verb,
Group: key.Group,
Resource: key.Resource,
Namespace: key.Namespace,
}
}
for item, err := range authz.FilterAuthorized(ctx, s.access, candidates, extractFn).Items {
if err != nil {
return err return err
} }
// Trash is only accessible to admins or the user who deleted the object // For trash items, also check if user is the one who deleted it
if req.Source == resourcepb.ListRequest_TRASH { if req.Source == resourcepb.ListRequest_TRASH {
if !s.isTrashItemAuthorized(ctx, iter, trashChecker) { if !s.isTrashItemAuthorizedByValue(ctx, item.value, true) {
continue continue
} }
} else if !checker(iter.Name(), iter.Folder()) {
continue
} }
item := &resourcepb.ResourceWrapper{ rsp.Items = append(rsp.Items, &resourcepb.ResourceWrapper{
ResourceVersion: iter.ResourceVersion(), ResourceVersion: item.resourceVersion,
Value: iter.Value(), Value: item.value,
} })
pageBytes += len(item.value)
pageBytes += len(item.Value) // Check if we've reached the page limit
rsp.Items = append(rsp.Items, item)
if (req.Limit > 0 && len(rsp.Items) >= int(req.Limit)) || pageBytes >= maxPageBytes { if (req.Limit > 0 && len(rsp.Items) >= int(req.Limit)) || pageBytes >= maxPageBytes {
t := iter.ContinueToken() nextToken = item.continueToken
if iter.Next() { break
rsp.NextPageToken = t
}
return iter.Error()
} }
} }
return iter.Error() return iter.Error()
} }
var rv int64 var rv int64
switch req.Source { switch req.Source {
case resourcepb.ListRequest_STORE: case resourcepb.ListRequest_STORE:
rv, err = s.backend.ListIterator(ctx, req, iterFunc) rv, iterErr = s.backend.ListIterator(ctx, req, iterFunc)
case resourcepb.ListRequest_HISTORY, resourcepb.ListRequest_TRASH: case resourcepb.ListRequest_HISTORY, resourcepb.ListRequest_TRASH:
rv, err = s.backend.ListHistory(ctx, req, iterFunc) rv, iterErr = s.backend.ListHistory(ctx, req, iterFunc)
default: default:
return nil, apierrors.NewBadRequest(fmt.Sprintf("invalid list source: %v", req.Source)) return nil, apierrors.NewBadRequest(fmt.Sprintf("invalid list source: %v", req.Source))
} }
if err != nil { if iterErr != nil {
rsp.Error = AsErrorResult(err) rsp.Error = AsErrorResult(iterErr)
return rsp, nil return rsp, nil
} }
@@ -1134,18 +1150,21 @@ func (s *server) List(ctx context.Context, req *resourcepb.ListRequest) (*resour
return rsp, nil return rsp, nil
} }
rsp.ResourceVersion = rv rsp.ResourceVersion = rv
return rsp, err
rsp.NextPageToken = nextToken
return rsp, nil
} }
// isTrashItemAuthorized checks if the user has access to the trash item. // isTrashItemAuthorizedByValue checks if the user has access to the trash item using the raw value.
func (s *server) isTrashItemAuthorized(ctx context.Context, iter ListIterator, trashChecker claims.ItemChecker) bool { // hasAdminPermission indicates whether the user has admin permission (from BatchCheck).
func (s *server) isTrashItemAuthorizedByValue(ctx context.Context, value []byte, hasAdminPermission bool) bool {
user, ok := claims.AuthInfoFrom(ctx) user, ok := claims.AuthInfoFrom(ctx)
if !ok || user == nil { if !ok || user == nil {
return false return false
} }
partial := &metav1.PartialObjectMetadata{} partial := &metav1.PartialObjectMetadata{}
err := json.Unmarshal(iter.Value(), partial) err := json.Unmarshal(value, partial)
if err != nil { if err != nil {
return false return false
} }
@@ -1161,7 +1180,7 @@ func (s *server) isTrashItemAuthorized(ctx context.Context, iter ListIterator, t
} }
// Trash is only accessible to admins or the user who deleted the object // Trash is only accessible to admins or the user who deleted the object
return obj.GetUpdatedBy() == user.GetUID() || trashChecker(iter.Name(), iter.Folder()) return obj.GetUpdatedBy() == user.GetUID() || hasAdminPermission
} }
func (s *server) initWatcher() error { func (s *server) initWatcher() error {
@@ -1202,18 +1221,6 @@ func (s *server) Watch(req *resourcepb.WatchRequest, srv resourcepb.ResourceStor
} }
key := req.Options.Key key := req.Options.Key
checker, _, err := s.access.Compile(ctx, user, claims.ListRequest{
Group: key.Group,
Resource: key.Resource,
Namespace: key.Namespace,
Verb: utils.VerbGet,
})
if err != nil {
return err
}
if checker == nil {
return apierrors.NewUnauthorized("not allowed to list anything") // ?? or a single error?
}
// Start listening -- this will buffer any changes that happen while we backfill. // Start listening -- this will buffer any changes that happen while we backfill.
// If events are generated faster than we can process them, then some events will be dropped. // If events are generated faster than we can process them, then some events will be dropped.
@@ -1267,22 +1274,56 @@ func (s *server) Watch(req *resourcepb.WatchRequest, srv resourcepb.ResourceStor
var initialEventsRV int64 // resource version coming from the initial events var initialEventsRV int64 // resource version coming from the initial events
if req.SendInitialEvents { if req.SendInitialEvents {
// Backfill the stream by adding every existing entities. // Backfill the stream by adding every existing entities with batch authorization
type candidateEvent struct {
name string
folder string
value []byte
version int64
}
initialEventsRV, err = s.backend.ListIterator(ctx, &resourcepb.ListRequest{Options: req.Options}, func(iter ListIterator) error { initialEventsRV, err = s.backend.ListIterator(ctx, &resourcepb.ListRequest{Options: req.Options}, func(iter ListIterator) error {
for iter.Next() { // Convert ListIterator to iter.Seq
if err := iter.Error(); err != nil { candidates := func(yield func(candidateEvent) bool) {
for iter.Next() {
if !yield(candidateEvent{
name: iter.Name(),
folder: iter.Folder(),
value: iter.Value(),
version: iter.ResourceVersion(),
}) {
return
}
}
}
extractFn := func(c candidateEvent) authz.BatchCheckItem {
return authz.BatchCheckItem{
Name: c.name,
Folder: c.folder,
Verb: utils.VerbGet,
Group: key.Group,
Resource: key.Resource,
Namespace: key.Namespace,
}
}
for item, err := range authz.FilterAuthorized(ctx, s.access, candidates, extractFn).Items {
if err != nil {
return err return err
} }
if err := srv.Send(&resourcepb.WatchEvent{ if err := srv.Send(&resourcepb.WatchEvent{
Type: resourcepb.WatchEvent_ADDED, Type: resourcepb.WatchEvent_ADDED,
Resource: &resourcepb.WatchEvent_Resource{ Resource: &resourcepb.WatchEvent_Resource{
Value: iter.Value(), Value: item.value,
Version: iter.ResourceVersion(), Version: item.version,
}, },
}); err != nil { }); err != nil {
return err return err
} }
} }
return iter.Error() return iter.Error()
}) })
if err != nil { if err != nil {
@@ -1309,6 +1350,127 @@ func (s *server) Watch(req *resourcepb.WatchRequest, srv resourcepb.ResourceStor
default: default:
since = req.Since since = req.Since
} }
// Type to hold candidate events for batch authorization
type candidateWatchEvent struct {
event *WrittenEvent
}
// Type to hold authorized event with its fetched previous object
type authorizedEvent struct {
event *WrittenEvent
previous *resourcepb.ReadResponse // nil if no previous or fetch failed
}
const maxBatchSize = 100
// processEventBatch authorizes and sends a batch of events.
// Errors are logged but never returned to keep the watch running.
processEventBatch := func(batch []*WrittenEvent) {
if len(batch) == 0 {
return
}
// Convert batch to iter.Seq for FilterAuthorized
candidates := func(yield func(candidateWatchEvent) bool) {
for _, event := range batch {
if !yield(candidateWatchEvent{event: event}) {
return
}
}
}
extractFn := func(c candidateWatchEvent) authz.BatchCheckItem {
return authz.BatchCheckItem{
Name: c.event.Key.Name,
Folder: c.event.Folder,
Verb: utils.VerbGet,
Group: key.Group,
Resource: key.Resource,
Namespace: key.Namespace,
}
}
// Step 1: Collect all authorized events
var authorizedEvents []authorizedEvent
for item, err := range authz.FilterAuthorized(ctx, s.access, candidates, extractFn).Items {
if err != nil {
s.log.Error("error during batch authorization", "error", err)
continue
}
authorizedEvents = append(authorizedEvents, authorizedEvent{event: item.event})
}
if len(authorizedEvents) == 0 {
return
}
// Step 2: Fetch previous objects concurrently for events that need them
var wg sync.WaitGroup
for i := range authorizedEvents {
if authorizedEvents[i].event.PreviousRV > 0 {
wg.Add(1)
go func(idx int) {
defer wg.Done()
event := authorizedEvents[idx].event
prevObj, readErr := s.Read(ctx, &resourcepb.ReadRequest{Key: event.Key, ResourceVersion: event.PreviousRV})
if readErr != nil {
s.log.Error("error reading previous object", "key", event.Key, "resource_version", event.PreviousRV, "error", readErr)
return
}
if prevObj.Error != nil {
s.log.Error("error reading previous object", "key", event.Key, "resource_version", event.PreviousRV, "error", prevObj.Error)
return
}
if prevObj.ResourceVersion != event.PreviousRV {
s.log.Error("resource version mismatch", "key", event.Key, "resource_version", event.PreviousRV, "actual", prevObj.ResourceVersion)
return
}
authorizedEvents[idx].previous = prevObj
}(i)
}
}
wg.Wait()
// Step 3: Send all events in order
for _, authEvent := range authorizedEvents {
event := authEvent.event
value := event.Value
// remove the delete marker stored in the value for deleted objects
if event.Type == resourcepb.WatchEvent_DELETED {
value = []byte{}
}
resp := &resourcepb.WatchEvent{
Timestamp: event.Timestamp,
Type: event.Type,
Resource: &resourcepb.WatchEvent_Resource{
Value: value,
Version: event.ResourceVersion,
},
}
if authEvent.previous != nil {
resp.Previous = &resourcepb.WatchEvent_Resource{
Value: authEvent.previous.Value,
Version: authEvent.previous.ResourceVersion,
}
}
if err := srv.Send(resp); err != nil {
s.log.Error("error sending watch event", "key", event.Key, "error", err)
continue
}
if s.storageMetrics != nil {
// record latency - resource version is a unix timestamp in microseconds so we convert to seconds
latencySeconds := float64(time.Now().UnixMicro()-event.ResourceVersion) / 1e6
if latencySeconds > 0 {
s.storageMetrics.WatchEventLatency.WithLabelValues(event.Key.Resource).Observe(latencySeconds)
}
}
}
}
// Main event loop with batching
var batch []*WrittenEvent
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
@@ -1316,57 +1478,40 @@ func (s *server) Watch(req *resourcepb.WatchRequest, srv resourcepb.ResourceStor
case event, ok := <-stream: case event, ok := <-stream:
if !ok { if !ok {
// Process any remaining events in the batch before closing
processEventBatch(batch)
s.log.Debug("watch events closed") s.log.Debug("watch events closed")
return nil return nil
} }
s.log.Debug("Server Broadcasting", "type", event.Type, "rv", event.ResourceVersion, "previousRV", event.PreviousRV, "group", event.Key.Group, "namespace", event.Key.Namespace, "resource", event.Key.Resource, "name", event.Key.Name) s.log.Debug("Server Broadcasting", "type", event.Type, "rv", event.ResourceVersion, "previousRV", event.PreviousRV, "group", event.Key.Group, "namespace", event.Key.Namespace, "resource", event.Key.Resource, "name", event.Key.Name)
if event.ResourceVersion > since && matchesQueryKey(req.Options.Key, event.Key) { if event.ResourceVersion > since && matchesQueryKey(req.Options.Key, event.Key) {
if !checker(event.Key.Name, event.Folder) { batch = append(batch, event)
continue }
}
value := event.Value // Drain any additional events that are already available (non-blocking)
// remove the delete marker stored in the value for deleted objects // Stop draining when we reach maxBatchSize to bound memory and latency
if event.Type == resourcepb.WatchEvent_DELETED { draining := true
value = []byte{} for draining && len(batch) < maxBatchSize {
} select {
resp := &resourcepb.WatchEvent{ case event, ok := <-stream:
Timestamp: event.Timestamp, if !ok {
Type: event.Type, // Process the batch before closing
Resource: &resourcepb.WatchEvent_Resource{ processEventBatch(batch)
Value: value, s.log.Debug("watch events closed")
Version: event.ResourceVersion, return nil
},
}
if event.PreviousRV > 0 {
prevObj, err := s.Read(ctx, &resourcepb.ReadRequest{Key: event.Key, ResourceVersion: event.PreviousRV})
if err != nil {
// This scenario should never happen, but if it does, we should log it and continue
// sending the event without the previous object. The client will decide what to do.
s.log.Error("error reading previous object", "key", event.Key, "resource_version", event.PreviousRV, "error", prevObj.Error)
} else {
if prevObj.ResourceVersion != event.PreviousRV {
s.log.Error("resource version mismatch", "key", event.Key, "resource_version", event.PreviousRV, "actual", prevObj.ResourceVersion)
return fmt.Errorf("resource version mismatch")
}
resp.Previous = &resourcepb.WatchEvent_Resource{
Value: prevObj.Value,
Version: prevObj.ResourceVersion,
}
} }
} s.log.Debug("Server Broadcasting", "type", event.Type, "rv", event.ResourceVersion, "previousRV", event.PreviousRV, "group", event.Key.Group, "namespace", event.Key.Namespace, "resource", event.Key.Resource, "name", event.Key.Name)
if err := srv.Send(resp); err != nil { if event.ResourceVersion > since && matchesQueryKey(req.Options.Key, event.Key) {
return err batch = append(batch, event)
}
if s.storageMetrics != nil {
// record latency - resource version is a unix timestamp in microseconds so we convert to seconds
latencySeconds := float64(time.Now().UnixMicro()-event.ResourceVersion) / 1e6
if latencySeconds > 0 {
s.storageMetrics.WatchEventLatency.WithLabelValues(event.Key.Resource).Observe(latencySeconds)
} }
default:
draining = false
} }
} }
// Process the collected batch
processEventBatch(batch)
batch = batch[:0] // Reset batch for reuse
} }
} }
} }

View File

@@ -6,6 +6,7 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"iter"
"math" "math"
"os" "os"
"path/filepath" "path/filepath"
@@ -22,7 +23,6 @@ import (
"github.com/blevesearch/bleve/v2/mapping" "github.com/blevesearch/bleve/v2/mapping"
"github.com/blevesearch/bleve/v2/search" "github.com/blevesearch/bleve/v2/search"
"github.com/blevesearch/bleve/v2/search/query" "github.com/blevesearch/bleve/v2/search/query"
bleveSearch "github.com/blevesearch/bleve/v2/search/searcher"
index "github.com/blevesearch/bleve_index_api" index "github.com/blevesearch/bleve_index_api"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
bolterrors "go.etcd.io/bbolt/errors" bolterrors "go.etcd.io/bbolt/errors"
@@ -35,6 +35,7 @@ import (
"github.com/grafana/grafana/pkg/storage/unified/resourcepb" "github.com/grafana/grafana/pkg/storage/unified/resourcepb"
"github.com/grafana/grafana/pkg/storage/unified/search/builders" "github.com/grafana/grafana/pkg/storage/unified/search/builders"
"github.com/grafana/authlib/authz"
authlib "github.com/grafana/authlib/types" authlib "github.com/grafana/authlib/types"
"github.com/grafana/grafana/pkg/apimachinery/utils" "github.com/grafana/grafana/pkg/apimachinery/utils"
@@ -1300,43 +1301,27 @@ func (b *bleveIndex) toBleveSearchRequest(ctx context.Context, req *resourcepb.R
} }
if access != nil { if access != nil {
auth, ok := authlib.AuthInfoFrom(ctx)
if !ok {
return nil, resource.AsErrorResult(fmt.Errorf("missing auth info"))
}
verb := utils.VerbList verb := utils.VerbList
if req.Permission == int64(dashboardaccess.PERMISSION_EDIT) { if req.Permission == int64(dashboardaccess.PERMISSION_EDIT) {
verb = utils.VerbPatch verb = utils.VerbPatch
} }
checker, _, err := access.Compile(ctx, auth, authlib.ListRequest{ // Build resource -> verb mapping for batch authorization
Namespace: b.key.Namespace, resources := map[string]string{
Group: b.key.Group, b.key.Resource: verb,
Resource: b.key.Resource,
Verb: verb,
})
if err != nil {
return nil, resource.AsErrorResult(err)
}
checkers := map[string]authlib.ItemChecker{
b.key.Resource: checker,
} }
// handle federation // Handle federation
for _, federated := range req.Federated { for _, federated := range req.Federated {
checker, _, err := access.Compile(ctx, auth, authlib.ListRequest{ resources[federated.Resource] = utils.VerbList
Namespace: federated.Namespace,
Group: federated.Group,
Resource: federated.Resource,
Verb: utils.VerbList,
})
if err != nil {
return nil, resource.AsErrorResult(err)
}
checkers[federated.Resource] = checker
} }
searchrequest.Query = newPermissionScopedQuery(searchrequest.Query, checkers) searchrequest.Query = newPermissionScopedQuery(searchrequest.Query, permissionScopedQueryConfig{
access: access,
namespace: b.key.Namespace,
group: b.key.Group,
resources: resources,
})
} }
for k, v := range req.Facet { for k, v := range req.Facet {
@@ -1866,71 +1851,239 @@ func newResponseFacet(v *search.FacetResult) *resourcepb.ResourceSearchResponse_
type permissionScopedQuery struct { type permissionScopedQuery struct {
query.Query query.Query
checkers map[string]authlib.ItemChecker // one checker per resource access authlib.AccessClient
log log.Logger namespace string
group string
resources map[string]string // resource -> verb mapping
log log.Logger
} }
func newPermissionScopedQuery(q query.Query, checkers map[string]authlib.ItemChecker) *permissionScopedQuery { type permissionScopedQueryConfig struct {
access authlib.AccessClient
namespace string
group string
resources map[string]string // resource -> verb mapping
}
func newPermissionScopedQuery(q query.Query, cfg permissionScopedQueryConfig) *permissionScopedQuery {
return &permissionScopedQuery{ return &permissionScopedQuery{
Query: q, Query: q,
checkers: checkers, access: cfg.access,
log: log.New("search_permissions"), namespace: cfg.namespace,
group: cfg.group,
resources: cfg.resources,
log: log.New("search_permissions"),
} }
} }
func (q *permissionScopedQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) { func (q *permissionScopedQuery) Searcher(ctx context.Context, i index.IndexReader, m mapping.IndexMapping, options search.SearcherOptions) (search.Searcher, error) {
// Get a new logger from context, to pass traceIDs etc.
logger := q.log.FromContext(ctx) logger := q.log.FromContext(ctx)
searcher, err := q.Query.Searcher(ctx, i, m, options) searcher, err := q.Query.Searcher(ctx, i, m, options)
if err != nil { if err != nil {
return nil, err return nil, err
} }
dvReader, err := i.DocValueReader([]string{"folder"}) dvReader, err := i.DocValueReader([]string{"folder"})
if err != nil { if err != nil {
return nil, err return nil, err
} }
filteringSearcher := bleveSearch.NewFilteringSearcher(ctx, searcher, func(d *search.DocumentMatch) bool {
// The doc ID has the format: <namespace>/<group>/<resourceType>/<name>
// IndexInternalID will be the same as the doc ID when using an in-memory index, but when using a file-based
// index it becomes a binary encoded number that has some other internal meaning. Using ExternalID() will get the
// correct doc ID regardless of the index type.
d.ID, err = i.ExternalID(d.IndexInternalID)
if err != nil {
logger.Debug("Error getting external ID", "error", err)
return false
}
parts := strings.Split(d.ID, "/") return newBatchAuthzSearcher(ctx, searcher, i, dvReader, q.access, q.namespace, q.group, q.resources, logger), nil
// Exclude doc if id isn't expected format }
if len(parts) != 4 {
logger.Debug("Unexpected document ID format", "id", d.ID) // docInfo holds document information for authorization
return false type docInfo struct {
} doc *search.DocumentMatch
ns := parts[0] resourceType string
resource := parts[2] name string
name := parts[3] folder string
folder := "" verb string
err = dvReader.VisitDocValues(d.IndexInternalID, func(field string, value []byte) { }
if field == "folder" {
folder = string(value) // batchAuthzSearcher implements a batch-aware authorization filtering searcher
// using FilterAuthorized with iter.Pull2 for efficient batched authorization
type batchAuthzSearcher struct {
ctx context.Context
searcher search.Searcher
indexReader index.IndexReader
dvReader index.DocValueReader
access authlib.AccessClient
namespace string
group string
resources map[string]string // resource -> verb mapping
log log.Logger
// Pull iterator state (lazily initialized)
searchCtx *search.SearchContext
next func() (docInfo, error, bool)
stop func()
}
func newBatchAuthzSearcher(
ctx context.Context,
searcher search.Searcher,
indexReader index.IndexReader,
dvReader index.DocValueReader,
access authlib.AccessClient,
namespace string,
group string,
resources map[string]string,
logger log.Logger,
) *batchAuthzSearcher {
return &batchAuthzSearcher{
ctx: ctx,
searcher: searcher,
indexReader: indexReader,
dvReader: dvReader,
access: access,
namespace: namespace,
group: group,
resources: resources,
log: logger,
}
}
func (s *batchAuthzSearcher) Next(searchCtx *search.SearchContext) (*search.DocumentMatch, error) {
// Lazy initialization of pull iterator
if s.next == nil {
s.searchCtx = searchCtx
s.initPullIterator()
}
info, err, ok := s.next()
if !ok {
return nil, nil // No more documents
}
if err != nil {
return nil, err
}
return info.doc, nil
}
// initPullIterator sets up the FilterAuthorized iterator as a pull iterator
func (s *batchAuthzSearcher) initPullIterator() {
// Create iter.Seq that pulls documents from searcher and parses them
candidates := func(yield func(docInfo) bool) {
for {
doc, err := s.searcher.Next(s.searchCtx)
if err != nil {
s.log.Debug("Error getting next document", "error", err)
return
}
if doc == nil {
return // No more documents
} }
})
if err != nil {
logger.Debug("Error reading doc values", "error", err)
return false
}
if _, ok := q.checkers[resource]; !ok {
logger.Debug("No resource checker found", "resource", resource)
return false
}
allowed := q.checkers[resource](name, folder)
if !allowed {
logger.Debug("Denying access", "ns", ns, "name", name, "folder", folder)
}
return allowed
})
return filteringSearcher, nil info, ok := s.parseDocInfo(doc)
if !ok {
continue // Skip invalid documents
}
if !yield(info) {
return
}
}
}
extractFn := func(info docInfo) authz.BatchCheckItem {
return authz.BatchCheckItem{
Name: info.name,
Folder: info.folder,
Verb: info.verb,
Group: s.group,
Resource: info.resourceType,
Namespace: s.namespace,
}
}
// FilterAuthorized extracts auth from context and batches internally
authzIter := authz.FilterAuthorized(s.ctx, s.access, candidates, extractFn).Items
// Convert push iterator to pull iterator
s.next, s.stop = iter.Pull2(authzIter)
}
// parseDocInfo extracts document information needed for authorization
func (s *batchAuthzSearcher) parseDocInfo(doc *search.DocumentMatch) (docInfo, bool) {
// Get external ID
externalID, err := s.indexReader.ExternalID(doc.IndexInternalID)
if err != nil {
s.log.Debug("Error getting external ID", "error", err)
return docInfo{}, false
}
doc.ID = externalID
// Parse doc ID: <namespace>/<group>/<resourceType>/<name>
parts := strings.Split(doc.ID, "/")
if len(parts) != 4 {
s.log.Debug("Unexpected document ID format", "id", doc.ID)
return docInfo{}, false
}
resourceType := parts[2]
name := parts[3]
// Get folder from doc values
folder := ""
err = s.dvReader.VisitDocValues(doc.IndexInternalID, func(field string, value []byte) {
if field == "folder" {
folder = string(value)
}
})
if err != nil {
s.log.Debug("Error reading doc values", "error", err)
return docInfo{}, false
}
// Check if we have a verb for this resource type
verb, ok := s.resources[resourceType]
if !ok {
s.log.Debug("No verb found for resource", "resource", resourceType)
return docInfo{}, false
}
return docInfo{
doc: doc,
resourceType: resourceType,
name: name,
folder: folder,
verb: verb,
}, true
}
func (s *batchAuthzSearcher) Advance(searchCtx *search.SearchContext, ID index.IndexInternalID) (*search.DocumentMatch, error) {
return s.searcher.Advance(searchCtx, ID)
}
func (s *batchAuthzSearcher) Close() error {
if s.stop != nil {
s.stop()
}
return s.searcher.Close()
}
func (s *batchAuthzSearcher) Size() int {
return s.searcher.Size()
}
func (s *batchAuthzSearcher) DocumentMatchPoolSize() int {
return s.searcher.DocumentMatchPoolSize()
}
func (s *batchAuthzSearcher) Min() int {
return s.searcher.Min()
}
func (s *batchAuthzSearcher) Count() uint64 {
return s.searcher.Count()
}
func (s *batchAuthzSearcher) SetQueryNorm(qnorm float64) {
s.searcher.SetQueryNorm(qnorm)
}
func (s *batchAuthzSearcher) Weight() float64 {
return s.searcher.Weight()
} }
// hasTerms - any value that will be split into multiple tokens // hasTerms - any value that will be split into multiple tokens

View File

@@ -653,8 +653,12 @@ func (nc StubAccessClient) Write(ctx context.Context, req *authzextv1.WriteReque
return nil return nil
} }
func (nc StubAccessClient) BatchCheck(ctx context.Context, req *authzextv1.BatchCheckRequest) (*authzextv1.BatchCheckResponse, error) { func (nc StubAccessClient) BatchCheck(ctx context.Context, user authlib.AuthInfo, req authlib.BatchCheckRequest) (authlib.BatchCheckResponse, error) {
return nil, nil results := make(map[string]authlib.BatchCheckResult, len(req.Checks))
for _, item := range req.Checks {
results[item.CorrelationID] = authlib.BatchCheckResult{Allowed: nc.resourceResponses[item.Resource]}
}
return authlib.BatchCheckResponse{Results: results}, nil
} }
func TestSafeInt64ToInt(t *testing.T) { func TestSafeInt64ToInt(t *testing.T) {

View File

@@ -517,6 +517,28 @@ func (m *mockAccessClient) Check(ctx context.Context, user types.AuthInfo, req t
return types.CheckResponse{Allowed: m.allowed}, nil return types.CheckResponse{Allowed: m.allowed}, nil
} }
func (m *mockAccessClient) BatchCheck(ctx context.Context, user types.AuthInfo, req types.BatchCheckRequest) (types.BatchCheckResponse, error) {
results := make(map[string]types.BatchCheckResult, len(req.Checks))
for _, check := range req.Checks {
allowed := m.allowed
// Check specific folder:verb mappings if provided
if m.allowedMap != nil {
key := fmt.Sprintf("%s:%s", check.Folder, check.Verb)
if a, exists := m.allowedMap[key]; exists {
allowed = a
}
}
results[check.CorrelationID] = types.BatchCheckResult{
Allowed: allowed,
}
}
return types.BatchCheckResponse{Results: results}, nil
}
func (m *mockAccessClient) Compile(ctx context.Context, user types.AuthInfo, req types.ListRequest) (types.ItemChecker, types.Zookie, error) { func (m *mockAccessClient) Compile(ctx context.Context, user types.AuthInfo, req types.ListRequest) (types.ItemChecker, types.Zookie, error) {
if m.compileFn != nil { if m.compileFn != nil {
return m.compileFn(user, req), types.NoopZookie{}, nil return m.compileFn(user, req), types.NoopZookie{}, nil