Compare commits

..

1 Commits

Author SHA1 Message Date
dependabot[bot] 97f1bcf7d5 deps(go): bump modernc.org/sqlite from 1.40.1 to 1.42.2
Bumps [modernc.org/sqlite](https://gitlab.com/cznic/sqlite) from 1.40.1 to 1.42.2.
- [Commits](https://gitlab.com/cznic/sqlite/compare/v1.40.1...v1.42.2)

---
updated-dependencies:
- dependency-name: modernc.org/sqlite
  dependency-version: 1.42.2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-01-08 09:47:56 +00:00
168 changed files with 2393 additions and 8696 deletions
@@ -1,8 +1,8 @@
diff --git a/dist/builder-manager/index.js b/dist/builder-manager/index.js
index ac8ac6a5f6a3b7852c4064e93dc9acd3201289e6..34a0a5a5c38dd7fe525c9ebd382a10a451d4d4f3 100644
index 3d7f9b213dae1801bda62b31db31b9113e382ccd..212501c63d20146c29db63fb0f6300c6779eecb5 100644
--- a/dist/builder-manager/index.js
+++ b/dist/builder-manager/index.js
@@ -1974,7 +1974,7 @@ var pa = /^\/($|\?)/, G, C, xt = /* @__PURE__ */ o(async (e) => {
@@ -1970,7 +1970,7 @@ var pa = /^\/($|\?)/, G, C, xt = /* @__PURE__ */ o(async (e) => {
bundle: !0,
minify: !0,
sourcemap: !1,
-1
View File
@@ -91,7 +91,6 @@ COPY pkg/storage/unified/resource pkg/storage/unified/resource
COPY pkg/storage/unified/resourcepb pkg/storage/unified/resourcepb
COPY pkg/storage/unified/apistore pkg/storage/unified/apistore
COPY pkg/semconv pkg/semconv
COPY pkg/plugins pkg/plugins
COPY pkg/aggregator pkg/aggregator
COPY apps/playlist apps/playlist
COPY apps/quotas apps/quotas
@@ -1,287 +0,0 @@
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v1beta1",
"metadata": {
"name": "legacy-ds-ref"
},
"spec": {
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"panels": [
{
"datasource": "${datasource}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Minimum cluster size"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
},
{
"id": "custom.lineStyle",
"value": {
"dash": [10, 10],
"fill": "dash"
}
},
{
"id": "custom.lineWidth",
"value": 1
}
]
}
]
},
"gridPos": {
"h": 9,
"w": 8,
"x": 0,
"y": 0
},
"id": 16,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"timeCompare": false,
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": "${datasource}",
"editorMode": "code",
"expr": "count by (version) (alloy_build_info{cluster=~\"$cluster\", namespace=~\"$namespace\", job=~\"$job\"})",
"instant": false,
"legendFormat": "{{version}}",
"range": true,
"refId": "B"
}
],
"title": "Number of Alloy Instances",
"type": "timeseries"
},
{
"datasource": "${datasource}",
"description": "CPU usage of the Alloy process relative to 1 CPU core.\n\nFor example, 100% means using one entire CPU core.\n",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
},
"unit": "percentunit"
},
"overrides": [
{
"__systemRef": "hideSeriesFrom",
"matcher": {
"id": "byNames",
"options": {
"mode": "exclude",
"names": [
"Total"
],
"prefix": "All except:",
"readOnly": true
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": false,
"tooltip": true,
"viz": true
}
}
]
}
]
},
"gridPos": {
"h": 9,
"w": 8,
"x": 8,
"y": 0
},
"id": 17,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"timeCompare": false,
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": "${datasource}",
"expr": "rate(alloy_resources_process_cpu_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n",
"hide": true,
"instant": false,
"legendFormat": "{{instance}}",
"range": true,
"refId": "A"
},
{
"datasource": "${datasource}",
"editorMode": "code",
"expr": "sum(rate(alloy_resources_process_cpu_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
"instant": false,
"legendFormat": "Total",
"range": true,
"refId": "B"
}
],
"title": "CPU usage",
"type": "timeseries"
}
],
"time": {
"from": "now-90m",
"to": "now"
},
"timezone": "utc",
"title": "Legacy DS Panel Query Ref",
"weekStart": ""
}
}
@@ -1,294 +0,0 @@
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v0alpha1",
"metadata": {
"name": "legacy-ds-ref"
},
"spec": {
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"panels": [
{
"datasource": "${datasource}",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Minimum cluster size"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
},
{
"id": "custom.lineStyle",
"value": {
"dash": [
10,
10
],
"fill": "dash"
}
},
{
"id": "custom.lineWidth",
"value": 1
}
]
}
]
},
"gridPos": {
"h": 9,
"w": 8,
"x": 0,
"y": 0
},
"id": 16,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"timeCompare": false,
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": "${datasource}",
"editorMode": "code",
"expr": "count by (version) (alloy_build_info{cluster=~\"$cluster\", namespace=~\"$namespace\", job=~\"$job\"})",
"instant": false,
"legendFormat": "{{version}}",
"range": true,
"refId": "B"
}
],
"title": "Number of Alloy Instances",
"type": "timeseries"
},
{
"datasource": "${datasource}",
"description": "CPU usage of the Alloy process relative to 1 CPU core.\n\nFor example, 100% means using one entire CPU core.\n",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
},
"unit": "percentunit"
},
"overrides": [
{
"__systemRef": "hideSeriesFrom",
"matcher": {
"id": "byNames",
"options": {
"mode": "exclude",
"names": [
"Total"
],
"prefix": "All except:",
"readOnly": true
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": false,
"tooltip": true,
"viz": true
}
}
]
}
]
},
"gridPos": {
"h": 9,
"w": 8,
"x": 8,
"y": 0
},
"id": 17,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"timeCompare": false,
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": "${datasource}",
"expr": "rate(alloy_resources_process_cpu_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n",
"hide": true,
"instant": false,
"legendFormat": "{{instance}}",
"range": true,
"refId": "A"
},
{
"datasource": "${datasource}",
"editorMode": "code",
"expr": "sum(rate(alloy_resources_process_cpu_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
"instant": false,
"legendFormat": "Total",
"range": true,
"refId": "B"
}
],
"title": "CPU usage",
"type": "timeseries"
}
],
"time": {
"from": "now-90m",
"to": "now"
},
"timezone": "utc",
"title": "Legacy DS Panel Query Ref",
"weekStart": ""
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v1beta1"
}
}
}
@@ -1,405 +0,0 @@
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2alpha1",
"metadata": {
"name": "legacy-ds-ref"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"query": {
"kind": "grafana",
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-16": {
"kind": "Panel",
"spec": {
"id": 16,
"title": "Number of Alloy Instances",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "",
"spec": {
"editorMode": "code",
"expr": "count by (version) (alloy_build_info{cluster=~\"$cluster\", namespace=~\"$namespace\", job=~\"$job\"})",
"instant": false,
"legendFormat": "{{version}}",
"range": true
}
},
"datasource": {
"type": "",
"uid": "${datasource}"
},
"refId": "B",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"timeCompare": false,
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Minimum cluster size"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
},
{
"id": "custom.lineStyle",
"value": {
"dash": [
10,
10
],
"fill": "dash"
}
},
{
"id": "custom.lineWidth",
"value": 1
}
]
}
]
}
}
}
}
},
"panel-17": {
"kind": "Panel",
"spec": {
"id": 17,
"title": "CPU usage",
"description": "CPU usage of the Alloy process relative to 1 CPU core.\n\nFor example, 100% means using one entire CPU core.\n",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "",
"spec": {
"expr": "rate(alloy_resources_process_cpu_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n",
"instant": false,
"legendFormat": "{{instance}}",
"range": true
}
},
"datasource": {
"type": "",
"uid": "${datasource}"
},
"refId": "A",
"hidden": true
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "",
"spec": {
"editorMode": "code",
"expr": "sum(rate(alloy_resources_process_cpu_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
"instant": false,
"legendFormat": "Total",
"range": true
}
},
"datasource": {
"type": "",
"uid": "${datasource}"
},
"refId": "B",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"timeCompare": false,
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"unit": "percentunit",
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": [
{
"__systemRef": "hideSeriesFrom",
"matcher": {
"id": "byNames",
"options": {
"mode": "exclude",
"names": [
"Total"
],
"prefix": "All except:",
"readOnly": true
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": false,
"tooltip": true,
"viz": true
}
}
]
}
]
}
}
}
}
}
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 8,
"height": 9,
"element": {
"kind": "ElementReference",
"name": "panel-16"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 8,
"y": 0,
"width": 8,
"height": 9,
"element": {
"kind": "ElementReference",
"name": "panel-17"
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [],
"timeSettings": {
"timezone": "utc",
"from": "now-90m",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Legacy DS Panel Query Ref",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v1beta1"
}
}
}
@@ -1,411 +0,0 @@
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2beta1",
"metadata": {
"name": "legacy-ds-ref"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana",
"version": "v0",
"datasource": {
"name": "-- Grafana --"
},
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-16": {
"kind": "Panel",
"spec": {
"id": 16,
"title": "Number of Alloy Instances",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "",
"version": "v0",
"datasource": {
"name": "${datasource}"
},
"spec": {
"editorMode": "code",
"expr": "count by (version) (alloy_build_info{cluster=~\"$cluster\", namespace=~\"$namespace\", job=~\"$job\"})",
"instant": false,
"legendFormat": "{{version}}",
"range": true
}
},
"refId": "B",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "",
"spec": {
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"timeCompare": false,
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": [
{
"matcher": {
"id": "byName",
"options": "Minimum cluster size"
},
"properties": [
{
"id": "color",
"value": {
"fixedColor": "red",
"mode": "fixed"
}
},
{
"id": "custom.lineStyle",
"value": {
"dash": [
10,
10
],
"fill": "dash"
}
},
{
"id": "custom.lineWidth",
"value": 1
}
]
}
]
}
}
}
}
},
"panel-17": {
"kind": "Panel",
"spec": {
"id": 17,
"title": "CPU usage",
"description": "CPU usage of the Alloy process relative to 1 CPU core.\n\nFor example, 100% means using one entire CPU core.\n",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "",
"version": "v0",
"datasource": {
"name": "${datasource}"
},
"spec": {
"expr": "rate(alloy_resources_process_cpu_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n",
"instant": false,
"legendFormat": "{{instance}}",
"range": true
}
},
"refId": "A",
"hidden": true
}
},
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "",
"version": "v0",
"datasource": {
"name": "${datasource}"
},
"spec": {
"editorMode": "code",
"expr": "sum(rate(alloy_resources_process_cpu_seconds_total{cluster=~\"$cluster\", namespace=~\"$namespace\", job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
"instant": false,
"legendFormat": "Total",
"range": true
}
},
"refId": "B",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "",
"spec": {
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"timeCompare": false,
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"unit": "percentunit",
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": 0,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": [
{
"__systemRef": "hideSeriesFrom",
"matcher": {
"id": "byNames",
"options": {
"mode": "exclude",
"names": [
"Total"
],
"prefix": "All except:",
"readOnly": true
}
},
"properties": [
{
"id": "custom.hideFrom",
"value": {
"legend": false,
"tooltip": true,
"viz": true
}
}
]
}
]
}
}
}
}
}
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 8,
"height": 9,
"element": {
"kind": "ElementReference",
"name": "panel-16"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 8,
"y": 0,
"width": 8,
"height": 9,
"element": {
"kind": "ElementReference",
"name": "panel-17"
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [],
"timeSettings": {
"timezone": "utc",
"from": "now-90m",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Legacy DS Panel Query Ref",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v1beta1"
}
}
}
@@ -88,11 +88,6 @@ func ConvertDashboard_V0_to_V1beta1(in *dashv0.Dashboard, out *dashv1.Dashboard,
// Which means that we have schemaVersion: 42 dashboards where datasource variable references are still strings
normalizeTemplateVariableDatasources(out.Spec.Object)
// Normalize panel and target datasources from string to object format
// This handles legacy dashboards where panels/targets have datasource: "$datasource" (string)
// instead of datasource: { uid: "$datasource" } (object)
normalizePanelDatasources(out.Spec.Object)
return nil
}
@@ -139,62 +134,3 @@ func isTemplateVariableRef(s string) bool {
}
return strings.HasPrefix(s, "$") || strings.HasPrefix(s, "${")
}
// normalizePanelDatasources converts panel and target string datasources to object format.
// Legacy dashboards may have panels/targets with datasource: "$datasource" (string).
// This normalizes them to datasource: { uid: "$datasource" } for consistent V1→V2 conversion.
func normalizePanelDatasources(dashboard map[string]interface{}) {
panels, ok := dashboard["panels"].([]interface{})
if !ok {
return
}
normalizePanelsDatasources(panels)
}
// normalizePanelsDatasources normalizes datasources in a list of panels (including nested row panels)
func normalizePanelsDatasources(panels []interface{}) {
for _, panel := range panels {
panelMap, ok := panel.(map[string]interface{})
if !ok {
continue
}
// Handle row panels with nested panels
if panelType, _ := panelMap["type"].(string); panelType == "row" {
if nestedPanels, ok := panelMap["panels"].([]interface{}); ok {
normalizePanelsDatasources(nestedPanels)
}
}
// Normalize panel-level datasource
if ds := panelMap["datasource"]; ds != nil {
if dsStr, ok := ds.(string); ok && isTemplateVariableRef(dsStr) {
panelMap["datasource"] = map[string]interface{}{
"uid": dsStr,
}
}
}
// Normalize target-level datasources
targets, ok := panelMap["targets"].([]interface{})
if !ok {
continue
}
for _, target := range targets {
targetMap, ok := target.(map[string]interface{})
if !ok {
continue
}
if ds := targetMap["datasource"]; ds != nil {
if dsStr, ok := ds.(string); ok && isTemplateVariableRef(dsStr) {
targetMap["datasource"] = map[string]interface{}{
"uid": dsStr,
}
}
}
}
}
}
@@ -2059,12 +2059,6 @@ func transformPanelQueries(ctx context.Context, panelMap map[string]interface{},
Uid: &dsUID,
}
}
} else if dsStr, ok := ds.(string); ok && isTemplateVariable(dsStr) {
// Handle legacy panel datasource as string (template variable reference e.g., "$datasource")
// Only process template variables - other string values are not supported in V2 format
panelDatasource = &dashv2alpha1.DashboardDataSourceRef{
Uid: &dsStr,
}
}
}
@@ -2151,10 +2145,6 @@ func transformSingleQuery(ctx context.Context, targetMap map[string]interface{},
// Resolve Grafana datasource UID when type is "datasource" and UID is empty
queryDatasourceUID = resolveGrafanaDatasourceUID(queryDatasourceType, queryDatasourceUID)
}
} else if dsStr, ok := targetMap["datasource"].(string); ok && isTemplateVariable(dsStr) {
// Handle legacy target datasource as string (template variable reference e.g., "$datasource")
// Only process template variables - other string values are not supported in V2 format
queryDatasourceUID = dsStr
}
// Use panel datasource if target datasource is missing or empty
+2 -7
View File
@@ -8,17 +8,12 @@ replace github.com/grafana/grafana/pkg/apimachinery => ../../pkg/apimachinery
replace github.com/grafana/grafana/pkg/apiserver => ../../pkg/apiserver
replace github.com/grafana/grafana/pkg/plugins => ../../pkg/plugins
replace github.com/grafana/grafana/pkg/semconv => ../../pkg/semconv
require (
github.com/emicklei/go-restful/v3 v3.13.0
github.com/grafana/grafana v0.0.0-00010101000000-000000000000
github.com/grafana/grafana-app-sdk v0.48.7
github.com/grafana/grafana-app-sdk/logging v0.48.7
github.com/grafana/grafana/pkg/apimachinery v0.0.0
github.com/grafana/grafana/pkg/plugins v0.0.0
github.com/stretchr/testify v1.11.1
k8s.io/apimachinery v0.34.3
k8s.io/apiserver v0.34.3
@@ -31,7 +26,7 @@ require (
cel.dev/expr v0.25.1 // indirect
github.com/Machiel/slugify v1.0.1 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/ProtonMail/go-crypto v1.3.0 // indirect
github.com/ProtonMail/go-crypto v1.1.6 // indirect
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
github.com/apache/arrow-go/v18 v18.4.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
@@ -106,7 +101,7 @@ require (
github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1 // indirect
github.com/grafana/grafana-plugin-sdk-go v0.284.0 // indirect
github.com/grafana/grafana/pkg/apiserver v0.0.0 // indirect
github.com/grafana/grafana/pkg/semconv v0.0.0 // indirect
github.com/grafana/grafana/pkg/semconv v0.0.0-20250804150913-990f1c69ecc2 // indirect
github.com/grafana/otel-profiling-go v0.5.1 // indirect
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect
github.com/grafana/sqlds/v5 v5.0.3 // indirect
+4 -2
View File
@@ -11,8 +11,8 @@ github.com/Machiel/slugify v1.0.1 h1:EfWSlRWstMadsgzmiV7d0yVd2IFlagWH68Q+DcYCm4E
github.com/Machiel/slugify v1.0.1/go.mod h1:fTFGn5uWEynW4CUMG7sWkYXOf1UgDxyTM3DbR6Qfg3k=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw=
github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -235,6 +235,8 @@ github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1 h1:FFcEA01tW+SmuJIuDbHOdgUBL+d
github.com/grafana/grafana-azure-sdk-go/v2 v2.3.1/go.mod h1:Oi4anANlCuTCc66jCyqIzfVbgLXFll8Wja+Y4vfANlc=
github.com/grafana/grafana-plugin-sdk-go v0.284.0 h1:1bK7eWsnPBLUWDcWJWe218Ik5ad0a5JpEL4mH9ry7Ws=
github.com/grafana/grafana-plugin-sdk-go v0.284.0/go.mod h1:lHPniaSxq3SL5MxDIPy04TYB1jnTp/ivkYO+xn5Rz3E=
github.com/grafana/grafana/pkg/semconv v0.0.0-20250804150913-990f1c69ecc2 h1:A65jWgLk4Re28gIuZcpC0aTh71JZ0ey89hKGE9h543s=
github.com/grafana/grafana/pkg/semconv v0.0.0-20250804150913-990f1c69ecc2/go.mod h1:2HRzUK/xQEYc+8d5If/XSusMcaYq9IptnBSHACiQcOQ=
github.com/grafana/otel-profiling-go v0.5.1 h1:stVPKAFZSa7eGiqbYuG25VcqYksR6iWvF3YH66t4qL8=
github.com/grafana/otel-profiling-go v0.5.1/go.mod h1:ftN/t5A/4gQI19/8MoWurBEtC6gFw8Dns1sJZ9W4Tls=
github.com/grafana/prometheus-alertmanager v0.25.1-0.20250911094103-5456b6e45604 h1:aXfUhVN/Ewfpbko2CCtL65cIiGgwStOo4lWH2b6gw2U=
@@ -116,26 +116,3 @@ type ConnectionList struct {
// +listType=atomic
Items []Connection `json:"items"`
}
// ExternalRepositoryList lists repositories from an external git provider
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ExternalRepositoryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
// +listType=atomic
Items []ExternalRepository `json:"items"`
}
type ExternalRepository struct {
// Name of the repository
Name string `json:"name"`
// Owner is the user, organization, or workspace that owns the repository
// For GitHub: organization or user
// For GitLab: namespace (user or group)
// For Bitbucket: workspace
// For pure Git: empty
Owner string `json:"owner,omitempty"`
// URL of the repository
URL string `json:"url"`
}
@@ -197,7 +197,6 @@ func AddKnownTypes(gv schema.GroupVersion, scheme *runtime.Scheme) error {
&HistoricJobList{},
&Connection{},
&ConnectionList{},
&ExternalRepositoryList{},
)
return nil
}
@@ -262,53 +262,6 @@ func (in *ExportJobOptions) DeepCopy() *ExportJobOptions {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalRepository) DeepCopyInto(out *ExternalRepository) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalRepository.
func (in *ExternalRepository) DeepCopy() *ExternalRepository {
if in == nil {
return nil
}
out := new(ExternalRepository)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalRepositoryList) DeepCopyInto(out *ExternalRepositoryList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ExternalRepository, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalRepositoryList.
func (in *ExternalRepositoryList) DeepCopy() *ExternalRepositoryList {
if in == nil {
return nil
}
out := new(ExternalRepositoryList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ExternalRepositoryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FileItem) DeepCopyInto(out *FileItem) {
*out = *in
@@ -26,8 +26,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1.DeleteJobOptions": schema_pkg_apis_provisioning_v0alpha1_DeleteJobOptions(ref),
"github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1.ErrorDetails": schema_pkg_apis_provisioning_v0alpha1_ErrorDetails(ref),
"github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1.ExportJobOptions": schema_pkg_apis_provisioning_v0alpha1_ExportJobOptions(ref),
"github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1.ExternalRepository": schema_pkg_apis_provisioning_v0alpha1_ExternalRepository(ref),
"github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1.ExternalRepositoryList": schema_pkg_apis_provisioning_v0alpha1_ExternalRepositoryList(ref),
"github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1.FileItem": schema_pkg_apis_provisioning_v0alpha1_FileItem(ref),
"github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1.FileList": schema_pkg_apis_provisioning_v0alpha1_FileList(ref),
"github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1.GitHubConnectionConfig": schema_pkg_apis_provisioning_v0alpha1_GitHubConnectionConfig(ref),
@@ -546,96 +544,6 @@ func schema_pkg_apis_provisioning_v0alpha1_ExportJobOptions(ref common.Reference
}
}
// schema_pkg_apis_provisioning_v0alpha1_ExternalRepository builds the OpenAPI
// definition for the ExternalRepository type: an object with string properties
// "name" (required), "owner" (optional), and "url" (required). Generated
// openapi code — do not hand-edit; regenerate with openapi-gen.
func schema_pkg_apis_provisioning_v0alpha1_ExternalRepository(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name of the repository",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"owner": {
						SchemaProps: spec.SchemaProps{
							Description: "Owner is the user, organization, or workspace that owns the repository For GitHub: organization or user For GitLab: namespace (user or group) For Bitbucket: workspace For pure Git: empty",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"url": {
						SchemaProps: spec.SchemaProps{
							Description: "URL of the repository",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"name", "url"},
			},
		},
	}
}
// schema_pkg_apis_provisioning_v0alpha1_ExternalRepositoryList builds the
// OpenAPI definition for the ExternalRepositoryList type: standard kind /
// apiVersion / metadata fields plus a required atomic "items" array of
// ExternalRepository. The Dependencies list names the referenced definitions
// so the aggregator can resolve the $refs. Generated openapi code — do not
// hand-edit; regenerate with openapi-gen.
func schema_pkg_apis_provisioning_v0alpha1_ExternalRepositoryList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ExternalRepositoryList lists repositories from an external git provider",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-list-type": "atomic",
							},
						},
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1.ExternalRepository"),
									},
								},
							},
						},
					},
				},
				Required: []string{"items"},
			},
		},
		Dependencies: []string{
			"github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1.ExternalRepository", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
	}
}
func schema_pkg_apis_provisioning_v0alpha1_FileItem(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -1,7 +1,6 @@
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,ConnectionList,Items
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,DeleteJobOptions,Paths
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,DeleteJobOptions,Resources
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,ExternalRepositoryList,Items
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,FileList,Items
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,HistoryList,Items
API rule violation: list_type_missing,github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1,JobResourceSummary,Errors
@@ -1,4 +0,0 @@
package connection

// BlockDeletionFinalizer is the finalizer placed on a connection to prevent
// its deletion while repositories still reference it. The finalizer must be
// removed before the object can actually be deleted.
const BlockDeletionFinalizer = "block-deletion-while-repositories-exist"
@@ -1,40 +0,0 @@
package controller
import (
"context"
"encoding/json"
"fmt"
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
client "github.com/grafana/grafana/apps/provisioning/pkg/generated/clientset/versioned/typed/provisioning/v0alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// ConnectionStatusPatcher provides methods to patch Connection status subresources.
type ConnectionStatusPatcher struct {
	// client is the typed provisioning API client used to issue status patches.
	client client.ProvisioningV0alpha1Interface
}
// NewConnectionStatusPatcher returns a ConnectionStatusPatcher that issues
// status patches through the given provisioning client.
func NewConnectionStatusPatcher(client client.ProvisioningV0alpha1Interface) *ConnectionStatusPatcher {
	return &ConnectionStatusPatcher{client: client}
}
// Patch applies JSON patch operations to a Connection's status subresource.
// The variadic patchOperations are marshaled as a single RFC 6902 JSON patch
// document and sent against the "status" subresource of the given connection.
func (p *ConnectionStatusPatcher) Patch(ctx context.Context, conn *provisioning.Connection, patchOperations ...map[string]interface{}) error {
	payload, err := json.Marshal(patchOperations)
	if err != nil {
		return fmt.Errorf("unable to marshal patch data: %w", err)
	}
	if _, err := p.client.Connections(conn.Namespace).
		Patch(ctx, conn.Name, types.JSONPatchType, payload, metav1.PatchOptions{}, "status"); err != nil {
		return fmt.Errorf("unable to update connection status: %w", err)
	}
	return nil
}
-2
View File
@@ -2234,8 +2234,6 @@ encryption_provider = secret_key.v1
# These flags are required in on-prem installations for GitSync to work
#
# Whether to register the MT CRUD API
register_api_server = true
# Whether to create the MT secrets management database
run_secrets_db_migrations = true
# Whether to run the data key id migration. Requires that RunSecretsDBMigrations is also true.
-2
View File
@@ -2123,8 +2123,6 @@ default_datasource_uid =
# These flags are required in on-prem installations for GitSync to work
#
# Whether to register the MT CRUD API
;register_api_server = true
# Whether to create the MT secrets management database
;run_secrets_db_migrations = true
# Whether to run the data key id migration. Requires that RunSecretsDBMigrations is also true.
@@ -186,7 +186,7 @@ For the JSON and field usage notes, refer to the [links schema documentation](ht
### `tags`
Tags associated with the dashboard. Each tag can be up to 50 characters long.
The tags associated with the dashboard:
` [...string]`
@@ -111,4 +111,3 @@ After installing and configuring the Graphite data source you can:
- Add [transformations](ref:transformations)
- Add [annotations](ref:annotate-visualizations)
- Set up [alerting](ref:alerting)
- [Troubleshoot](troubleshooting/) common issues with the Graphite data source
@@ -1,174 +0,0 @@
---
description: Troubleshoot common issues with the Graphite data source.
keywords:
- grafana
- graphite
- troubleshooting
- guide
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Troubleshooting
title: Troubleshoot Graphite data source issues
weight: 400
refs:
configure-graphite:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/graphite/configure/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/graphite/configure/
query-editor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/graphite/query-editor/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/graphite/query-editor/
---
# Troubleshoot Graphite data source issues
This document provides solutions for common issues you might encounter when using the Graphite data source.
## Connection issues
Use the following troubleshooting steps to resolve connection problems between Grafana and your Graphite server.
**Data source test fails with "Unable to connect":**
If the data source test fails, verify the following:
- The URL in your data source configuration is correct and accessible from the Grafana server.
- The Graphite server is running and accepting connections.
- Any firewall rules or network policies allow traffic between Grafana and the Graphite server.
- If using TLS, ensure your certificates are valid and properly configured.
To test connectivity, run the following command from the Grafana server:
```sh
curl -v <GRAPHITE_URL>/render
```
Replace _`<GRAPHITE_URL>`_ with your Graphite server URL. A successful connection returns a response from the Graphite server.
**Authentication errors:**
If you receive 401 or 403 errors:
- Verify your Basic Auth username and password are correct.
- Ensure the **With Credentials** toggle is enabled if your Graphite server requires cookies for authentication.
- Check that your TLS client certificates are valid and match what the server expects.
For detailed authentication configuration, refer to [Configure the Graphite data source](ref:configure-graphite).
## Query issues
Use the following troubleshooting steps to resolve problems with Graphite queries.
**No data returned:**
If your query returns no data:
- Verify the metric path exists in your Graphite server by testing directly in the Graphite web interface.
- Check that the time range in Grafana matches when data was collected.
- Ensure wildcards in your query match existing metrics.
- Confirm your query syntax is correct for your Graphite version.
**HTTP 500 errors with HTML content:**
Graphite-web versions before 1.6 return HTTP 500 errors with full HTML stack traces when a query fails. If you see error messages containing HTML tags:
- Check the Graphite server logs for the full error details.
- Verify your query syntax is valid.
- Ensure the requested time range doesn't exceed your Graphite server's capabilities.
- Check that all functions used in your query are supported by your Graphite version.
**Parser errors in the query editor:**
If the query editor displays parser errors:
- Check for unbalanced parentheses in function calls.
- Verify that function arguments are in the correct format.
- Ensure metric paths don't contain unsupported characters.
For query syntax help, refer to [Graphite query editor](ref:query-editor).
## Version and feature issues
Use the following troubleshooting steps to resolve problems related to Graphite versions and features.
**Functions missing from the query editor:**
If expected functions don't appear in the query editor:
- Verify the correct Graphite version is selected in the data source configuration.
- The available functions depend on the configured version. For example, tag-based functions require Graphite 1.1 or later.
- If using a custom Graphite installation with additional functions, ensure the version setting matches your server.
**Tag-based queries not working:**
If `seriesByTag()` or other tag functions fail:
- Confirm your Graphite server is version 1.1 or later.
- Verify the Graphite version setting in your data source configuration matches your actual server version.
- Check that tags are properly configured in your Graphite server.
## Performance issues
Use the following troubleshooting steps to address slow queries or timeouts.
**Queries timing out:**
If queries consistently time out:
- Increase the **Timeout** setting in the data source configuration.
- Reduce the time range of your query.
- Use more specific metric paths instead of broad wildcards.
- Consider using `summarize()` or `consolidateBy()` functions to reduce the amount of data returned.
- Check your Graphite server's performance and resource utilization.
**Slow autocomplete in the query editor:**
If metric path autocomplete is slow:
- This often indicates a large number of metrics in your Graphite server.
- Use more specific path prefixes to narrow the search scope.
- Check your Graphite server's index performance.
## MetricTank-specific issues
If you're using MetricTank as your Graphite backend, use the following troubleshooting steps.
**Rollup indicator not appearing:**
If the rollup indicator doesn't display when expected:
- Verify **Metrictank** is selected as the Graphite backend type in the data source configuration.
- Ensure the **Rollup indicator** toggle is enabled.
- The indicator only appears when data aggregation actually occurs.
**Unexpected data aggregation:**
If you see unexpected aggregation in your data:
- Check the rollup configuration in your MetricTank instance.
- Adjust the time range or use `consolidateBy()` to control aggregation behavior.
- Review the query processing metadata in the panel inspector for details on how data was processed.
## Get additional help
If you continue to experience issues:
- Check the [Grafana community forums](https://community.grafana.com/) for similar issues and solutions.
- Review the [Graphite documentation](https://graphite.readthedocs.io/) for additional configuration options.
- Contact [Grafana Support](https://grafana.com/support/) if you're an Enterprise, Cloud Pro, or Cloud Advanced customer.
When reporting issues, include the following information:
- Grafana version
- Graphite version (for example, 1.1.x) and backend type (Default or MetricTank)
- Authentication method (Basic Auth, TLS, or none)
- Error messages (redact sensitive information)
- Steps to reproduce the issue
- Relevant configuration such as data source settings, timeout values, and Graphite version setting (redact passwords and other credentials)
- Sample query (if applicable, with sensitive data redacted)
@@ -41,8 +41,7 @@ Query parameters:
- `sortDirection`: Sort order of elements. Use `alpha-asc` for ascending and `alpha-desc` for descending sort order.
- `typeFilter`: A comma separated list of types to filter the elements by.
- `excludeUid`: Element UID to exclude from search results.
- `folderFilter`: **Deprecated.** A comma separated list of folder IDs to filter the elements by. Use `folderFilterUIDs` instead.
- `folderFilterUIDs`: A comma separated list of folder UIDs to filter the elements by.
- `folderFilter`: A comma separated list of folder IDs to filter the elements by.
- `perPage`: The number of results per page; default is 100.
- `page`: The page for a set of records, given that only `perPage` records are returned at a time. Numbering starts at `1`.
@@ -25,7 +25,7 @@ Keys:
- **theme** - One of: `light`, `dark`, or an empty string for the default theme
- **homeDashboardId** - Deprecated. Use `homeDashboardUID` instead.
- **homeDashboardUID**: The `:uid` of a dashboard
- **timezone** - Any valid IANA timezone string (e.g., `America/New_York`, `Europe/London`), `utc`, `browser`, or an empty string for the default.
- **timezone** - One of: `utc`, `browser`, or an empty string for the default
Omitting a key will cause the current value to be replaced with the
system default value.
@@ -38,6 +38,13 @@ Users can now view anonymous usage statistics, including the count of devices an
The number of anonymous devices is not limited by default. The configuration option `device_limit` allows you to enforce a limit on the number of anonymous devices. This enables you to have greater control over the usage within your Grafana instance and keep the usage within the limits of your environment. Once the limit is reached, any new devices that try to access Grafana will be denied access.
To display anonymous users and devices in versions 10.2, 10.3, and 10.4, you need to enable the `displayAnonymousStats` feature toggle.
```bash
[feature_toggles]
enable = displayAnonymousStats
```
## Configuration
Example:
@@ -60,15 +67,3 @@ device_limit =
```
If you change your organization name in the Grafana UI, this setting needs to be updated to match the new name.
## Licensing for anonymous access
Grafana Enterprise (self-managed) licenses anonymous access as active users.
Anonymous access lets people use Grafana without login credentials. It was an early way to share dashboards, but public dashboards now give you a more secure way to share them.
### How anonymous usage is counted
Grafana estimates anonymous active users from anonymous devices:
- **Counting rule**: Grafana counts 1 anonymous user for every 3 anonymous devices detected.
+9 -3
View File
@@ -25,6 +25,7 @@ require (
github.com/Masterminds/semver v1.5.0 // @grafana/grafana-backend-group
github.com/Masterminds/semver/v3 v3.4.0 // @grafana/grafana-developer-enablement-squad
github.com/Masterminds/sprig/v3 v3.3.0 // @grafana/grafana-backend-group
github.com/ProtonMail/go-crypto v1.1.6 // @grafana/plugins-platform-backend
github.com/VividCortex/mysqlerr v0.0.0-20170204212430-6c6b55f8796f // @grafana/grafana-backend-group
github.com/alicebob/miniredis/v2 v2.34.0 // @grafana/alerting-backend
github.com/andybalholm/brotli v1.2.0 // @grafana/partner-datasources
@@ -119,7 +120,8 @@ require (
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // @grafana/identity-access-team
github.com/hashicorp/go-hclog v1.6.3 // @grafana/plugins-platform-backend
github.com/hashicorp/go-multierror v1.1.1 // @grafana/alerting-squad
github.com/hashicorp/go-plugin v1.7.0 // indirect; @grafana/plugins-platform-backend
github.com/hashicorp/go-plugin v1.7.0 // @grafana/plugins-platform-backend
github.com/hashicorp/go-secure-stdlib/plugincontainer v0.4.2 // @grafana/plugins-platform-backend
github.com/hashicorp/go-version v1.7.0 // @grafana/grafana-backend-group
github.com/hashicorp/golang-lru/v2 v2.0.7 // @grafana/alerting-backend
github.com/hashicorp/hcl/v2 v2.24.0 // @grafana/alerting-backend
@@ -228,7 +230,7 @@ require (
k8s.io/kube-aggregator v0.34.3 // @grafana/grafana-app-platform-squad
k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e // @grafana/grafana-app-platform-squad
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // @grafana/partner-datasources
modernc.org/sqlite v1.40.1 // @grafana/grafana-backend-group
modernc.org/sqlite v1.42.2 // @grafana/grafana-backend-group
pgregory.net/rapid v1.2.0 // @grafana/grafana-operator-experience-squad
sigs.k8s.io/randfill v1.0.0 // @grafana/grafana-app-platform-squad
sigs.k8s.io/structured-merge-diff/v6 v6.3.1 // @grafana/grafana-app-platform-squad
@@ -391,6 +393,7 @@ require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cheekybits/genny v1.0.0 // indirect
github.com/chromedp/cdproto v0.0.0-20250803210736-d308e07a266d // indirect
github.com/cloudflare/circl v1.6.1 // indirect
github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f // indirect
github.com/cockroachdb/apd/v3 v3.2.1 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
@@ -487,6 +490,7 @@ require (
github.com/jhump/protoreflect v1.17.0 // indirect
github.com/jonboulle/clockwork v0.5.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/jszwedko/go-datemath v0.1.1-0.20230526204004-640a500621d6 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
@@ -654,8 +658,10 @@ require (
require github.com/grafana/tempo v1.5.1-0.20250529124718-87c2dc380cec // @grafana/observability-traces-and-profiling
require github.com/Machiel/slugify v1.0.1 // @grafana/plugins-platform-backend
require (
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/IBM/pgxpoolprometheus v1.1.2 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
+16 -3
View File
@@ -679,7 +679,8 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1/go.mod h1:8cl44BDmi+
github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk=
github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v11.2.8+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
@@ -737,6 +738,8 @@ github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXY
github.com/IBM/pgxpoolprometheus v1.1.2 h1:sHJwxoL5Lw4R79Zt+H4Uj1zZ4iqXJLdk7XDE7TPs97U=
github.com/IBM/pgxpoolprometheus v1.1.2/go.mod h1:+vWzISN6S9ssgurhUNmm6AlXL9XLah3TdWJktquKTR8=
github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
github.com/Machiel/slugify v1.0.1 h1:EfWSlRWstMadsgzmiV7d0yVd2IFlagWH68Q+DcYCm4E=
github.com/Machiel/slugify v1.0.1/go.mod h1:fTFGn5uWEynW4CUMG7sWkYXOf1UgDxyTM3DbR6Qfg3k=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
@@ -759,6 +762,8 @@ github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw=
github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -1026,6 +1031,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -1753,6 +1760,8 @@ github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5O
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM=
github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0=
github.com/hashicorp/go-secure-stdlib/plugincontainer v0.4.2 h1:gCNiM4T5xEc4IpT8vM50CIO+AtElr5kO9l2Rxbq+Sz8=
github.com/hashicorp/go-secure-stdlib/plugincontainer v0.4.2/go.mod h1:6ZM4ZdwClyAsiU2uDBmRHCvq0If/03BMbF9U+U7G5pA=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
@@ -1877,6 +1886,10 @@ github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbd
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531 h1:hgVxRoDDPtQE68PT4LFvNlPz2nBKd3OMlGKIQ69OmR4=
github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531/go.mod h1:fqTUQpVYBvhCNIsMXGl2GE9q6z94DIP6NtFKXCSTVbg=
github.com/joshlf/testutil v0.0.0-20170608050642-b5d8aa79d93d h1:J8tJzRyiddAFF65YVgxli+TyWBi0f79Sld6rJP6CBcY=
github.com/joshlf/testutil v0.0.0-20170608050642-b5d8aa79d93d/go.mod h1:b+Q3v8Yrg5o15d71PSUraUzYb+jWl6wQMSBXSGS/hv0=
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -3744,8 +3757,8 @@ modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4=
modernc.org/sqlite v1.40.1 h1:VfuXcxcUWWKRBuP8+BR9L7VnmusMgBNNnBYGEe9w/iY=
modernc.org/sqlite v1.40.1/go.mod h1:9fjQZ0mB1LLP0GYrp39oOJXx/I2sxEnZtzCmEQIKvGE=
modernc.org/sqlite v1.42.2 h1:7hkZUNJvJFN2PgfUdjni9Kbvd4ef4mNLOu0B9FGxM74=
modernc.org/sqlite v1.42.2/go.mod h1:+VkC6v3pLOAE0A0uVucQEcbVW0I5nHCeDaBf+DpsQT8=
modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
-1
View File
@@ -32,7 +32,6 @@ use (
./pkg/build
./pkg/build/wire // skip:golangci-lint
./pkg/codegen
./pkg/plugins
./pkg/plugins/codegen
./pkg/promlib
./pkg/semconv
+3 -6
View File
@@ -280,7 +280,6 @@ github.com/Azure/go-amqp v0.17.0/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fw
github.com/Azure/go-amqp v1.4.0 h1:Xj3caqi4comOF/L1Uc5iuBxR/pB6KumejC01YQOqOR4=
github.com/Azure/go-amqp v1.4.0/go.mod h1:vZAogwdrkbyK3Mla8m/CxSc/aKdnTZ4IbPxl51Y5WZE=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo=
@@ -575,7 +574,6 @@ github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnx
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/cloudflare/circl v1.6.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk=
github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
@@ -1351,7 +1349,6 @@ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusrec
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.121.0/go.mod h1:3axnebi8xUm9ifbs1myzehw2nODtIMrQlL566sJ4bYw=
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.124.1 h1:XkxqUEoukMWXF+EpEWeM9itXKt62yKi13Lzd8ZEASP4=
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.124.1/go.mod h1:CuCZVPz+yn88b5vhZPAlxaMrVuhAVexUV6f8b07lpUc=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -1911,6 +1908,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=
go.opentelemetry.io/otel/exporters/prometheus v0.58.0/go.mod h1:7qo/4CLI+zYSNbv0GMNquzuss2FVZo3OYrGh96n4HNc=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0/go.mod h1:PD57idA/AiFD5aqoxGxCvT/ILJPeHy3MjqU/NS7KogY=
@@ -1954,12 +1952,10 @@ gocloud.dev/secrets/hashivault v0.42.0/go.mod h1:LXprr1XLEAT7BVZ+Y66dJEHQMzDsowI
golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc=
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.11.1-0.20230711161743-2e82bdd1719d/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
@@ -2067,7 +2063,6 @@ golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20250908211612-aef8a434d053/go.mod h1:+nZKN+XVh4LCiA9DV3ywrzN4gumyCnKjau3NGb9SGoE=
golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
@@ -2092,6 +2087,7 @@ golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
@@ -2241,6 +2237,7 @@ gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzE
gopkg.in/vmihailenco/msgpack.v2 v2.9.2 h1:gjPqo9orRVlSAH/065qw3MsFCDpH7fa1KpiizXyllY4=
gopkg.in/vmihailenco/msgpack.v2 v2.9.2/go.mod h1:/3Dn1Npt9+MYyLpYYXjInO/5jvMLamn+AEGwNEOatn8=
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
honnef.co/go/tools v0.3.2 h1:ytYb4rOqyp1TSa2EPvNVwtPQJctSELKaMyLfqNP4+34=
honnef.co/go/tools v0.3.2/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw=
+1 -1
View File
@@ -462,7 +462,7 @@
"js-yaml@npm:4.1.0": "^4.1.0",
"js-yaml@npm:=4.1.0": "^4.1.0",
"nodemailer": "7.0.11",
"@storybook/core@npm:8.6.15": "patch:@storybook/core@npm%3A8.6.15#~/.yarn/patches/@storybook-core-npm-8.6.15-a468a35170.patch"
"@storybook/core@npm:8.6.2": "patch:@storybook/core@npm%3A8.6.2#~/.yarn/patches/@storybook-core-npm-8.6.2-8c752112c0.patch"
},
"workspaces": {
"packages": [
+1 -1
View File
@@ -96,7 +96,7 @@
"@faker-js/faker": "^9.8.0",
"@grafana/api-clients": "12.4.0-pre",
"@grafana/i18n": "12.4.0-pre",
"@reduxjs/toolkit": "2.10.1",
"@reduxjs/toolkit": "^2.9.0",
"fishery": "^2.3.1",
"lodash": "^4.17.21",
"tinycolor2": "^1.6.0"
+1 -1
View File
@@ -170,7 +170,7 @@
},
"peerDependencies": {
"@grafana/runtime": ">=11.6 <= 12.x",
"@reduxjs/toolkit": "^2.10.0",
"@reduxjs/toolkit": "^2.8.0",
"rxjs": "7.8.2"
}
}
@@ -1021,7 +1021,6 @@ const injectedRtkApi = api
typeFilter: queryArg.typeFilter,
excludeUid: queryArg.excludeUid,
folderFilter: queryArg.folderFilter,
folderFilterUIDs: queryArg.folderFilterUiDs,
perPage: queryArg.perPage,
page: queryArg.page,
},
@@ -2916,11 +2915,8 @@ export type GetLibraryElementsApiArg = {
typeFilter?: string;
/** Element UID to exclude from search results. */
excludeUid?: string;
/** A comma separated list of folder ID(s) to filter the elements by.
Deprecated: Use FolderFilterUIDs instead. */
/** A comma separated list of folder ID(s) to filter the elements by. */
folderFilter?: string;
/** A comma separated list of folder UID(s) to filter the elements by. */
folderFilterUiDs?: string;
/** The number of results per page. */
perPage?: number;
/** The page for a set of records, given that only perPage records are returned at a time. Numbering starts at 1. */
@@ -5316,8 +5312,7 @@ export type PatchPrefsCmd = {
queryHistory?: QueryHistoryPreference;
regionalFormat?: string;
theme?: 'light' | 'dark';
/** Any IANA timezone string (e.g. America/New_York), 'utc', 'browser', or empty string */
timezone?: string;
timezone?: 'utc' | 'browser';
weekStart?: string;
};
export type UpdatePrefsCmd = {
@@ -5330,8 +5325,7 @@ export type UpdatePrefsCmd = {
queryHistory?: QueryHistoryPreference;
regionalFormat?: string;
theme?: 'light' | 'dark' | 'system';
/** Any IANA timezone string (e.g. America/New_York), 'utc', 'browser', or empty string */
timezone?: string;
timezone?: 'utc' | 'browser';
weekStart?: string;
};
export type OrgUserDto = {
@@ -5560,7 +5554,6 @@ export type ReportDashboard = {
};
export type Type = string;
export type ReportOptions = {
csvEncoding?: string;
layout?: string;
orientation?: string;
pdfCombineOneFile?: boolean;
@@ -86,8 +86,7 @@ export type PatchPrefsCmd = {
queryHistory?: QueryHistoryPreference;
regionalFormat?: string;
theme?: 'light' | 'dark';
/** Any IANA timezone string (e.g. America/New_York), 'utc', 'browser', or empty string */
timezone?: string;
timezone?: 'utc' | 'browser';
weekStart?: string;
};
export type UpdatePrefsCmd = {
@@ -100,8 +99,7 @@ export type UpdatePrefsCmd = {
queryHistory?: QueryHistoryPreference;
regionalFormat?: string;
theme?: 'light' | 'dark' | 'system';
/** Any IANA timezone string (e.g. America/New_York), 'utc', 'browser', or empty string */
timezone?: string;
timezone?: 'utc' | 'browser';
weekStart?: string;
};
export const {
@@ -122,10 +122,6 @@ const injectedRtkApi = api
}),
invalidatesTags: ['Connection'],
}),
getConnectionRepositories: build.query<GetConnectionRepositoriesApiResponse, GetConnectionRepositoriesApiArg>({
query: (queryArg) => ({ url: `/connections/${queryArg.name}/repositories` }),
providesTags: ['Connection'],
}),
getConnectionStatus: build.query<GetConnectionStatusApiResponse, GetConnectionStatusApiArg>({
query: (queryArg) => ({
url: `/connections/${queryArg.name}/status`,
@@ -730,18 +726,6 @@ export type UpdateConnectionApiArg = {
force?: boolean;
patch: Patch;
};
export type GetConnectionRepositoriesApiResponse = /** status 200 OK */ {
/** APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources */
apiVersion?: string;
items: any[];
/** Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds */
kind?: string;
metadata?: any;
};
export type GetConnectionRepositoriesApiArg = {
/** name of the ExternalRepositoryList */
name: string;
};
export type GetConnectionStatusApiResponse = /** status 200 OK */ Connection;
export type GetConnectionStatusApiArg = {
/** name of the Connection */
@@ -2095,8 +2079,6 @@ export const {
useReplaceConnectionMutation,
useDeleteConnectionMutation,
useUpdateConnectionMutation,
useGetConnectionRepositoriesQuery,
useLazyGetConnectionRepositoriesQuery,
useGetConnectionStatusQuery,
useLazyGetConnectionStatusQuery,
useReplaceConnectionStatusMutation,
+4 -4
View File
@@ -207,10 +207,6 @@ export interface FeatureToggles {
*/
reportingRetries?: boolean;
/**
* Enables CSV encoding options in the reporting feature
*/
reportingCsvEncodingOptions?: boolean;
/**
* Send query to the same datasource in a single request when using server side expressions. The `cloudWatchBatchQueries` feature toggle should be enabled if this used with CloudWatch.
*/
sseGroupByDatasource?: boolean;
@@ -782,6 +778,10 @@ export interface FeatureToggles {
*/
elasticsearchCrossClusterSearch?: boolean;
/**
* Displays the navigation history so the user can navigate back to previous pages
*/
unifiedHistory?: boolean;
/**
* Defaults to using the Loki `/labels` API instead of `/series`
* @default true
*/
+14 -14
View File
@@ -137,23 +137,23 @@
"@babel/core": "7.28.0",
"@faker-js/faker": "^9.0.0",
"@rollup/plugin-node-resolve": "16.0.1",
"@storybook/addon-a11y": "^8.6.15",
"@storybook/addon-actions": "^8.6.15",
"@storybook/addon-docs": "^8.6.15",
"@storybook/addon-essentials": "^8.6.15",
"@storybook/addon-storysource": "^8.6.15",
"@storybook/addon-a11y": "^8.6.2",
"@storybook/addon-actions": "^8.6.2",
"@storybook/addon-docs": "^8.6.2",
"@storybook/addon-essentials": "^8.6.2",
"@storybook/addon-storysource": "^8.6.2",
"@storybook/addon-webpack5-compiler-swc": "^2.1.0",
"@storybook/blocks": "^8.6.15",
"@storybook/components": "^8.6.15",
"@storybook/core-events": "^8.6.15",
"@storybook/manager-api": "^8.6.15",
"@storybook/blocks": "^8.6.2",
"@storybook/components": "^8.6.2",
"@storybook/core-events": "^8.6.2",
"@storybook/manager-api": "^8.6.2",
"@storybook/mdx2-csf": "1.1.0",
"@storybook/preset-scss": "1.0.3",
"@storybook/preview-api": "^8.6.15",
"@storybook/react": "^8.6.15",
"@storybook/react-webpack5": "^8.6.15",
"@storybook/preview-api": "^8.6.2",
"@storybook/react": "^8.6.2",
"@storybook/react-webpack5": "^8.6.2",
"@storybook/test-runner": "^0.23.0",
"@storybook/theming": "^8.6.15",
"@storybook/theming": "^8.6.2",
"@testing-library/dom": "10.4.1",
"@testing-library/jest-dom": "6.6.4",
"@testing-library/react": "16.3.0",
@@ -200,7 +200,7 @@
"rollup-plugin-node-externals": "^8.0.0",
"rollup-plugin-svg-import": "3.0.0",
"sass-loader": "16.0.5",
"storybook": "^8.6.15",
"storybook": "^8.6.2",
"style-loader": "4.0.0",
"typescript": "5.9.2",
"webpack": "5.101.0"
@@ -54,7 +54,6 @@ export const TagsInput = forwardRef<HTMLInputElement, Props>(
const [newTagName, setNewTagName] = useState('');
const styles = useStyles2(getStyles);
const theme = useTheme2();
const isTagTooLong = newTagName.length > 50;
const onNameChange = useCallback((event: React.ChangeEvent<HTMLInputElement>) => {
setNewTagName(event.target.value);
@@ -66,9 +65,6 @@ export const TagsInput = forwardRef<HTMLInputElement, Props>(
const onAdd = (event?: React.MouseEvent | React.KeyboardEvent) => {
event?.preventDefault();
if (newTagName.length > 50) {
return;
}
if (!tags.includes(newTagName)) {
onChange(tags.concat(newTagName));
}
@@ -98,17 +94,14 @@ export const TagsInput = forwardRef<HTMLInputElement, Props>(
value={newTagName}
onKeyDown={onKeyboardAdd}
onBlur={onBlur}
invalid={invalid || isTagTooLong}
invalid={invalid}
suffix={
<Button
fill="text"
className={styles.addButtonStyle}
onClick={onAdd}
size="md"
disabled={newTagName.length <= 0 || isTagTooLong}
title={
isTagTooLong ? t('grafana-ui.tags-input.tag-too-long', 'Tag too long, max 50 characters') : undefined
}
disabled={newTagName.length <= 0}
>
<Trans i18nKey="grafana-ui.tags-input.add">Add</Trans>
</Button>
+2 -2
View File
@@ -13,7 +13,7 @@ type UpdatePrefsCmd struct {
// Deprecated: Use HomeDashboardUID instead
HomeDashboardID int64 `json:"homeDashboardId"`
HomeDashboardUID *string `json:"homeDashboardUID,omitempty"`
// Any IANA timezone string (e.g. America/New_York), 'utc', 'browser', or empty string
// Enum: utc,browser
Timezone string `json:"timezone"`
WeekStart string `json:"weekStart"`
QueryHistory *pref.QueryHistoryPreference `json:"queryHistory,omitempty"`
@@ -31,7 +31,7 @@ type PatchPrefsCmd struct {
// Default:0
// Deprecated: Use HomeDashboardUID instead
HomeDashboardID *int64 `json:"homeDashboardId,omitempty"`
// Any IANA timezone string (e.g. America/New_York), 'utc', 'browser', or empty string
// Enum: utc,browser
Timezone *string `json:"timezone,omitempty"`
WeekStart *string `json:"weekStart,omitempty"`
Language *string `json:"language,omitempty"`
-4
View File
@@ -134,10 +134,6 @@ func (hs *HTTPServer) patchPreferencesFor(ctx context.Context, orgID, userID, te
return response.Error(http.StatusBadRequest, "Invalid theme", nil)
}
if dtoCmd.Timezone != nil && !pref.IsValidTimezone(*dtoCmd.Timezone) {
return response.Error(http.StatusBadRequest, "Invalid timezone. Must be a valid IANA timezone (e.g., America/New_York), 'utc', 'browser', or empty string", nil)
}
// convert dashboard UID to ID in order to store internally if it exists in the query, otherwise take the id from query
// nolint:staticcheck
dashboardID := dtoCmd.HomeDashboardID
+2 -34
View File
@@ -1,58 +1,26 @@
package generic
import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/registry/generic/registry"
"k8s.io/apiserver/pkg/storage"
"github.com/grafana/grafana/pkg/apimachinery/utils"
)
// SelectableFieldsOptions allows customizing field selector behavior for a resource.
type SelectableFieldsOptions struct {
// GetAttrs returns labels and fields for the object.
// If nil, the default GetAttrs is used which only exposes metadata.name.
GetAttrs func(obj runtime.Object) (labels.Set, fields.Set, error)
}
func NewRegistryStore(scheme *runtime.Scheme, resourceInfo utils.ResourceInfo, optsGetter generic.RESTOptionsGetter) (*registry.Store, error) {
return NewRegistryStoreWithSelectableFields(scheme, resourceInfo, optsGetter, SelectableFieldsOptions{})
}
// NewRegistryStoreWithSelectableFields creates a registry store with custom selectable fields support.
// Use this when you need to filter resources by custom fields like spec.connection.name.
func NewRegistryStoreWithSelectableFields(scheme *runtime.Scheme, resourceInfo utils.ResourceInfo, optsGetter generic.RESTOptionsGetter, fieldOpts SelectableFieldsOptions) (*registry.Store, error) {
gv := resourceInfo.GroupVersion()
gv.Version = runtime.APIVersionInternal
strategy := NewStrategy(scheme, gv)
if resourceInfo.IsClusterScoped() {
strategy = strategy.WithClusterScope()
}
// Use custom GetAttrs if provided, otherwise use default
attrFunc := GetAttrs
predicateFunc := Matcher
if fieldOpts.GetAttrs != nil {
attrFunc = fieldOpts.GetAttrs
// Create a matcher that uses the custom GetAttrs
predicateFunc = func(label labels.Selector, field fields.Selector) storage.SelectionPredicate {
return storage.SelectionPredicate{
Label: label,
Field: field,
GetAttrs: attrFunc,
}
}
}
store := &registry.Store{
NewFunc: resourceInfo.NewFunc,
NewListFunc: resourceInfo.NewListFunc,
KeyRootFunc: KeyRootFunc(resourceInfo.GroupResource()),
KeyFunc: NamespaceKeyFunc(resourceInfo.GroupResource()),
PredicateFunc: predicateFunc,
PredicateFunc: Matcher,
DefaultQualifiedResource: resourceInfo.GroupResource(),
SingularQualifiedResource: resourceInfo.SingularGroupResource(),
TableConvertor: resourceInfo.TableConverter(),
@@ -60,7 +28,7 @@ func NewRegistryStoreWithSelectableFields(scheme *runtime.Scheme, resourceInfo u
UpdateStrategy: strategy,
DeleteStrategy: strategy,
}
options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: attrFunc}
options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: GetAttrs}
if err := store.CompleteWithOptions(options); err != nil {
return nil, err
}
+12
View File
@@ -36,6 +36,7 @@ import (
type provisioningControllerConfig struct {
provisioningClient *client.Clientset
resyncInterval time.Duration
repoFactory repository.Factory
unified resources.ResourceStore
clients resources.ClientFactory
tokenExchangeClient *authn.TokenExchangeClient
@@ -128,6 +129,16 @@ func setupFromConfig(cfg *setting.Cfg, registry prometheus.Registerer) (controll
return nil, fmt.Errorf("failed to create provisioning client: %w", err)
}
decrypter, err := setupDecrypter(cfg, tracer, tokenExchangeClient)
if err != nil {
return nil, fmt.Errorf("failed to setup decrypter: %w", err)
}
repoFactory, err := setupRepoFactory(cfg, decrypter, provisioningClient, registry)
if err != nil {
return nil, fmt.Errorf("failed to setup repository getter: %w", err)
}
// HACK: This logic directly connects to unified storage. We are doing this for now as there is no global
// search endpoint. But controllers, in general, should not connect directly to unified storage and instead
// go through the api server. Once there is a global search endpoint, we will switch to that here as well.
@@ -184,6 +195,7 @@ func setupFromConfig(cfg *setting.Cfg, registry prometheus.Registerer) (controll
return &provisioningControllerConfig{
provisioningClient: provisioningClient,
repoFactory: repoFactory,
unified: unified,
clients: clients,
resyncInterval: operatorSec.Key("resync_interval").MustDuration(60 * time.Second),
@@ -1,86 +0,0 @@
package provisioning
import (
"context"
"fmt"
"log/slog"
"os"
"os/signal"
"syscall"
"github.com/grafana/grafana-app-sdk/logging"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/client-go/tools/cache"
appcontroller "github.com/grafana/grafana/apps/provisioning/pkg/controller"
informer "github.com/grafana/grafana/apps/provisioning/pkg/generated/informers/externalversions"
"github.com/grafana/grafana/pkg/registry/apis/provisioning/controller"
"github.com/grafana/grafana/pkg/server"
"github.com/grafana/grafana/pkg/setting"
)
// RunConnectionController starts the connection controller operator.
func RunConnectionController(deps server.OperatorDependencies) error {
logger := logging.NewSLogLogger(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
Level: slog.LevelDebug,
})).With("logger", "provisioning-connection-controller")
logger.Info("Starting provisioning connection controller")
controllerCfg, err := getConnectionControllerConfig(deps.Config, deps.Registerer)
if err != nil {
return fmt.Errorf("failed to setup operator: %w", err)
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-sigChan
fmt.Println("Received shutdown signal, stopping controllers")
cancel()
}()
informerFactory := informer.NewSharedInformerFactoryWithOptions(
controllerCfg.provisioningClient,
controllerCfg.resyncInterval,
)
statusPatcher := appcontroller.NewConnectionStatusPatcher(controllerCfg.provisioningClient.ProvisioningV0alpha1())
connInformer := informerFactory.Provisioning().V0alpha1().Connections()
connController, err := controller.NewConnectionController(
controllerCfg.provisioningClient.ProvisioningV0alpha1(),
connInformer,
statusPatcher,
)
if err != nil {
return fmt.Errorf("failed to create connection controller: %w", err)
}
informerFactory.Start(ctx.Done())
if !cache.WaitForCacheSync(ctx.Done(), connInformer.Informer().HasSynced) {
return fmt.Errorf("failed to sync informer cache")
}
connController.Run(ctx, controllerCfg.workerCount)
return nil
}
type connectionControllerConfig struct {
provisioningControllerConfig
workerCount int
}
func getConnectionControllerConfig(cfg *setting.Cfg, registry prometheus.Registerer) (*connectionControllerConfig, error) {
controllerCfg, err := setupFromConfig(cfg, registry)
if err != nil {
return nil, err
}
return &connectionControllerConfig{
provisioningControllerConfig: *controllerCfg,
workerCount: cfg.SectionWithEnvOverrides("operator").Key("worker_count").MustInt(1),
}, nil
}
@@ -106,7 +106,6 @@ func RunRepoController(deps server.OperatorDependencies) error {
type repoControllerConfig struct {
provisioningControllerConfig
repoFactory repository.Factory
workerCount int
parallelOperations int
allowedTargets []string
@@ -120,17 +119,6 @@ func getRepoControllerConfig(cfg *setting.Cfg, registry prometheus.Registerer) (
return nil, err
}
// Setup repository factory for repo controller
decrypter, err := setupDecrypter(cfg, tracing.NewNoopTracerService(), controllerCfg.tokenExchangeClient)
if err != nil {
return nil, fmt.Errorf("failed to setup decrypter: %w", err)
}
repoFactory, err := setupRepoFactory(cfg, decrypter, controllerCfg.provisioningClient, registry)
if err != nil {
return nil, fmt.Errorf("failed to setup repository factory: %w", err)
}
allowedTargets := []string{}
cfg.SectionWithEnvOverrides("provisioning").Key("allowed_targets").Strings("|")
if len(allowedTargets) == 0 {
@@ -139,7 +127,6 @@ func getRepoControllerConfig(cfg *setting.Cfg, registry prometheus.Registerer) (
return &repoControllerConfig{
provisioningControllerConfig: *controllerCfg,
repoFactory: repoFactory,
allowedTargets: allowedTargets,
workerCount: cfg.SectionWithEnvOverrides("operator").Key("worker_count").MustInt(1),
parallelOperations: cfg.SectionWithEnvOverrides("operator").Key("parallel_operations").MustInt(10),
-6
View File
@@ -13,12 +13,6 @@ func init() {
RunFunc: provisioning.RunRepoController,
})
server.RegisterOperator(server.Operator{
Name: "provisioning-connection",
Description: "Watch provisioning connections",
RunFunc: provisioning.RunConnectionController,
})
server.RegisterOperator(server.Operator{
Name: "iam-folder-reconciler",
Description: "Reconcile folder resources into Zanzana",
-130
View File
@@ -1,130 +0,0 @@
module github.com/grafana/grafana/pkg/plugins
go 1.25.5
require (
github.com/Machiel/slugify v1.0.1
github.com/ProtonMail/go-crypto v1.3.0
github.com/gobwas/glob v0.2.3
github.com/google/go-cmp v0.7.0
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4
github.com/grafana/grafana-plugin-sdk-go v0.284.0
github.com/grafana/grafana/pkg/apimachinery v0.0.0
github.com/grafana/grafana/pkg/semconv v0.0.0
github.com/hashicorp/go-hclog v1.6.3
github.com/hashicorp/go-plugin v1.7.0
github.com/hashicorp/go-secure-stdlib/plugincontainer v0.4.2
github.com/stretchr/testify v1.11.1
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0
go.opentelemetry.io/otel v1.39.0
go.opentelemetry.io/otel/trace v1.39.0
google.golang.org/grpc v1.77.0
google.golang.org/protobuf v1.36.11
)
require (
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/apache/arrow-go/v18 v18.4.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cheekybits/genny v1.0.0 // indirect
github.com/cloudflare/circl v1.6.1 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/docker v28.5.2+incompatible // indirect
github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/gogo/googleapis v1.4.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/flatbuffers v25.2.10+incompatible // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f // indirect
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 // indirect
github.com/grafana/otel-profiling-go v0.5.1 // indirect
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
github.com/hashicorp/yamux v0.1.2 // indirect
github.com/jaegertracing/jaeger-idl v0.5.0 // indirect
github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/mattetti/filebuffer v1.0.1 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/oklog/run v1.1.0 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.67.4 // indirect
github.com/prometheus/procfs v0.19.2 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/zeebo/xxh3 v1.0.2 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect
go.opentelemetry.io/contrib/propagators/jaeger v1.38.0 // indirect
go.opentelemetry.io/contrib/samplers/jaegerremote v0.32.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0 // indirect
go.opentelemetry.io/otel/metric v1.39.0 // indirect
go.opentelemetry.io/otel/sdk v1.39.0 // indirect
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
golang.org/x/crypto v0.46.0 // indirect
golang.org/x/exp v0.0.0-20251209150349-8475f28825e9 // indirect
golang.org/x/mod v0.31.0 // indirect
golang.org/x/net v0.48.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.40.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20251213004720-97cd9d5aeac2 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
gotest.tools/v3 v3.5.2 // indirect
k8s.io/apimachinery v0.34.3 // indirect
k8s.io/apiserver v0.34.3 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.1 // indirect
)
replace (
github.com/grafana/grafana/pkg/apimachinery => ../apimachinery
github.com/grafana/grafana/pkg/semconv => ../semconv
)
-347
View File
@@ -1,347 +0,0 @@
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Machiel/slugify v1.0.1 h1:EfWSlRWstMadsgzmiV7d0yVd2IFlagWH68Q+DcYCm4E=
github.com/Machiel/slugify v1.0.1/go.mod h1:fTFGn5uWEynW4CUMG7sWkYXOf1UgDxyTM3DbR6Qfg3k=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
github.com/apache/arrow-go/v18 v18.4.1 h1:q/jVkBWCJOB9reDgaIZIdruLQUb1kbkvOnOFezVH1C4=
github.com/apache/arrow-go/v18 v18.4.1/go.mod h1:tLyFubsAl17bvFdUAy24bsSvA/6ww95Iqi67fTpGu3E=
github.com/apache/thrift v0.22.0 h1:r7mTJdj51TMDe6RtcmNdQxgn9XcyfGDOzegMDRg47uc=
github.com/apache/thrift v0.22.0/go.mod h1:1e7J/O1Ae6ZQMTYdy9xa3w9k+XHWPfRvdPyJeynQ+/g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw=
github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f h1:Cbm6OKkOcJ+7CSZsGsEJzktC/SIa5bxVeYKQLuYK86o=
github.com/grafana/authlib v0.0.0-20250930082137-a40e2c2b094f/go.mod h1:axY0cdOg3q0TZHwpHnIz5x16xZ8ZBxJHShsSHHXcHQg=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4 h1:Muoy+FMGrHj3GdFbvsMzUT7eusgii9PKf9L1ZaXDDbY=
github.com/grafana/authlib/types v0.0.0-20251119142549-be091cf2f4d4/go.mod h1:qeWYbnWzaYGl88JlL9+DsP1GT2Cudm58rLtx13fKZdw=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4 h1:jSojuc7njleS3UOz223WDlXOinmuLAIPI0z2vtq8EgI=
github.com/grafana/dskit v0.0.0-20250908063411-6b6da59b5cc4/go.mod h1:VahT+GtfQIM+o8ht2StR6J9g+Ef+C2Vokh5uuSmOD/4=
github.com/grafana/grafana-plugin-sdk-go v0.284.0 h1:1bK7eWsnPBLUWDcWJWe218Ik5ad0a5JpEL4mH9ry7Ws=
github.com/grafana/grafana-plugin-sdk-go v0.284.0/go.mod h1:lHPniaSxq3SL5MxDIPy04TYB1jnTp/ivkYO+xn5Rz3E=
github.com/grafana/otel-profiling-go v0.5.1 h1:stVPKAFZSa7eGiqbYuG25VcqYksR6iWvF3YH66t4qL8=
github.com/grafana/otel-profiling-go v0.5.1/go.mod h1:ftN/t5A/4gQI19/8MoWurBEtC6gFw8Dns1sJZ9W4Tls=
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og=
github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU=
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 h1:QGLs/O40yoNK9vmy4rhUGBVyMf1lISBGtXRpsu/Qu/o=
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0/go.mod h1:hM2alZsMUni80N33RBe6J0e423LB+odMj7d3EMP9l20=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 h1:B+8ClL/kCQkRiU82d9xajRPKYMrB7E0MbtzWVi1K4ns=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3/go.mod h1:NbCUVmiS4foBGBHOYlCT25+YmGpJ32dZPi75pGEUpj4=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-plugin v1.7.0 h1:YghfQH/0QmPNc/AZMTFE3ac8fipZyZECHdDPshfk+mA=
github.com/hashicorp/go-plugin v1.7.0/go.mod h1:BExt6KEaIYx804z8k4gRzRLEvxKVb+kn0NMcihqOqb8=
github.com/hashicorp/go-secure-stdlib/plugincontainer v0.4.2 h1:gCNiM4T5xEc4IpT8vM50CIO+AtElr5kO9l2Rxbq+Sz8=
github.com/hashicorp/go-secure-stdlib/plugincontainer v0.4.2/go.mod h1:6ZM4ZdwClyAsiU2uDBmRHCvq0If/03BMbF9U+U7G5pA=
github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8=
github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns=
github.com/jaegertracing/jaeger-idl v0.5.0 h1:zFXR5NL3Utu7MhPg8ZorxtCBjHrL3ReM1VoB65FOFGE=
github.com/jaegertracing/jaeger-idl v0.5.0/go.mod h1:ON90zFo9eoyXrt9F/KN8YeF3zxcnujaisMweFY/rg5k=
github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94=
github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8=
github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531 h1:hgVxRoDDPtQE68PT4LFvNlPz2nBKd3OMlGKIQ69OmR4=
github.com/joshlf/go-acl v0.0.0-20200411065538-eae00ae38531/go.mod h1:fqTUQpVYBvhCNIsMXGl2GE9q6z94DIP6NtFKXCSTVbg=
github.com/joshlf/testutil v0.0.0-20170608050642-b5d8aa79d93d h1:J8tJzRyiddAFF65YVgxli+TyWBi0f79Sld6rJP6CBcY=
github.com/joshlf/testutil v0.0.0-20170608050642-b5d8aa79d93d/go.mod h1:b+Q3v8Yrg5o15d71PSUraUzYb+jWl6wQMSBXSGS/hv0=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4=
github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattetti/filebuffer v1.0.1 h1:gG7pyfnSIZCxdoKq+cPa8T0hhYtD9NxCdI4D7PTjRLM=
github.com/mattetti/filebuffer v1.0.1/go.mod h1:YdMURNDOttIiruleeVr6f56OrMc+MydEnTcXwtkxNVs=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI=
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0 h1:RN3ifU8y4prNWeEnQp2kRRHz8UwonAEYZl8tUzHEXAk=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0/go.mod h1:habDz3tEWiFANTo6oUE99EmaFUrCNYAAg3wiVmusm70=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 h1:2pn7OzMewmYRiNtv1doZnLo3gONcnMHlFnmOR8Vgt+8=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0/go.mod h1:rjbQTDEPQymPE0YnRQp9/NuPwwtL0sesz/fnqRW/v84=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
go.opentelemetry.io/contrib/propagators/jaeger v1.38.0 h1:nXGeLvT1QtCAhkASkP/ksjkTKZALIaQBIW+JSIw1KIc=
go.opentelemetry.io/contrib/propagators/jaeger v1.38.0/go.mod h1:oMvOXk78ZR3KEuPMBgp/ThAMDy9ku/eyUVztr+3G6Wo=
go.opentelemetry.io/contrib/samplers/jaegerremote v0.32.0 h1:oPW/SRFyHgIgxrvNhSBzqvZER2N5kRlci3/rGTOuyWo=
go.opentelemetry.io/contrib/samplers/jaegerremote v0.32.0/go.mod h1:B9Oka5QVD0bnmZNO6gBbBta6nohD/1Z+f9waH2oXyBs=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 h1:in9O8ESIOlwJAEGTkkf34DesGRAc/Pn8qJ7k3r/42LM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0/go.mod h1:Rp0EXBm5tfnv0WL+ARyO/PHBEaEAT8UUHQ6AGJcSq6c=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0 h1:Ckwye2FpXkYgiHX7fyVrN1uA/UYd9ounqqTuSNAv0k4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0/go.mod h1:teIFJh5pW2y+AN7riv6IBPX2DuesS3HgP39mwOspKwU=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18=
go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE=
go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8=
go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/exp v0.0.0-20251209150349-8475f28825e9 h1:MDfG8Cvcqlt9XXrmEiD4epKn7VJHZO84hejP9Jmp0MM=
golang.org/x/exp v0.0.0-20251209150349-8475f28825e9/go.mod h1:EPRbTFwzwjXj9NpYyyrvenVh9Y+GFeEvMNh7Xuz7xgU=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA=
golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/genproto/googleapis/api v0.0.0-20251213004720-97cd9d5aeac2 h1:7LRqPCEdE4TP4/9psdaB7F2nhZFfBiGJomA5sojLWdU=
google.golang.org/genproto/googleapis/api v0.0.0-20251213004720-97cd9d5aeac2/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2 h1:2I6GHUeJ/4shcDpoUlLs/2WPnhg7yJwvXtqcMJt9liA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251213004720-97cd9d5aeac2/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE=
k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/apiserver v0.34.3 h1:uGH1qpDvSiYG4HVFqc6A3L4CKiX+aBWDrrsxHYK0Bdo=
k8s.io/apiserver v0.34.3/go.mod h1:QPnnahMO5C2m3lm6fPW3+JmyQbvHZQ8uudAu/493P2w=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.1 h1:JrhdFMqOd/+3ByqlP2I45kTOZmTRLBUm5pvRjeheg7E=
sigs.k8s.io/structured-merge-diff/v6 v6.3.1/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
@@ -224,7 +224,7 @@ func (a *dashboardSqlAccess) CountResources(ctx context.Context, opts MigrateOpt
case "folder.grafana.app/folders":
summary := &resourcepb.BulkResponse_Summary{}
summary.Group = folders.GROUP
summary.Resource = folders.RESOURCE
summary.Group = folders.RESOURCE
_, err = sess.SQL("SELECT COUNT(*) FROM "+sql.Table("dashboard")+
" WHERE is_folder=TRUE AND org_id=?", orgId).Get(&summary.Count)
rsp.Summary = append(rsp.Summary, summary)
-36
View File
@@ -389,11 +389,6 @@ func (b *DashboardsAPIBuilder) validateCreate(ctx context.Context, a admission.A
return apierrors.NewBadRequest(err.Error())
}
// Validate tags
if err := validateDashboardTags(dashObj); err != nil {
return apierrors.NewBadRequest(err.Error())
}
id, err := identity.GetRequester(ctx)
if err != nil {
return fmt.Errorf("error getting requester: %w", err)
@@ -464,11 +459,6 @@ func (b *DashboardsAPIBuilder) validateUpdate(ctx context.Context, a admission.A
return apierrors.NewBadRequest(err.Error())
}
// Validate tags
if err := validateDashboardTags(newDashObj); err != nil {
return apierrors.NewBadRequest(err.Error())
}
// Validate folder existence if specified and changed
if !a.IsDryRun() && newAccessor.GetFolder() != oldAccessor.GetFolder() && newAccessor.GetFolder() != "" {
id, err := identity.GetRequester(ctx)
@@ -566,32 +556,6 @@ func getDashboardProperties(obj runtime.Object) (string, string, error) {
return title, refresh, nil
}
// validateDashboardTags validates that all dashboard tags are within the maximum length
// maxDashboardTagLength is the maximum number of bytes allowed in a single
// dashboard tag.
const maxDashboardTagLength = 50

// validateDashboardTags validates that all dashboard tags are within the
// maximum length. It extracts the tag list from whichever concrete dashboard
// API version the object is; an unrecognized type yields an error.
//
// Returns dashboards.ErrDashboardTagTooLong for the first over-long tag,
// nil when every tag is within the limit.
func validateDashboardTags(obj runtime.Object) error {
	var tags []string
	switch d := obj.(type) {
	case *dashv0.Dashboard:
		tags = d.Spec.GetNestedStringSlice("tags")
	case *dashv1.Dashboard:
		tags = d.Spec.GetNestedStringSlice("tags")
	case *dashv2alpha1.Dashboard:
		tags = d.Spec.Tags
	case *dashv2beta1.Dashboard:
		tags = d.Spec.Tags
	default:
		return fmt.Errorf("unsupported dashboard version: %T", obj)
	}
	for _, tag := range tags {
		// len(tag) counts bytes, not runes; the limit is a byte limit.
		if len(tag) > maxDashboardTagLength {
			return dashboards.ErrDashboardTagTooLong
		}
	}
	return nil
}
func (b *DashboardsAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupInfo, opts builder.APIGroupOptions) error {
storageOpts := apistore.StorageOptions{
EnableFolderSupport: true,
+1 -1
View File
@@ -53,7 +53,7 @@ func newIAMAuthorizer(
resourceAuthorizer[iamv0.RoleBindingInfo.GetName()] = authorizer
resourceAuthorizer[iamv0.ServiceAccountResourceInfo.GetName()] = authorizer
resourceAuthorizer[iamv0.UserResourceInfo.GetName()] = authorizer
resourceAuthorizer[iamv0.ExternalGroupMappingResourceInfo.GetName()] = allowAuthorizer
resourceAuthorizer[iamv0.ExternalGroupMappingResourceInfo.GetName()] = authorizer
resourceAuthorizer[iamv0.TeamResourceInfo.GetName()] = authorizer
resourceAuthorizer["searchUsers"] = serviceAuthorizer
resourceAuthorizer["searchTeams"] = serviceAuthorizer
@@ -1,150 +0,0 @@
package authorizer
import (
"context"
"fmt"
"github.com/grafana/authlib/types"
"k8s.io/apimachinery/pkg/runtime"
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
"github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/grafana/grafana/pkg/services/apiserver/auth/authorizer/storewrapper"
apierrors "k8s.io/apimachinery/pkg/api/errors"
)
// ExternalGroupMappingAuthorizer authorizes access to ExternalGroupMapping
// resources by checking the caller's permissions on the team each mapping
// references.
type ExternalGroupMappingAuthorizer struct {
	// accessClient performs the permission checks against the access service.
	accessClient types.AccessClient
}

// Compile-time check that the storewrapper authorizer interface is satisfied.
var _ storewrapper.ResourceStorageAuthorizer = (*ExternalGroupMappingAuthorizer)(nil)

// NewExternalGroupMappingAuthorizer returns an authorizer backed by the given
// access client.
func NewExternalGroupMappingAuthorizer(
	accessClient types.AccessClient,
) *ExternalGroupMappingAuthorizer {
	return &ExternalGroupMappingAuthorizer{
		accessClient: accessClient,
	}
}
// AfterGet implements ResourceStorageAuthorizer.
func (r *ExternalGroupMappingAuthorizer) AfterGet(ctx context.Context, obj runtime.Object) error {
authInfo, ok := types.AuthInfoFrom(ctx)
if !ok {
return storewrapper.ErrUnauthenticated
}
concreteObj, ok := obj.(*iamv0.ExternalGroupMapping)
if !ok {
return apierrors.NewInternalError(fmt.Errorf("expected ExternalGroupMapping, got %T: %w", obj, storewrapper.ErrUnexpectedType))
}
teamName := concreteObj.Spec.TeamRef.Name
checkReq := types.CheckRequest{
Namespace: authInfo.GetNamespace(),
Group: iamv0.GROUP,
Resource: iamv0.TeamResourceInfo.GetName(),
Verb: utils.VerbGetPermissions,
Name: teamName,
}
res, err := r.accessClient.Check(ctx, authInfo, checkReq, "")
if err != nil {
return apierrors.NewInternalError(err)
}
if !res.Allowed {
return apierrors.NewForbidden(
iamv0.ExternalGroupMappingResourceInfo.GroupResource(),
concreteObj.Name,
fmt.Errorf("user cannot access team %s", teamName),
)
}
return nil
}
// BeforeCreate implements ResourceStorageAuthorizer.
// Creation requires write permission on the referenced team (see beforeWrite).
func (r *ExternalGroupMappingAuthorizer) BeforeCreate(ctx context.Context, obj runtime.Object) error {
	return r.beforeWrite(ctx, obj)
}
// BeforeDelete implements ResourceStorageAuthorizer.
// Deletion requires write permission on the referenced team (see beforeWrite).
func (r *ExternalGroupMappingAuthorizer) BeforeDelete(ctx context.Context, obj runtime.Object) error {
	return r.beforeWrite(ctx, obj)
}
// BeforeUpdate implements ResourceStorageAuthorizer.
//
// Update is not supported for ExternalGroupMapping resources and update
// attempts are blocked at a lower level, so this is just a safeguard:
// it unconditionally rejects with a MethodNotSupported error.
func (r *ExternalGroupMappingAuthorizer) BeforeUpdate(ctx context.Context, obj runtime.Object) error {
	return apierrors.NewMethodNotSupported(iamv0.ExternalGroupMappingResourceInfo.GroupResource(), "PUT/PATCH")
}
// beforeWrite is the shared authorization step for create and delete: the
// caller must hold the set-permissions verb on the team the mapping refers to.
func (r *ExternalGroupMappingAuthorizer) beforeWrite(ctx context.Context, obj runtime.Object) error {
	ident, ok := types.AuthInfoFrom(ctx)
	if !ok {
		return storewrapper.ErrUnauthenticated
	}
	mapping, ok := obj.(*iamv0.ExternalGroupMapping)
	if !ok {
		return apierrors.NewInternalError(fmt.Errorf("expected ExternalGroupMapping, got %T: %w", obj, storewrapper.ErrUnexpectedType))
	}
	// The write check targets the referenced team, not the mapping object.
	team := mapping.Spec.TeamRef.Name
	res, err := r.accessClient.Check(ctx, ident, types.CheckRequest{
		Namespace: ident.GetNamespace(),
		Group:     iamv0.GROUP,
		Resource:  iamv0.TeamResourceInfo.GetName(),
		Verb:      utils.VerbSetPermissions,
		Name:      team,
	}, "")
	if err != nil {
		return apierrors.NewInternalError(err)
	}
	if res.Allowed {
		return nil
	}
	return apierrors.NewForbidden(
		iamv0.ExternalGroupMappingResourceInfo.GroupResource(),
		mapping.Name,
		fmt.Errorf("user cannot write team %s", team),
	)
}
// FilterList implements ResourceStorageAuthorizer.
//
// It compiles a single team-level view checker for the caller and keeps only
// the mappings whose referenced team the caller is allowed to view. The list
// object is filtered in place and returned.
func (r *ExternalGroupMappingAuthorizer) FilterList(ctx context.Context, list runtime.Object) (runtime.Object, error) {
	authInfo, ok := types.AuthInfoFrom(ctx)
	if !ok {
		return nil, storewrapper.ErrUnauthenticated
	}
	l, ok := list.(*iamv0.ExternalGroupMappingList)
	if !ok {
		return nil, apierrors.NewInternalError(fmt.Errorf("expected ExternalGroupMappingList, got %T: %w", list, storewrapper.ErrUnexpectedType))
	}
	listReq := types.ListRequest{
		Namespace: authInfo.GetNamespace(),
		Group:     iamv0.GROUP,
		Resource:  iamv0.TeamResourceInfo.GetName(),
		Verb:      utils.VerbGetPermissions,
	}
	// One compiled checker covers every item; avoids a Check call per mapping.
	canView, _, err := r.accessClient.Compile(ctx, authInfo, listReq)
	if err != nil {
		return nil, apierrors.NewInternalError(err)
	}
	// Pre-size to the upper bound so the filter loop never reallocates; this
	// also keeps Items a non-nil (empty) slice when everything is filtered out.
	filteredItems := make([]iamv0.ExternalGroupMapping, 0, len(l.Items))
	for _, item := range l.Items {
		if canView(item.Spec.TeamRef.Name, "") {
			filteredItems = append(filteredItems, item)
		}
	}
	l.Items = filteredItems
	return l, nil
}
@@ -1,229 +0,0 @@
package authorizer
import (
"context"
"testing"
"github.com/stretchr/testify/require"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/grafana/authlib/types"
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
"github.com/grafana/grafana/pkg/apimachinery/utils"
)
// newExternalGroupMapping builds a test fixture mapping in namespace "org-2"
// with the given object name, referencing the given team.
func newExternalGroupMapping(teamName, name string) *iamv0.ExternalGroupMapping {
	spec := iamv0.ExternalGroupMappingSpec{
		TeamRef: iamv0.ExternalGroupMappingTeamRef{Name: teamName},
	}
	return &iamv0.ExternalGroupMapping{
		ObjectMeta: metav1.ObjectMeta{Namespace: "org-2", Name: name},
		Spec:       spec,
	}
}
// TestExternalGroupMapping_AfterGet verifies that reading a mapping issues a
// get-permissions check on the referenced team and that the check's result
// decides whether the read succeeds.
func TestExternalGroupMapping_AfterGet(t *testing.T) {
	mapping := newExternalGroupMapping("team-1", "mapping-1")
	tests := []struct {
		name        string
		shouldAllow bool
	}{
		{
			name:        "allow access",
			shouldAllow: true,
		},
		{
			name:        "deny access",
			shouldAllow: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// The fake asserts the exact check request AfterGet must build:
			// team resource, get-permissions verb, caller's namespace/identity.
			checkFunc := func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error) {
				require.NotNil(t, id)
				require.Equal(t, "user:u001", id.GetUID())
				require.Equal(t, "org-2", id.GetNamespace())
				require.Equal(t, "org-2", req.Namespace)
				require.Equal(t, iamv0.GROUP, req.Group)
				require.Equal(t, iamv0.TeamResourceInfo.GetName(), req.Resource)
				require.Equal(t, "team-1", req.Name)
				require.Equal(t, utils.VerbGetPermissions, req.Verb)
				require.Equal(t, "", folder)
				return types.CheckResponse{Allowed: tt.shouldAllow}, nil
			}
			accessClient := &fakeAccessClient{checkFunc: checkFunc}
			authz := NewExternalGroupMappingAuthorizer(accessClient)
			ctx := types.WithAuthInfo(context.Background(), user)
			err := authz.AfterGet(ctx, mapping)
			if tt.shouldAllow {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
			}
			// The authorizer must always consult the access client.
			require.True(t, accessClient.checkCalled)
		})
	}
}
// TestExternalGroupMapping_FilterList verifies that FilterList compiles a
// single team-view checker with the caller's identity and keeps only the
// mappings whose referenced team the checker allows.
func TestExternalGroupMapping_FilterList(t *testing.T) {
	list := &iamv0.ExternalGroupMappingList{
		Items: []iamv0.ExternalGroupMapping{
			*newExternalGroupMapping("team-1", "mapping-1"),
			*newExternalGroupMapping("team-2", "mapping-2"),
		},
		ListMeta: metav1.ListMeta{
			SelfLink: "/apis/iam.grafana.app/v0alpha1/namespaces/org-2/externalgroupmappings",
		},
	}
	compileFunc := func(id types.AuthInfo, req types.ListRequest) (types.ItemChecker, types.Zookie, error) {
		require.NotNil(t, id)
		require.Equal(t, "user:u001", id.GetUID())
		require.Equal(t, "org-2", id.GetNamespace())
		require.Equal(t, "org-2", req.Namespace)
		require.Equal(t, iamv0.GROUP, req.Group)
		require.Equal(t, iamv0.TeamResourceInfo.GetName(), req.Resource)
		require.Equal(t, utils.VerbGetPermissions, req.Verb)
		// Only team-1 is visible to the caller.
		return func(name, folder string) bool {
			return name == "team-1"
		}, &types.NoopZookie{}, nil
	}
	accessClient := &fakeAccessClient{compileFunc: compileFunc}
	authz := NewExternalGroupMappingAuthorizer(accessClient)
	ctx := types.WithAuthInfo(context.Background(), user)
	obj, err := authz.FilterList(ctx, list)
	require.NoError(t, err)
	// Assert on the returned object; the original asserted the input literal
	// `list`, which can never be nil and so proved nothing.
	require.NotNil(t, obj)
	require.True(t, accessClient.compileCalled)
	filtered, ok := obj.(*iamv0.ExternalGroupMappingList)
	require.True(t, ok)
	require.Len(t, filtered.Items, 1)
	require.Equal(t, "mapping-1", filtered.Items[0].Name)
}
// TestExternalGroupMapping_BeforeCreate verifies that creating a mapping
// issues a set-permissions check on the referenced team and that the check's
// result decides whether the create proceeds.
func TestExternalGroupMapping_BeforeCreate(t *testing.T) {
	mapping := newExternalGroupMapping("team-1", "mapping-1")
	tests := []struct {
		name        string
		shouldAllow bool
	}{
		{
			name:        "allow create",
			shouldAllow: true,
		},
		{
			name:        "deny create",
			shouldAllow: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Writes use VerbSetPermissions (reads use VerbGetPermissions).
			checkFunc := func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error) {
				require.NotNil(t, id)
				require.Equal(t, "user:u001", id.GetUID())
				require.Equal(t, "org-2", id.GetNamespace())
				require.Equal(t, "org-2", req.Namespace)
				require.Equal(t, iamv0.GROUP, req.Group)
				require.Equal(t, iamv0.TeamResourceInfo.GetName(), req.Resource)
				require.Equal(t, "team-1", req.Name)
				require.Equal(t, utils.VerbSetPermissions, req.Verb)
				require.Equal(t, "", folder)
				return types.CheckResponse{Allowed: tt.shouldAllow}, nil
			}
			accessClient := &fakeAccessClient{checkFunc: checkFunc}
			authz := NewExternalGroupMappingAuthorizer(accessClient)
			ctx := types.WithAuthInfo(context.Background(), user)
			err := authz.BeforeCreate(ctx, mapping)
			if tt.shouldAllow {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
			}
			// The authorizer must always consult the access client.
			require.True(t, accessClient.checkCalled)
		})
	}
}
// TestExternalGroupMapping_BeforeUpdate verifies that updates are rejected
// with a MethodNotSupported error and that no permission check is performed.
func TestExternalGroupMapping_BeforeUpdate(t *testing.T) {
	mapping := newExternalGroupMapping("team-1", "mapping-1")
	accessClient := &fakeAccessClient{
		// Fails the test immediately if the authorizer performs a check.
		checkFunc: func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error) {
			require.Fail(t, "check should not be called")
			return types.CheckResponse{}, nil
		},
	}
	authz := NewExternalGroupMappingAuthorizer(accessClient)
	ctx := types.WithAuthInfo(context.Background(), user)
	err := authz.BeforeUpdate(ctx, mapping)
	require.Error(t, err)
	require.True(t, apierrors.IsMethodNotSupported(err))
	require.Contains(t, err.Error(), "PUT/PATCH")
	require.False(t, accessClient.checkCalled)
}
// TestExternalGroupMapping_BeforeDelete verifies that deleting a mapping
// issues a set-permissions check on the referenced team and that the check's
// result decides whether the delete proceeds.
func TestExternalGroupMapping_BeforeDelete(t *testing.T) {
	mapping := newExternalGroupMapping("team-1", "mapping-1")
	tests := []struct {
		name        string
		shouldAllow bool
	}{
		{
			name:        "allow delete",
			shouldAllow: true,
		},
		{
			name:        "deny delete",
			shouldAllow: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Deletes share the write path, so VerbSetPermissions is expected.
			checkFunc := func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error) {
				require.NotNil(t, id)
				require.Equal(t, "user:u001", id.GetUID())
				require.Equal(t, "org-2", id.GetNamespace())
				require.Equal(t, "org-2", req.Namespace)
				require.Equal(t, iamv0.GROUP, req.Group)
				require.Equal(t, iamv0.TeamResourceInfo.GetName(), req.Resource)
				require.Equal(t, "team-1", req.Name)
				require.Equal(t, utils.VerbSetPermissions, req.Verb)
				require.Equal(t, "", folder)
				return types.CheckResponse{Allowed: tt.shouldAllow}, nil
			}
			accessClient := &fakeAccessClient{checkFunc: checkFunc}
			authz := NewExternalGroupMappingAuthorizer(accessClient)
			ctx := types.WithAuthInfo(context.Background(), user)
			err := authz.BeforeDelete(ctx, mapping)
			if tt.shouldAllow {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
			}
			// The authorizer must always consult the access client.
			require.True(t, accessClient.checkCalled)
		})
	}
}
@@ -179,17 +179,19 @@ func (r *ResourcePermissionsAuthorizer) FilterList(ctx context.Context, list run
canViewFuncs = map[schema.GroupResource]types.ItemChecker{}
)
for _, item := range l.Items {
target := item.Spec.Resource
targetGR := schema.GroupResource{Group: target.ApiGroup, Resource: target.Resource}
gr := schema.GroupResource{
Group: item.Spec.Resource.ApiGroup,
Resource: item.Spec.Resource.Resource,
}
// Reuse the same canView for items with the same resource
canView, found := canViewFuncs[targetGR]
canView, found := canViewFuncs[gr]
if !found {
listReq := types.ListRequest{
Namespace: item.Namespace,
Group: target.ApiGroup,
Resource: target.Resource,
Group: item.Spec.Resource.ApiGroup,
Resource: item.Spec.Resource.Resource,
Verb: utils.VerbGetPermissions,
}
@@ -198,9 +200,12 @@ func (r *ResourcePermissionsAuthorizer) FilterList(ctx context.Context, list run
return nil, err
}
canViewFuncs[targetGR] = canView
canViewFuncs[gr] = canView
}
target := item.Spec.Resource
targetGR := schema.GroupResource{Group: target.ApiGroup, Resource: target.Resource}
parent := ""
// Fetch the parent of the resource
// It's not efficient to do for every item in the list, but it's a good starting point.
@@ -4,15 +4,35 @@ import (
"context"
"testing"
"github.com/go-jose/go-jose/v4/jwt"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/grafana/authlib/authn"
"github.com/grafana/authlib/types"
iamv0 "github.com/grafana/grafana/apps/iam/pkg/apis/iam/v0alpha1"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/apimachinery/utils"
)
var (
user = authn.NewIDTokenAuthInfo(
authn.Claims[authn.AccessTokenClaims]{
Claims: jwt.Claims{Issuer: "grafana",
Subject: types.NewTypeID(types.TypeAccessPolicy, "grafana"), Audience: []string{"iam.grafana.app"}},
Rest: authn.AccessTokenClaims{
Namespace: "*",
Permissions: identity.ServiceIdentityClaims.Rest.Permissions,
DelegatedPermissions: identity.ServiceIdentityClaims.Rest.DelegatedPermissions,
},
}, &authn.Claims[authn.IDTokenClaims]{
Claims: jwt.Claims{Subject: types.NewTypeID(types.TypeUser, "u001")},
Rest: authn.IDTokenClaims{Namespace: "org-2", Identifier: "u001", Type: types.TypeUser},
},
)
)
func newResourcePermission(apiGroup, resource, name string) *iamv0.ResourcePermission {
return &iamv0.ResourcePermission{
ObjectMeta: metav1.ObjectMeta{Namespace: "org-2"},
@@ -202,6 +222,26 @@ func TestResourcePermissions_beforeWrite(t *testing.T) {
}
}
// fakeAccessClient is a mock implementation of claims.AccessClient
type fakeAccessClient struct {
	// checkCalled records whether Check was invoked.
	checkCalled bool
	// checkFunc is the per-test behavior injected for Check.
	checkFunc func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error)
	// compileCalled records whether Compile was invoked.
	compileCalled bool
	// compileFunc is the per-test behavior injected for Compile.
	compileFunc func(id types.AuthInfo, req types.ListRequest) (types.ItemChecker, types.Zookie, error)
}

// Check records the call and delegates to checkFunc.
func (m *fakeAccessClient) Check(ctx context.Context, id types.AuthInfo, req types.CheckRequest, folder string) (types.CheckResponse, error) {
	m.checkCalled = true
	return m.checkFunc(id, &req, folder)
}

// Compile records the call and delegates to compileFunc.
func (m *fakeAccessClient) Compile(ctx context.Context, id types.AuthInfo, req types.ListRequest) (types.ItemChecker, types.Zookie, error) {
	m.compileCalled = true
	return m.compileFunc(id, req)
}

// Compile-time check that fakeAccessClient satisfies types.AccessClient.
var _ types.AccessClient = (*fakeAccessClient)(nil)
type fakeParentProvider struct {
hasParent bool
getParentCalled bool
@@ -1,48 +0,0 @@
package authorizer
import (
"context"
"github.com/go-jose/go-jose/v4/jwt"
"github.com/grafana/authlib/authn"
"github.com/grafana/authlib/types"
"github.com/grafana/grafana/pkg/apimachinery/identity"
)
var (
// Shared test user identity
user = authn.NewIDTokenAuthInfo(
authn.Claims[authn.AccessTokenClaims]{
Claims: jwt.Claims{Issuer: "grafana",
Subject: types.NewTypeID(types.TypeAccessPolicy, "grafana"), Audience: []string{"iam.grafana.app"}},
Rest: authn.AccessTokenClaims{
Namespace: "*",
Permissions: identity.ServiceIdentityClaims.Rest.Permissions,
DelegatedPermissions: identity.ServiceIdentityClaims.Rest.DelegatedPermissions,
},
}, &authn.Claims[authn.IDTokenClaims]{
Claims: jwt.Claims{Subject: types.NewTypeID(types.TypeUser, "u001")},
Rest: authn.IDTokenClaims{Namespace: "org-2", Identifier: "u001", Type: types.TypeUser},
},
)
)
var _ types.AccessClient = (*fakeAccessClient)(nil)
// fakeAccessClient is a mock implementation of claims.AccessClient
type fakeAccessClient struct {
checkCalled bool
checkFunc func(id types.AuthInfo, req *types.CheckRequest, folder string) (types.CheckResponse, error)
compileCalled bool
compileFunc func(id types.AuthInfo, req types.ListRequest) (types.ItemChecker, types.Zookie, error)
}
func (m *fakeAccessClient) Check(ctx context.Context, id types.AuthInfo, req types.CheckRequest, folder string) (types.CheckResponse, error) {
m.checkCalled = true
return m.checkFunc(id, &req, folder)
}
func (m *fakeAccessClient) Compile(ctx context.Context, id types.AuthInfo, req types.ListRequest) (types.ItemChecker, types.Zookie, error) {
m.compileCalled = true
return m.compileFunc(id, req)
}
+47 -119
View File
@@ -246,8 +246,6 @@ func (b *IdentityAccessManagementAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *ge
//nolint:staticcheck // not yet migrated to OpenFeature
enableZanzanaSync := b.features.IsEnabledGlobally(featuremgmt.FlagKubernetesAuthzZanzanaSync)
//nolint:staticcheck // not yet migrated to OpenFeature
enableAuthzApis := b.features.IsEnabledGlobally(featuremgmt.FlagKubernetesAuthzApis)
// teams + users must have shorter names because they are often used as part of another name
opts.StorageOptsRegister(iamv0.TeamResourceInfo.GroupResource(), apistore.StorageOptions{
@@ -257,60 +255,6 @@ func (b *IdentityAccessManagementAPIBuilder) UpdateAPIGroupInfo(apiGroupInfo *ge
MaximumNameLength: 80,
})
if err := b.UpdateTeamsAPIGroup(opts, storage); err != nil {
return err
}
if err := b.UpdateTeamBindingsAPIGroup(opts, storage, enableZanzanaSync); err != nil {
return err
}
if err := b.UpdateUsersAPIGroup(opts, storage, enableZanzanaSync); err != nil {
return err
}
if err := b.UpdateServiceAccountsAPIGroup(opts, storage); err != nil {
return err
}
// SSO settings apis
if b.ssoLegacyStore != nil {
ssoResource := legacyiamv0.SSOSettingResourceInfo
storage[ssoResource.StoragePath()] = b.ssoLegacyStore
}
if err := b.UpdateExternalGroupMappingAPIGroup(apiGroupInfo, opts, storage); err != nil {
return err
}
if enableAuthzApis {
// v0alpha1
if err := b.UpdateCoreRolesAPIGroup(apiGroupInfo, opts, storage, enableZanzanaSync); err != nil {
return err
}
// Role registration is delegated to the RoleApiInstaller
if err := b.roleApiInstaller.RegisterStorage(apiGroupInfo, &opts, storage); err != nil {
return err
}
if err := b.UpdateRoleBindingsAPIGroup(apiGroupInfo, opts, storage, enableZanzanaSync); err != nil {
return err
}
}
//nolint:staticcheck // not yet migrated to OpenFeature
if b.features.IsEnabledGlobally(featuremgmt.FlagKubernetesAuthzResourcePermissionApis) {
if err := b.UpdateResourcePermissionsAPIGroup(apiGroupInfo, opts, storage, enableZanzanaSync); err != nil {
return err
}
}
apiGroupInfo.VersionedResourcesStorageMap[legacyiamv0.VERSION] = storage
return nil
}
func (b *IdentityAccessManagementAPIBuilder) UpdateTeamsAPIGroup(opts builder.APIGroupOptions, storage map[string]rest.Storage) error {
teamResource := iamv0.TeamResourceInfo
teamUniStore, err := grafanaregistry.NewRegistryStore(opts.Scheme, teamResource, opts.OptsGetter)
if err != nil {
@@ -332,10 +276,6 @@ func (b *IdentityAccessManagementAPIBuilder) UpdateTeamsAPIGroup(opts builder.AP
storage[teamResource.StoragePath("groups")] = b.teamGroupsHandler
}
return nil
}
func (b *IdentityAccessManagementAPIBuilder) UpdateTeamBindingsAPIGroup(opts builder.APIGroupOptions, storage map[string]rest.Storage, enableZanzanaSync bool) error {
teamBindingResource := iamv0.TeamBindingResourceInfo
teamBindingUniStore, err := grafanaregistry.NewRegistryStore(opts.Scheme, teamBindingResource, opts.OptsGetter)
if err != nil {
@@ -358,10 +298,8 @@ func (b *IdentityAccessManagementAPIBuilder) UpdateTeamBindingsAPIGroup(opts bui
}
storage[teamBindingResource.StoragePath()] = dw
}
return nil
}
func (b *IdentityAccessManagementAPIBuilder) UpdateUsersAPIGroup(opts builder.APIGroupOptions, storage map[string]rest.Storage, enableZanzanaSync bool) error {
// User store registration
userResource := iamv0.UserResourceInfo
userUniStore, err := grafanaregistry.NewRegistryStore(opts.Scheme, userResource, opts.OptsGetter)
if err != nil {
@@ -387,10 +325,7 @@ func (b *IdentityAccessManagementAPIBuilder) UpdateUsersAPIGroup(opts builder.AP
storage[userResource.StoragePath("teams")] = user.NewLegacyTeamMemberREST(b.store)
return nil
}
func (b *IdentityAccessManagementAPIBuilder) UpdateServiceAccountsAPIGroup(opts builder.APIGroupOptions, storage map[string]rest.Storage) error {
// Service Accounts store registration
saResource := iamv0.ServiceAccountResourceInfo
saUniStore, err := grafanaregistry.NewRegistryStore(opts.Scheme, saResource, opts.OptsGetter)
if err != nil {
@@ -408,17 +343,17 @@ func (b *IdentityAccessManagementAPIBuilder) UpdateServiceAccountsAPIGroup(opts
storage[saResource.StoragePath("tokens")] = serviceaccount.NewLegacyTokenREST(b.store)
return nil
}
if b.ssoLegacyStore != nil {
ssoResource := legacyiamv0.SSOSettingResourceInfo
storage[ssoResource.StoragePath()] = b.ssoLegacyStore
}
func (b *IdentityAccessManagementAPIBuilder) UpdateExternalGroupMappingAPIGroup(apiGroupInfo *genericapiserver.APIGroupInfo, opts builder.APIGroupOptions, storage map[string]rest.Storage) error {
extGroupMappingResource := iamv0.ExternalGroupMappingResourceInfo
extGroupMappingUniStore, err := grafanaregistry.NewRegistryStore(opts.Scheme, extGroupMappingResource, opts.OptsGetter)
if err != nil {
return err
}
var extGroupMappingStore storewrapper.K8sStorage = extGroupMappingUniStore
storage[extGroupMappingResource.StoragePath()] = extGroupMappingUniStore
if b.externalGroupMappingStorage != nil {
extGroupMappingLegacyStore, err := NewLocalStore(extGroupMappingResource, apiGroupInfo.Scheme, opts.OptsGetter, b.reg, b.accessClient, b.externalGroupMappingStorage)
@@ -430,57 +365,50 @@ func (b *IdentityAccessManagementAPIBuilder) UpdateExternalGroupMappingAPIGroup(
if err != nil {
return err
}
storage[extGroupMappingResource.StoragePath()] = dw
}
var ok bool
extGroupMappingStore, ok = dw.(storewrapper.K8sStorage)
if !ok {
return fmt.Errorf("expected storewrapper.K8sStorage, got %T", dw)
//nolint:staticcheck // not yet migrated to OpenFeature
if b.features.IsEnabledGlobally(featuremgmt.FlagKubernetesAuthzApis) {
// v0alpha1
coreRoleStore, err := NewLocalStore(iamv0.CoreRoleInfo, apiGroupInfo.Scheme, opts.OptsGetter, b.reg, b.accessClient, b.coreRolesStorage)
if err != nil {
return err
}
if enableZanzanaSync {
b.logger.Info("Enabling hooks for CoreRole to sync to Zanzana")
h := NewRoleHooks(b.zClient, b.zTickets, b.logger)
coreRoleStore.AfterCreate = h.AfterRoleCreate
coreRoleStore.AfterDelete = h.AfterRoleDelete
coreRoleStore.BeginUpdate = h.BeginRoleUpdate
}
storage[iamv0.CoreRoleInfo.StoragePath()] = coreRoleStore
// Role registration is delegated to the RoleApiInstaller
if err := b.roleApiInstaller.RegisterStorage(apiGroupInfo, &opts, storage); err != nil {
return err
}
roleBindingStore, err := NewLocalStore(iamv0.RoleBindingInfo, apiGroupInfo.Scheme, opts.OptsGetter, b.reg, b.accessClient, b.roleBindingsStorage)
if err != nil {
return err
}
if enableZanzanaSync {
b.logger.Info("Enabling hooks for RoleBinding to sync to Zanzana")
roleBindingStore.AfterCreate = b.AfterRoleBindingCreate
roleBindingStore.AfterDelete = b.AfterRoleBindingDelete
roleBindingStore.BeginUpdate = b.BeginRoleBindingUpdate
}
storage[iamv0.RoleBindingInfo.StoragePath()] = roleBindingStore
}
//nolint:staticcheck // not yet migrated to OpenFeature
if b.features.IsEnabledGlobally(featuremgmt.FlagKubernetesAuthzResourcePermissionApis) {
if err := b.UpdateResourcePermissionsAPIGroup(apiGroupInfo, opts, storage, enableZanzanaSync); err != nil {
return err
}
}
authzWrapper := storewrapper.New(extGroupMappingStore, iamauthorizer.NewExternalGroupMappingAuthorizer(b.accessClient))
storage[extGroupMappingResource.StoragePath()] = authzWrapper
return nil
}
func (b *IdentityAccessManagementAPIBuilder) UpdateCoreRolesAPIGroup(
apiGroupInfo *genericapiserver.APIGroupInfo,
opts builder.APIGroupOptions,
storage map[string]rest.Storage,
enableZanzanaSync bool,
) error {
coreRoleStore, err := NewLocalStore(iamv0.CoreRoleInfo, apiGroupInfo.Scheme, opts.OptsGetter, b.reg, b.accessClient, b.coreRolesStorage)
if err != nil {
return err
}
if enableZanzanaSync {
b.logger.Info("Enabling hooks for CoreRole to sync to Zanzana")
h := NewRoleHooks(b.zClient, b.zTickets, b.logger)
coreRoleStore.AfterCreate = h.AfterRoleCreate
coreRoleStore.AfterDelete = h.AfterRoleDelete
coreRoleStore.BeginUpdate = h.BeginRoleUpdate
}
storage[iamv0.CoreRoleInfo.StoragePath()] = coreRoleStore
return nil
}
func (b *IdentityAccessManagementAPIBuilder) UpdateRoleBindingsAPIGroup(
apiGroupInfo *genericapiserver.APIGroupInfo,
opts builder.APIGroupOptions,
storage map[string]rest.Storage,
enableZanzanaSync bool,
) error {
roleBindingStore, err := NewLocalStore(iamv0.RoleBindingInfo, apiGroupInfo.Scheme, opts.OptsGetter, b.reg, b.accessClient, b.roleBindingsStorage)
if err != nil {
return err
}
if enableZanzanaSync {
b.logger.Info("Enabling hooks for RoleBinding to sync to Zanzana")
roleBindingStore.AfterCreate = b.AfterRoleBindingCreate
roleBindingStore.AfterDelete = b.AfterRoleBindingDelete
roleBindingStore.BeginUpdate = b.BeginRoleBindingUpdate
}
storage[iamv0.RoleBindingInfo.StoragePath()] = roleBindingStore
apiGroupInfo.VersionedResourcesStorageMap[legacyiamv0.VERSION] = storage
return nil
}
@@ -208,11 +208,6 @@ func (s *preferenceStorage) save(ctx context.Context, obj runtime.Object) (runti
// Create implements rest.Creater.
func (s *preferenceStorage) Create(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) {
if createValidation != nil {
if err := createValidation(ctx, obj); err != nil {
return nil, err
}
}
return s.save(ctx, obj)
}
@@ -228,12 +223,6 @@ func (s *preferenceStorage) Update(ctx context.Context, name string, objInfo res
return nil, false, err
}
if updateValidation != nil {
if err := updateValidation(ctx, obj, old); err != nil {
return nil, false, err
}
}
obj, err = s.save(ctx, obj)
return obj, false, err
}
+1 -35
View File
@@ -1,14 +1,9 @@
package preferences
import (
"context"
"fmt"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/registry/rest"
genericapiserver "k8s.io/apiserver/pkg/server"
@@ -29,8 +24,7 @@ import (
)
var (
_ builder.APIGroupBuilder = (*APIBuilder)(nil)
_ builder.APIGroupValidation = (*APIBuilder)(nil)
_ builder.APIGroupBuilder = (*APIBuilder)(nil)
)
type APIBuilder struct {
@@ -114,31 +108,3 @@ func (b *APIBuilder) GetAPIRoutes(gv schema.GroupVersion) *builder.APIRoutes {
defs := b.GetOpenAPIDefinitions()(func(path string) spec.Ref { return spec.Ref{} })
return b.merger.GetAPIRoutes(defs)
}
// Validate validates that the preference object has valid theme and timezone (if specified)
func (b *APIBuilder) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error {
if a.GetResource().Resource != "preferences" {
return nil
}
op := a.GetOperation()
if op != admission.Create && op != admission.Update {
return nil
}
obj := a.GetObject()
p, ok := obj.(*preferences.Preferences)
if !ok {
return apierrors.NewBadRequest(fmt.Sprintf("expected Preferences object, got %T", obj))
}
if p.Spec.Timezone != nil && !pref.IsValidTimezone(*p.Spec.Timezone) {
return apierrors.NewBadRequest("invalid timezone: must be a valid IANA timezone (e.g., America/New_York), 'utc', 'browser', or empty string")
}
if p.Spec.Theme != nil && *p.Spec.Theme != "" && !pref.IsValidThemeID(*p.Spec.Theme) {
return apierrors.NewBadRequest("invalid theme")
}
return nil
}
@@ -1,69 +0,0 @@
package provisioning
import (
"context"
"net/http"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/registry/rest"
"github.com/grafana/grafana-app-sdk/logging"
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
)
// connectionRepositoriesConnector serves the "repositories" subresource of a
// provisioning Connection over HTTP GET.
type connectionRepositoriesConnector struct{}

// NewConnectionRepositoriesConnector returns a new, stateless connector.
func NewConnectionRepositoriesConnector() *connectionRepositoriesConnector {
	return &connectionRepositoriesConnector{}
}

// New implements rest.Storage; the connector's object is a repository list.
func (*connectionRepositoriesConnector) New() runtime.Object {
	return &provisioning.ExternalRepositoryList{}
}

// Destroy implements rest.Storage; there is no state to release.
func (*connectionRepositoriesConnector) Destroy() {}

// ProducesMIMETypes implements rest.StorageMetadata.
func (*connectionRepositoriesConnector) ProducesMIMETypes(verb string) []string {
	return []string{"application/json"}
}

// ProducesObject implements rest.StorageMetadata.
func (*connectionRepositoriesConnector) ProducesObject(verb string) any {
	return &provisioning.ExternalRepositoryList{}
}

// ConnectMethods implements rest.Connecter; only GET is accepted.
func (*connectionRepositoriesConnector) ConnectMethods() []string {
	return []string{http.MethodGet}
}

// NewConnectOptions implements rest.Connecter; no options, no path suffix.
func (*connectionRepositoriesConnector) NewConnectOptions() (runtime.Object, bool, string) {
	return nil, false, ""
}
// Connect implements rest.Connecter. It returns the HTTP handler for the
// repositories endpoint; listing is not implemented yet, so every request
// currently ends in an error response.
func (c *connectionRepositoriesConnector) Connect(ctx context.Context, name string, opts runtime.Object, responder rest.Responder) (http.Handler, error) {
	logger := logging.FromContext(ctx).With("logger", "connection-repositories-connector", "connection_name", name)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Defensive method filter; ConnectMethods already restricts to GET.
		if r.Method != http.MethodGet {
			responder.Error(apierrors.NewMethodNotSupported(provisioning.ConnectionResourceInfo.GroupResource(), r.Method))
			return
		}
		logger.Debug("repositories endpoint called but not yet implemented")
		// TODO: Implement repository listing from external git provider
		// This will require:
		// 1. Get the Connection object using logging.Context(r.Context(), logger)
		// 2. Use the connection credentials to authenticate with the git provider
		// 3. List repositories from the provider (GitHub, GitLab, Bitbucket)
		// 4. Return ExternalRepositoryList with Name, Owner, and URL for each repository
		// NOTE(review): NewMethodNotSupported's second argument is meant to be
		// an HTTP action/verb; passing a sentence here yields an odd error
		// string. A 501-style "not implemented" error may fit better — confirm.
		responder.Error(apierrors.NewMethodNotSupported(provisioning.ConnectionResourceInfo.GroupResource(), "repositories endpoint not yet implemented"))
	}), nil
}
var (
_ rest.Storage = (*connectionRepositoriesConnector)(nil)
_ rest.Connecter = (*connectionRepositoriesConnector)(nil)
_ rest.StorageMetadata = (*connectionRepositoriesConnector)(nil)
)
@@ -1,101 +0,0 @@
package provisioning
import (
"context"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/require"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
)
// TestConnectionRepositoriesConnector exercises the rest.Storage /
// rest.Connecter surface of the connector: produced types, allowed methods,
// connect options, and the handler's rejection behavior for both non-GET
// requests and the not-yet-implemented GET path.
func TestConnectionRepositoriesConnector(t *testing.T) {
	connector := NewConnectionRepositoriesConnector()
	t.Run("New returns ExternalRepositoryList", func(t *testing.T) {
		obj := connector.New()
		require.IsType(t, &provisioning.ExternalRepositoryList{}, obj)
	})
	t.Run("ProducesMIMETypes returns application/json", func(t *testing.T) {
		types := connector.ProducesMIMETypes("GET")
		require.Equal(t, []string{"application/json"}, types)
	})
	t.Run("ProducesObject returns ExternalRepositoryList", func(t *testing.T) {
		obj := connector.ProducesObject("GET")
		require.IsType(t, &provisioning.ExternalRepositoryList{}, obj)
	})
	t.Run("ConnectMethods returns GET", func(t *testing.T) {
		methods := connector.ConnectMethods()
		require.Equal(t, []string{http.MethodGet}, methods)
	})
	t.Run("NewConnectOptions returns no path component", func(t *testing.T) {
		obj, hasPath, path := connector.NewConnectOptions()
		require.Nil(t, obj)
		require.False(t, hasPath)
		require.Empty(t, path)
	})
	t.Run("Connect returns handler that rejects non-GET methods", func(t *testing.T) {
		ctx := context.Background()
		responder := &mockResponder{}
		handler, err := connector.Connect(ctx, "test-connection", nil, responder)
		require.NoError(t, err)
		require.NotNil(t, handler)
		// Test POST method (should be rejected)
		req := httptest.NewRequest(http.MethodPost, "/", nil)
		w := httptest.NewRecorder()
		handler.ServeHTTP(w, req)
		require.True(t, responder.called)
		require.NotNil(t, responder.err)
		require.True(t, apierrors.IsMethodNotSupported(responder.err))
	})
	t.Run("Connect returns handler that returns not implemented for GET", func(t *testing.T) {
		ctx := context.Background()
		responder := &mockResponder{}
		handler, err := connector.Connect(ctx, "test-connection", nil, responder)
		require.NoError(t, err)
		require.NotNil(t, handler)
		// Test GET method (should return not implemented)
		req := httptest.NewRequest(http.MethodGet, "/", nil)
		w := httptest.NewRecorder()
		handler.ServeHTTP(w, req)
		require.True(t, responder.called)
		require.NotNil(t, responder.err)
		require.True(t, apierrors.IsMethodNotSupported(responder.err))
		require.Contains(t, responder.err.Error(), "not yet implemented")
	})
}
// mockResponder implements rest.Responder for testing. It records whether the
// responder was invoked and which object/error/status it received so tests
// can assert on the handler's output.
type mockResponder struct {
	called bool           // true once Object or Error has been invoked
	err    error          // error passed to Error, if any
	obj    runtime.Object // object passed to Object, if any
	code   int            // status code passed to Object, if any
}
// Object records a successful response together with its status code.
func (m *mockResponder) Object(statusCode int, obj runtime.Object) {
	m.called, m.code, m.obj = true, statusCode, obj
}
// Error records the error passed to the responder.
func (m *mockResponder) Error(err error) {
	m.called, m.err = true, err
}
@@ -1,411 +0,0 @@
package controller
import (
"context"
"errors"
"fmt"
"net"
"strings"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"github.com/grafana/grafana-app-sdk/logging"
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
connectionvalidation "github.com/grafana/grafana/apps/provisioning/pkg/connection"
client "github.com/grafana/grafana/apps/provisioning/pkg/generated/clientset/versioned/typed/provisioning/v0alpha1"
informer "github.com/grafana/grafana/apps/provisioning/pkg/generated/informers/externalversions/provisioning/v0alpha1"
listers "github.com/grafana/grafana/apps/provisioning/pkg/generated/listers/provisioning/v0alpha1"
"k8s.io/apimachinery/pkg/fields"
)
// connectionLoggerName is the logger name attached to all controller log lines.
const connectionLoggerName = "provisioning-connection-controller"
const (
	// connectionMaxAttempts caps how often a queue item is retried before being dropped.
	connectionMaxAttempts = 3
	// connectionHealthyDuration defines how recent a health check must be to be considered "recent" when healthy
	connectionHealthyDuration = 5 * time.Minute
	// connectionUnhealthyDuration defines how recent a health check must be to be considered "recent" when unhealthy
	connectionUnhealthyDuration = 1 * time.Minute
)
// connectionQueueItem is the unit of work queued by the controller: a
// namespace/name cache key plus a per-item retry counter.
type connectionQueueItem struct {
	key      string // cache key in namespace/name form
	attempts int    // number of failed processing attempts so far
}
// ConnectionStatusPatcher defines the interface for updating connection status.
// Implementations apply the given JSON-patch-style operations to the status of conn.
//
//go:generate mockery --name=ConnectionStatusPatcher
type ConnectionStatusPatcher interface {
	Patch(ctx context.Context, conn *provisioning.Connection, patchOperations ...map[string]interface{}) error
}
// RepositoryLister interface for listing repositories. Used during deletion
// to find repositories that still reference a connection.
type RepositoryLister interface {
	List(ctx context.Context, options *internalversion.ListOptions) (runtime.Object, error)
}
// ConnectionController controls Connection resources. It watches connections
// via an informer, reconciles their status (state, health, observed
// generation), and enforces the block-deletion finalizer.
type ConnectionController struct {
	client        client.ProvisioningV0alpha1Interface // API client used for finalizer/undelete patches
	connLister    listers.ConnectionLister             // read-only cache of connections
	connSynced    cache.InformerSynced                 // reports whether the informer cache has synced
	logger        logging.Logger
	statusPatcher ConnectionStatusPatcher // applies status patch operations
	repoLister    RepositoryLister        // used to find repositories referencing a connection
	queue         workqueue.TypedRateLimitingInterface[*connectionQueueItem]
}
// NewConnectionController wires a ConnectionController to the given client,
// informer, status patcher and repository lister, and registers informer
// event handlers that enqueue connections on add and update.
func NewConnectionController(
	provisioningClient client.ProvisioningV0alpha1Interface,
	connInformer informer.ConnectionInformer,
	statusPatcher ConnectionStatusPatcher,
	repoLister RepositoryLister,
) (*ConnectionController, error) {
	queue := workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.DefaultTypedControllerRateLimiter[*connectionQueueItem](),
		workqueue.TypedRateLimitingQueueConfig[*connectionQueueItem]{
			Name: "provisioningConnectionController",
		},
	)
	controller := &ConnectionController{
		client:        provisioningClient,
		connLister:    connInformer.Lister(),
		connSynced:    connInformer.Informer().HasSynced,
		queue:         queue,
		statusPatcher: statusPatcher,
		repoLister:    repoLister,
		logger:        logging.DefaultLogger.With("logger", connectionLoggerName),
	}
	// Enqueue on create and update; deletes are observed through the
	// DeletionTimestamp during normal processing.
	handlers := cache.ResourceEventHandlerFuncs{
		AddFunc: controller.enqueue,
		UpdateFunc: func(oldObj, newObj interface{}) {
			controller.enqueue(newObj)
		},
	}
	if _, err := connInformer.Informer().AddEventHandler(handlers); err != nil {
		return nil, err
	}
	return controller, nil
}
// enqueue derives the namespace/name key for obj and adds a fresh work item
// to the queue; objects without a derivable key are logged and skipped.
func (cc *ConnectionController) enqueue(obj interface{}) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		cc.logger.Error("failed to get key for object", "error", err)
		return
	}
	item := &connectionQueueItem{key: key}
	cc.queue.Add(item)
}
// Run starts the ConnectionController with workerCount workers and blocks
// until ctx is cancelled, after which the queue is shut down.
func (cc *ConnectionController) Run(ctx context.Context, workerCount int) {
	defer utilruntime.HandleCrash()
	defer cc.queue.ShutDown()
	cc.logger.Info("starting connection controller", "workers", workerCount)
	for worker := 0; worker < workerCount; worker++ {
		// Each worker loops over the queue, restarting a second after any panic.
		go wait.UntilWithContext(ctx, cc.runWorker, time.Second)
	}
	<-ctx.Done()
	cc.logger.Info("shutting down connection controller")
}
// runWorker drains the work queue until it is shut down.
func (cc *ConnectionController) runWorker(ctx context.Context) {
	for {
		if !cc.processNextWorkItem(ctx) {
			return
		}
	}
}
// processNextWorkItem pulls one item off the queue and processes it,
// returning false only when the queue has been shut down.
//
// Retry policy (order matters): on failure the item's attempt counter is
// bumped first; the item is dropped once connectionMaxAttempts is reached,
// dropped immediately for non-transient errors, and re-queued with rate
// limiting otherwise.
func (cc *ConnectionController) processNextWorkItem(ctx context.Context) bool {
	item, quit := cc.queue.Get()
	if quit {
		return false
	}
	defer cc.queue.Done(item)
	logger := logging.FromContext(ctx).With("work_key", item.key)
	logger.Info("ConnectionController processing key")
	err := cc.process(ctx, item)
	if err == nil {
		cc.queue.Forget(item)
		return true
	}
	// Attempts are tracked on the item itself, so re-queued items carry
	// their failure count forward.
	item.attempts++
	logger = logger.With("error", err, "attempts", item.attempts)
	logger.Error("ConnectionController failed to process key")
	if item.attempts >= connectionMaxAttempts {
		logger.Error("ConnectionController failed too many times")
		cc.queue.Forget(item)
		return true
	}
	// Check if error is transient and should be retried
	if !isTransientError(err) {
		logger.Info("ConnectionController will not retry (non-transient error)")
		cc.queue.Forget(item)
		return true
	}
	logger.Info("ConnectionController will retry (transient error)")
	utilruntime.HandleError(fmt.Errorf("%v failed with: %v", item, err))
	cc.queue.AddRateLimited(item)
	return true
}
// process reconciles the connection identified by item.key. Deleted
// connections are routed to handleDelete; otherwise the status is patched
// when either the spec changed (generation drift) or the last health check
// is stale. Returns an error to trigger the queue's retry policy.
func (cc *ConnectionController) process(ctx context.Context, item *connectionQueueItem) error {
	logger := cc.logger.With("key", item.key)
	ctx = logging.Context(ctx, logger)
	namespace, name, err := cache.SplitMetaNamespaceKey(item.key)
	if err != nil {
		return err
	}
	conn, err := cc.connLister.Connections(namespace).Get(name)
	switch {
	case apierrors.IsNotFound(err):
		return errors.New("connection not found in cache")
	case err != nil:
		return err
	}
	// Handle deletion if being deleted
	if conn.DeletionTimestamp != nil {
		return cc.handleDelete(ctx, conn)
	}
	hasSpecChanged := conn.Generation != conn.Status.ObservedGeneration
	shouldCheckHealth := cc.shouldCheckHealth(conn)
	// Determine the main triggering condition
	switch {
	case hasSpecChanged:
		logger.Info("spec changed, reconciling", "generation", conn.Generation, "observedGeneration", conn.Status.ObservedGeneration)
	case shouldCheckHealth:
		logger.Info("health is stale, refreshing", "lastChecked", conn.Status.Health.Checked, "healthy", conn.Status.Health.Healthy)
	default:
		logger.Debug("skipping as conditions are not met", "generation", conn.Generation, "observedGeneration", conn.Status.ObservedGeneration)
		return nil
	}
	// For now, just update the state to connected, health to healthy, and observed generation
	// Future: Add credential validation logic here
	// NOTE(review): health is asserted true without contacting the provider;
	// this is a placeholder until credential validation lands.
	patchOperations := []map[string]interface{}{}
	// Only update observedGeneration when spec changes
	if hasSpecChanged {
		patchOperations = append(patchOperations, map[string]interface{}{
			"op":    "replace",
			"path":  "/status/observedGeneration",
			"value": conn.Generation,
		})
	}
	// Always update state and health
	patchOperations = append(patchOperations,
		map[string]interface{}{
			"op":    "replace",
			"path":  "/status/state",
			"value": provisioning.ConnectionStateConnected,
		},
		map[string]interface{}{
			"op":    "replace",
			"path":  "/status/health",
			"value": provisioning.HealthStatus{
				Healthy: true,
				Checked: time.Now().UnixMilli(),
			},
		},
	)
	if err := cc.statusPatcher.Patch(ctx, conn, patchOperations...); err != nil {
		return fmt.Errorf("failed to update connection status: %w", err)
	}
	logger.Info("connection reconciled successfully")
	return nil
}
// handleDelete processes a connection that carries a DeletionTimestamp.
// If the block-deletion finalizer is present, it pages through repositories
// whose spec.connection.name references this connection: deletion is blocked
// (by keeping the finalizer and returning an error) while any exist, and the
// finalizer is removed once none remain. If finalizer removal fails, the
// connection is "undeleted" so the operation can be retried later.
func (cc *ConnectionController) handleDelete(ctx context.Context, conn *provisioning.Connection) error {
	logger := logging.FromContext(ctx)
	logger.Info("handle connection delete")
	// Check if finalizer is present
	hasFinalizer := false
	for _, f := range conn.Finalizers {
		if f == connectionvalidation.BlockDeletionFinalizer {
			hasFinalizer = true
			break
		}
	}
	if !hasFinalizer {
		logger.Info("no finalizer to process")
		return nil
	}
	// Check if any repositories reference this connection using field selector
	fieldSelector := fields.OneTermEqualSelector("spec.connection.name", conn.Name)
	var allRepos []provisioning.Repository
	continueToken := ""
	var err error
	// Page through results 100 at a time until the continue token is exhausted.
	for {
		var obj runtime.Object
		obj, err = cc.repoLister.List(ctx, &internalversion.ListOptions{
			Limit:         100,
			Continue:      continueToken,
			FieldSelector: fieldSelector,
		})
		if err != nil {
			logger.Error("failed to check for connected repositories", "error", err)
			return fmt.Errorf("check for connected repositories: %w", err)
		}
		repositoryList, ok := obj.(*provisioning.RepositoryList)
		if !ok {
			logger.Error("expected repository list", "type", fmt.Sprintf("%T", obj))
			return fmt.Errorf("expected repository list, got %T", obj)
		}
		allRepos = append(allRepos, repositoryList.Items...)
		continueToken = repositoryList.GetContinue()
		if continueToken == "" {
			break
		}
	}
	if len(allRepos) > 0 {
		repoNames := make([]string, 0, len(allRepos))
		for _, repo := range allRepos {
			repoNames = append(repoNames, repo.Name)
		}
		logger.Info("cannot delete connection while repositories reference it", "repositories", repoNames)
		// Don't remove finalizer - this will prevent deletion
		// The connection will remain in deletion state until repositories are removed
		return fmt.Errorf("cannot delete connection while repositories are using it: %s", strings.Join(repoNames, ", "))
	}
	// No repositories reference this connection, remove finalizer to allow deletion
	// NOTE(review): this patch removes the entire finalizers array, not just
	// BlockDeletionFinalizer — confirm no other finalizers can be present.
	logger.Info("no repositories reference connection, removing finalizer")
	_, err = cc.client.Connections(conn.GetNamespace()).
		Patch(ctx, conn.Name, types.JSONPatchType, []byte(`[
	{ "op": "remove", "path": "/metadata/finalizers" }
	]`), metav1.PatchOptions{
			FieldManager: "provisioning-connection-controller",
		})
	if err != nil {
		// If we can't remove the finalizer, undelete the connection so it can be retried later
		// This prevents the connection from being stuck in deletion state
		logger.Error("failed to remove finalizer, undeleting connection", "error", err)
		undeleteErr := cc.undeleteConnection(ctx, conn, err)
		if undeleteErr != nil {
			return fmt.Errorf("remove finalizer: %w; failed to undelete: %w", err, undeleteErr)
		}
		return fmt.Errorf("remove finalizer: %w (connection has been undeleted, deletion can be retried)", err)
	}
	return nil
}
// undeleteConnection clears the connection's DeletionTimestamp after a failed
// finalizer removal, so the deletion can be retried instead of leaving the
// object stuck in a terminating state.
func (cc *ConnectionController) undeleteConnection(ctx context.Context, conn *provisioning.Connection, originalErr error) error {
	logger := logging.FromContext(ctx)
	logger.Info("undeleting connection due to finalizer removal failure", "error", originalErr.Error())
	// JSON-patch the deletionTimestamp away so the API server treats the
	// connection as live again.
	patchOpts := metav1.PatchOptions{
		FieldManager: "provisioning-connection-controller",
	}
	_, err := cc.client.Connections(conn.GetNamespace()).
		Patch(ctx, conn.Name, types.JSONPatchType, []byte(`[
	{ "op": "remove", "path": "/metadata/deletionTimestamp" }
	]`), patchOpts)
	if err != nil {
		logger.Error("failed to undelete connection", "error", err)
		return fmt.Errorf("undelete connection: %w", err)
	}
	logger.Info("connection undeleted successfully, deletion can be retried")
	return nil
}
// isTransientError reports whether err is worth retrying: Kubernetes API
// availability/timeout errors, network timeouts, and net.OpError connection
// failures count as transient; everything else (including nil) does not.
func isTransientError(err error) bool {
	if err == nil {
		return false
	}
	// Kubernetes API errors that indicate temporary server-side trouble.
	switch {
	case apierrors.IsServiceUnavailable(err),
		apierrors.IsServerTimeout(err),
		apierrors.IsTooManyRequests(err),
		apierrors.IsInternalError(err),
		apierrors.IsTimeout(err):
		return true
	}
	// Network-level timeouts.
	var netErr net.Error
	if errors.As(err, &netErr) && netErr.Timeout() {
		return true
	}
	// Any wrapped net.OpError (e.g. connection refused/reset) is transient.
	var opErr *net.OpError
	return errors.As(err, &opErr)
}
// shouldCheckHealth reports whether a health check should run: either the
// spec changed since the last observed generation, or the previous health
// result is no longer recent.
func (cc *ConnectionController) shouldCheckHealth(conn *provisioning.Connection) bool {
	specChanged := conn.Generation != conn.Status.ObservedGeneration
	return specChanged || !cc.hasRecentHealthCheck(conn.Status.Health)
}
// hasRecentHealthCheck reports whether the last health check is still fresh.
// Healthy results stay valid for connectionHealthyDuration; unhealthy ones
// expire after the shorter connectionUnhealthyDuration. A zero Checked
// timestamp means no check has ever run.
func (cc *ConnectionController) hasRecentHealthCheck(healthStatus provisioning.HealthStatus) bool {
	if healthStatus.Checked == 0 {
		return false // Never checked
	}
	maxAge := connectionUnhealthyDuration
	if healthStatus.Healthy {
		maxAge = connectionHealthyDuration
	}
	return time.Since(time.UnixMilli(healthStatus.Checked)) <= maxAge
}
@@ -1,697 +0,0 @@
package controller
import (
"context"
"errors"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/rest"
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
connectionvalidation "github.com/grafana/grafana/apps/provisioning/pkg/connection"
applyconfiguration "github.com/grafana/grafana/apps/provisioning/pkg/generated/applyconfiguration/provisioning/v0alpha1"
client "github.com/grafana/grafana/apps/provisioning/pkg/generated/clientset/versioned/typed/provisioning/v0alpha1"
)
// TestConnectionController_shouldCheckHealth covers the decision matrix for
// re-checking health: generation drift, never-checked, and stale vs. recent
// results for healthy (5 min window) and unhealthy (1 min window) connections.
func TestConnectionController_shouldCheckHealth(t *testing.T) {
	testCases := []struct {
		name     string
		conn     *provisioning.Connection
		expected bool
	}{
		{
			name: "should check health when generation differs from observed",
			conn: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Generation: 2,
				},
				Status: provisioning.ConnectionStatus{
					ObservedGeneration: 1,
				},
			},
			expected: true,
		},
		{
			name: "should check health when never checked before",
			conn: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Generation: 1,
				},
				Status: provisioning.ConnectionStatus{
					ObservedGeneration: 1,
					Health: provisioning.HealthStatus{
						Checked: 0,
					},
				},
			},
			expected: true,
		},
		{
			name: "should check health when healthy check is stale (>5 min)",
			conn: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Generation: 1,
				},
				Status: provisioning.ConnectionStatus{
					ObservedGeneration: 1,
					Health: provisioning.HealthStatus{
						Healthy: true,
						Checked: time.Now().Add(-6 * time.Minute).UnixMilli(),
					},
				},
			},
			expected: true,
		},
		{
			name: "should check health when unhealthy check is stale (>1 min)",
			conn: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Generation: 1,
				},
				Status: provisioning.ConnectionStatus{
					ObservedGeneration: 1,
					Health: provisioning.HealthStatus{
						Healthy: false,
						Checked: time.Now().Add(-2 * time.Minute).UnixMilli(),
					},
				},
			},
			expected: true,
		},
		{
			name: "should not check health when healthy check is recent (<5 min)",
			conn: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Generation: 1,
				},
				Status: provisioning.ConnectionStatus{
					ObservedGeneration: 1,
					Health: provisioning.HealthStatus{
						Healthy: true,
						Checked: time.Now().Add(-2 * time.Minute).UnixMilli(),
					},
				},
			},
			expected: false,
		},
		{
			name: "should not check health when unhealthy check is recent (<1 min)",
			conn: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Generation: 1,
				},
				Status: provisioning.ConnectionStatus{
					ObservedGeneration: 1,
					Health: provisioning.HealthStatus{
						Healthy: false,
						Checked: time.Now().Add(-30 * time.Second).UnixMilli(),
					},
				},
			},
			expected: false,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// A zero-value controller suffices: shouldCheckHealth reads only the connection.
			cc := &ConnectionController{}
			result := cc.shouldCheckHealth(tc.conn)
			assert.Equal(t, tc.expected, result)
		})
	}
}
// TestConnectionController_hasRecentHealthCheck verifies the freshness window
// logic in isolation: never-checked, and recent/stale results for both
// healthy and unhealthy statuses.
func TestConnectionController_hasRecentHealthCheck(t *testing.T) {
	testCases := []struct {
		name         string
		healthStatus provisioning.HealthStatus
		expected     bool
	}{
		{
			name: "never checked",
			healthStatus: provisioning.HealthStatus{
				Checked: 0,
			},
			expected: false,
		},
		{
			name: "healthy and recent",
			healthStatus: provisioning.HealthStatus{
				Healthy: true,
				Checked: time.Now().Add(-2 * time.Minute).UnixMilli(),
			},
			expected: true,
		},
		{
			name: "healthy and stale",
			healthStatus: provisioning.HealthStatus{
				Healthy: true,
				Checked: time.Now().Add(-10 * time.Minute).UnixMilli(),
			},
			expected: false,
		},
		{
			name: "unhealthy and recent",
			healthStatus: provisioning.HealthStatus{
				Healthy: false,
				Checked: time.Now().Add(-30 * time.Second).UnixMilli(),
			},
			expected: true,
		},
		{
			name: "unhealthy and stale",
			healthStatus: provisioning.HealthStatus{
				Healthy: false,
				Checked: time.Now().Add(-2 * time.Minute).UnixMilli(),
			},
			expected: false,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			cc := &ConnectionController{}
			result := cc.hasRecentHealthCheck(tc.healthStatus)
			assert.Equal(t, tc.expected, result)
		})
	}
}
// TestConnectionController_reconcileConditions re-derives the triggering
// conditions used by process (deletion skip, spec drift, stale health) and
// checks they produce the expected reconcile decision for each scenario.
func TestConnectionController_reconcileConditions(t *testing.T) {
	testCases := []struct {
		name              string
		conn              *provisioning.Connection
		expectReconcile   bool
		expectSpecChanged bool
		description       string
	}{
		{
			name: "skip when being deleted",
			conn: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "test-conn",
					Namespace:         "default",
					DeletionTimestamp: &metav1.Time{Time: time.Now()},
				},
			},
			expectReconcile:   false,
			expectSpecChanged: false,
			description:       "deleted connections should be skipped",
		},
		{
			name: "skip when no changes needed",
			conn: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "test-conn",
					Namespace:  "default",
					Generation: 1,
				},
				Status: provisioning.ConnectionStatus{
					ObservedGeneration: 1,
					Health: provisioning.HealthStatus{
						Healthy: true,
						Checked: time.Now().UnixMilli(),
					},
				},
			},
			expectReconcile:   false,
			expectSpecChanged: false,
			description:       "no reconcile when generation matches and health is recent",
		},
		{
			name: "reconcile when spec changed",
			conn: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "test-conn",
					Namespace:  "default",
					Generation: 2,
				},
				Status: provisioning.ConnectionStatus{
					ObservedGeneration: 1,
					Health: provisioning.HealthStatus{
						Healthy: true,
						Checked: time.Now().UnixMilli(),
					},
				},
			},
			expectReconcile:   true,
			expectSpecChanged: true,
			description:       "reconcile when generation differs",
		},
		{
			name: "reconcile when health is stale",
			conn: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "test-conn",
					Namespace:  "default",
					Generation: 1,
				},
				Status: provisioning.ConnectionStatus{
					ObservedGeneration: 1,
					Health: provisioning.HealthStatus{
						Healthy: true,
						Checked: time.Now().Add(-10 * time.Minute).UnixMilli(),
					},
				},
			},
			expectReconcile:   true,
			expectSpecChanged: false,
			description:       "reconcile when health check is stale",
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			cc := &ConnectionController{}
			// Test the core reconciliation conditions
			if tc.conn.DeletionTimestamp != nil {
				assert.False(t, tc.expectReconcile, tc.description)
				return
			}
			// Mirrors the condition derivation inside ConnectionController.process.
			hasSpecChanged := tc.conn.Generation != tc.conn.Status.ObservedGeneration
			shouldCheckHealth := cc.shouldCheckHealth(tc.conn)
			needsReconcile := hasSpecChanged || shouldCheckHealth
			assert.Equal(t, tc.expectReconcile, needsReconcile, tc.description)
			assert.Equal(t, tc.expectSpecChanged, hasSpecChanged, "spec changed check")
		})
	}
}
// TestConnectionController_processNextWorkItem is currently a placeholder:
// it only asserts the controller can be constructed. The queue shutdown path
// is not actually exercised here — TODO(review): wire up a real queue to test
// that processNextWorkItem returns false after ShutDown.
func TestConnectionController_processNextWorkItem(t *testing.T) {
	t.Run("returns false when queue is shut down", func(t *testing.T) {
		cc := &ConnectionController{}
		// This test verifies the structure is correct
		assert.NotNil(t, cc)
	})
}
// mockRepositoryLister is a testify-backed stub of RepositoryLister.
type mockRepositoryLister struct {
	mock.Mock
}

// List returns whatever the test configured via On("List", ...), preserving
// a nil runtime.Object when the expectation supplies nil.
func (m *mockRepositoryLister) List(ctx context.Context, options *internalversion.ListOptions) (runtime.Object, error) {
	args := m.Called(ctx, options)
	result := args.Get(0)
	if result == nil {
		return nil, args.Error(1)
	}
	return result.(runtime.Object), args.Error(1)
}
// mockConnectionInterface is a testify-backed stub of client.ConnectionInterface.
type mockConnectionInterface struct {
	mock.Mock
}

// connResult unpacks recorded mock arguments into the (*Connection, error)
// pair shared by most ConnectionInterface methods, keeping a nil object nil.
func connResult(args mock.Arguments) (*provisioning.Connection, error) {
	if args.Get(0) == nil {
		return nil, args.Error(1)
	}
	return args.Get(0).(*provisioning.Connection), args.Error(1)
}

func (m *mockConnectionInterface) Create(ctx context.Context, connection *provisioning.Connection, opts metav1.CreateOptions) (*provisioning.Connection, error) {
	return connResult(m.Called(ctx, connection, opts))
}

func (m *mockConnectionInterface) Update(ctx context.Context, connection *provisioning.Connection, opts metav1.UpdateOptions) (*provisioning.Connection, error) {
	return connResult(m.Called(ctx, connection, opts))
}

func (m *mockConnectionInterface) UpdateStatus(ctx context.Context, connection *provisioning.Connection, opts metav1.UpdateOptions) (*provisioning.Connection, error) {
	return connResult(m.Called(ctx, connection, opts))
}

func (m *mockConnectionInterface) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
	return m.Called(ctx, name, opts).Error(0)
}

func (m *mockConnectionInterface) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
	return m.Called(ctx, opts, listOpts).Error(0)
}

func (m *mockConnectionInterface) Get(ctx context.Context, name string, opts metav1.GetOptions) (*provisioning.Connection, error) {
	return connResult(m.Called(ctx, name, opts))
}

func (m *mockConnectionInterface) List(ctx context.Context, opts metav1.ListOptions) (*provisioning.ConnectionList, error) {
	args := m.Called(ctx, opts)
	if args.Get(0) == nil {
		return nil, args.Error(1)
	}
	return args.Get(0).(*provisioning.ConnectionList), args.Error(1)
}

func (m *mockConnectionInterface) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
	args := m.Called(ctx, opts)
	if args.Get(0) == nil {
		return nil, args.Error(1)
	}
	return args.Get(0).(watch.Interface), args.Error(1)
}

func (m *mockConnectionInterface) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*provisioning.Connection, error) {
	return connResult(m.Called(ctx, name, pt, data, opts, subresources))
}

func (m *mockConnectionInterface) Apply(ctx context.Context, connection *applyconfiguration.ConnectionApplyConfiguration, opts metav1.ApplyOptions) (*provisioning.Connection, error) {
	return connResult(m.Called(ctx, connection, opts))
}

func (m *mockConnectionInterface) ApplyStatus(ctx context.Context, connection *applyconfiguration.ConnectionApplyConfiguration, opts metav1.ApplyOptions) (*provisioning.Connection, error) {
	return connResult(m.Called(ctx, connection, opts))
}
// mockProvisioningV0alpha1InterfaceForConnections is a mock implementation of
// client.ProvisioningV0alpha1Interface for connection tests. Only Connections
// is functional; the other accessors panic because the tests never call them.
type mockProvisioningV0alpha1InterfaceForConnections struct {
	mock.Mock
	connections *mockConnectionInterface // returned by Connections for every namespace
}

func (m *mockProvisioningV0alpha1InterfaceForConnections) RESTClient() rest.Interface {
	panic("not needed for testing")
}

func (m *mockProvisioningV0alpha1InterfaceForConnections) HistoricJobs(namespace string) client.HistoricJobInterface {
	panic("not needed for testing")
}

func (m *mockProvisioningV0alpha1InterfaceForConnections) Jobs(namespace string) client.JobInterface {
	panic("not needed for testing")
}

func (m *mockProvisioningV0alpha1InterfaceForConnections) Connections(namespace string) client.ConnectionInterface {
	return m.connections
}

func (m *mockProvisioningV0alpha1InterfaceForConnections) Repositories(namespace string) client.RepositoryInterface {
	panic("not needed for testing")
}
// TestConnectionController_handleDelete covers the finalizer workflow:
// no-op without the finalizer, deletion blocked while repositories reference
// the connection, finalizer removal when none do, error propagation from the
// repository lister, the undelete fallback when finalizer removal fails, and
// continue-token pagination of the repository listing.
func TestConnectionController_handleDelete(t *testing.T) {
	ctx := context.Background()
	tests := []struct {
		name                   string
		connection             *provisioning.Connection
		repoListerSetup        func(*mockRepositoryLister)
		connectionSetup        func(*mockConnectionInterface)
		expectedError          string
		expectFinalizerRemoved bool
	}{
		{
			name: "no finalizer present, should return nil",
			connection: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "test-conn",
					Namespace:         "default",
					DeletionTimestamp: &metav1.Time{Time: time.Now()},
					Finalizers:        []string{},
				},
			},
			repoListerSetup:        func(m *mockRepositoryLister) {},
			connectionSetup:        func(m *mockConnectionInterface) {},
			expectedError:          "",
			expectFinalizerRemoved: false,
		},
		{
			name: "finalizer present but repositories exist, should block deletion",
			connection: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "test-conn",
					Namespace:         "default",
					DeletionTimestamp: &metav1.Time{Time: time.Now()},
					Finalizers:        []string{connectionvalidation.BlockDeletionFinalizer},
				},
			},
			repoListerSetup: func(m *mockRepositoryLister) {
				// Expect a field-selector query scoped to this connection.
				m.On("List", ctx, mock.MatchedBy(func(opts *internalversion.ListOptions) bool {
					return opts.FieldSelector != nil && opts.FieldSelector.String() == "spec.connection.name=test-conn"
				})).Return(&provisioning.RepositoryList{
					Items: []provisioning.Repository{
						{
							ObjectMeta: metav1.ObjectMeta{Name: "repo-1"},
							Spec: provisioning.RepositorySpec{
								Connection: &provisioning.ConnectionInfo{Name: "test-conn"},
							},
						},
						{
							ObjectMeta: metav1.ObjectMeta{Name: "repo-2"},
							Spec: provisioning.RepositorySpec{
								Connection: &provisioning.ConnectionInfo{Name: "test-conn"},
							},
						},
					},
				}, nil)
			},
			connectionSetup:        func(m *mockConnectionInterface) {},
			expectedError:          "cannot delete connection while repositories are using it: repo-1, repo-2",
			expectFinalizerRemoved: false,
		},
		{
			name: "finalizer present and no repositories, should remove finalizer",
			connection: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "test-conn",
					Namespace:         "default",
					DeletionTimestamp: &metav1.Time{Time: time.Now()},
					Finalizers:        []string{connectionvalidation.BlockDeletionFinalizer},
				},
			},
			repoListerSetup: func(m *mockRepositoryLister) {
				m.On("List", ctx, mock.MatchedBy(func(opts *internalversion.ListOptions) bool {
					return opts.FieldSelector != nil && opts.FieldSelector.String() == "spec.connection.name=test-conn"
				})).Return(&provisioning.RepositoryList{
					Items: []provisioning.Repository{},
				}, nil)
			},
			connectionSetup: func(m *mockConnectionInterface) {
				m.On("Patch", ctx, "test-conn", types.JSONPatchType, mock.Anything, metav1.PatchOptions{
					FieldManager: "provisioning-connection-controller",
				}, mock.Anything).Return(&provisioning.Connection{}, nil)
			},
			expectedError:          "",
			expectFinalizerRemoved: true,
		},
		{
			name: "error checking repositories, should return error",
			connection: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "test-conn",
					Namespace:         "default",
					DeletionTimestamp: &metav1.Time{Time: time.Now()},
					Finalizers:        []string{connectionvalidation.BlockDeletionFinalizer},
				},
			},
			repoListerSetup: func(m *mockRepositoryLister) {
				m.On("List", ctx, mock.Anything).Return(nil, errors.New("list error"))
			},
			connectionSetup:        func(m *mockConnectionInterface) {},
			expectedError:          "check for connected repositories: list error",
			expectFinalizerRemoved: false,
		},
		{
			name: "error removing finalizer, should undelete connection",
			connection: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "test-conn",
					Namespace:         "default",
					DeletionTimestamp: &metav1.Time{Time: time.Now()},
					Finalizers:        []string{connectionvalidation.BlockDeletionFinalizer},
				},
			},
			repoListerSetup: func(m *mockRepositoryLister) {
				m.On("List", ctx, mock.Anything).Return(&provisioning.RepositoryList{
					Items: []provisioning.Repository{},
				}, nil)
			},
			connectionSetup: func(m *mockConnectionInterface) {
				// First patch fails (remove finalizer)
				m.On("Patch", ctx, "test-conn", types.JSONPatchType, mock.MatchedBy(func(data []byte) bool {
					return string(data) == `[
	{ "op": "remove", "path": "/metadata/finalizers" }
	]`
				}), metav1.PatchOptions{
					FieldManager: "provisioning-connection-controller",
				}, mock.Anything).Return(nil, errors.New("patch error")).Once()
				// Second patch succeeds (undelete - remove DeletionTimestamp)
				m.On("Patch", ctx, "test-conn", types.JSONPatchType, mock.MatchedBy(func(data []byte) bool {
					return string(data) == `[
	{ "op": "remove", "path": "/metadata/deletionTimestamp" }
	]`
				}), metav1.PatchOptions{
					FieldManager: "provisioning-connection-controller",
				}, mock.Anything).Return(&provisioning.Connection{}, nil).Once()
			},
			expectedError:          "remove finalizer: patch error (connection has been undeleted, deletion can be retried)",
			expectFinalizerRemoved: false,
		},
		{
			name: "pagination handled correctly",
			connection: &provisioning.Connection{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "test-conn",
					Namespace:         "default",
					DeletionTimestamp: &metav1.Time{Time: time.Now()},
					Finalizers:        []string{connectionvalidation.BlockDeletionFinalizer},
				},
			},
			repoListerSetup: func(m *mockRepositoryLister) {
				// First call returns empty with continue token (testing pagination even when empty)
				m.On("List", ctx, mock.MatchedBy(func(opts *internalversion.ListOptions) bool {
					return opts.Continue == ""
				})).Return(&provisioning.RepositoryList{
					Items:    []provisioning.Repository{},
					ListMeta: metav1.ListMeta{Continue: "continue-token"},
				}, nil)
				// Second call returns empty with no continue token
				m.On("List", ctx, mock.MatchedBy(func(opts *internalversion.ListOptions) bool {
					return opts.Continue == "continue-token"
				})).Return(&provisioning.RepositoryList{
					Items: []provisioning.Repository{},
				}, nil)
			},
			connectionSetup: func(m *mockConnectionInterface) {
				m.On("Patch", ctx, "test-conn", types.JSONPatchType, mock.Anything, metav1.PatchOptions{
					FieldManager: "provisioning-connection-controller",
				}, mock.Anything).Return(&provisioning.Connection{}, nil)
			},
			expectedError:          "",
			expectFinalizerRemoved: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			repoLister := new(mockRepositoryLister)
			connInterface := new(mockConnectionInterface)
			client := &mockProvisioningV0alpha1InterfaceForConnections{connections: connInterface}
			tt.repoListerSetup(repoLister)
			tt.connectionSetup(connInterface)
			cc := &ConnectionController{
				client:     client,
				repoLister: repoLister,
				logger:     nil, // logger is optional for testing
			}
			err := cc.handleDelete(ctx, tt.connection)
			if tt.expectedError != "" {
				require.Error(t, err)
				assert.Contains(t, err.Error(), tt.expectedError)
			} else {
				require.NoError(t, err)
			}
			if tt.expectFinalizerRemoved {
				connInterface.AssertCalled(t, "Patch", ctx, "test-conn", types.JSONPatchType, mock.Anything, metav1.PatchOptions{
					FieldManager: "provisioning-connection-controller",
				}, mock.Anything)
			} else if tt.expectedError != "" && strings.Contains(tt.expectedError, "undeleted") {
				// For undelete case, we expect both patches to be called (remove finalizer fails, then undelete succeeds)
				connInterface.AssertNumberOfCalls(t, "Patch", 2)
			}
			// For other error cases (repositories exist), no successful patch should occur
			repoLister.AssertExpectations(t)
			connInterface.AssertExpectations(t)
		})
	}
}
// TestIsTransientError verifies the transient-error classification used for
// retry decisions: server-side conditions (service unavailable, server
// timeout, throttling, internal errors) are treated as transient, while
// client-side API errors and plain Go errors are not.
func TestIsTransientError(t *testing.T) {
	cases := []struct {
		name string
		in   error
		want bool
	}{
		{name: "nil error", in: nil, want: false},
		{name: "service unavailable", in: apierrors.NewServiceUnavailable("service unavailable"), want: true},
		{name: "server timeout", in: apierrors.NewServerTimeout(schema.GroupResource{}, "operation", 0), want: true},
		{name: "too many requests", in: apierrors.NewTooManyRequests("too many requests", 0), want: true},
		{name: "internal error", in: apierrors.NewInternalError(errors.New("internal error")), want: true},
		{name: "not found error", in: apierrors.NewNotFound(schema.GroupResource{}, "resource"), want: false},
		{name: "forbidden error", in: apierrors.NewForbidden(schema.GroupResource{}, "resource", errors.New("forbidden")), want: false},
		{name: "generic error", in: errors.New("generic error"), want: false},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.want, isTransientError(tc.in))
		})
	}
}
+2 -118
View File
@@ -480,16 +480,6 @@ func (b *APIBuilder) authorizeConnectionSubresource(ctx context.Context, a autho
Namespace: a.GetNamespace(),
}, ""))
// Repositories is read-only
case "repositories":
return toAuthorizerDecision(b.accessWithAdmin.Check(ctx, authlib.CheckRequest{
Verb: apiutils.VerbGet,
Group: provisioning.GROUP,
Resource: provisioning.ConnectionResourceInfo.GetName(),
Name: a.GetName(),
Namespace: a.GetNamespace(),
}, ""))
default:
id, err := identity.GetRequester(ctx)
if err != nil {
@@ -559,22 +549,6 @@ func (b *APIBuilder) InstallSchema(scheme *runtime.Scheme) error {
return err
}
// Register custom field label conversion for Repository to enable field selectors like spec.connection.name
err = scheme.AddFieldLabelConversionFunc(
provisioning.SchemeGroupVersion.WithKind("Repository"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name", "metadata.namespace", "spec.connection.name":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported for Repository: %s", label)
}
},
)
if err != nil {
return err
}
metav1.AddToGroupVersion(scheme, provisioning.SchemeGroupVersion)
// Only 1 version (for now?)
return scheme.SetVersionPriority(provisioning.SchemeGroupVersion)
@@ -585,19 +559,10 @@ func (b *APIBuilder) AllowedV0Alpha1Resources() []string {
}
func (b *APIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupInfo, opts builder.APIGroupOptions) error {
// Create repository storage with custom field selectors (e.g., spec.connection.name)
repositoryStorage, err := grafanaregistry.NewRegistryStoreWithSelectableFields(
opts.Scheme,
provisioning.RepositoryResourceInfo,
opts.OptsGetter,
grafanaregistry.SelectableFieldsOptions{
GetAttrs: RepositoryGetAttrs,
},
)
repositoryStorage, err := grafanaregistry.NewRegistryStore(opts.Scheme, provisioning.RepositoryResourceInfo, opts.OptsGetter)
if err != nil {
return fmt.Errorf("failed to create repository storage: %w", err)
}
repositoryStatusStorage := grafanaregistry.NewRegistryStatusStore(opts.Scheme, repositoryStorage)
b.store = repositoryStorage
@@ -638,7 +603,6 @@ func (b *APIBuilder) UpdateAPIGroupInfo(apiGroupInfo *genericapiserver.APIGroupI
storage[provisioning.ConnectionResourceInfo.StoragePath()] = connectionsStore
storage[provisioning.ConnectionResourceInfo.StoragePath("status")] = connectionStatusStorage
storage[provisioning.ConnectionResourceInfo.StoragePath("repositories")] = NewConnectionRepositoriesConnector()
// TODO: Add some logic so that the connectors can registered themselves and we don't have logic all over the place
storage[provisioning.RepositoryResourceInfo.StoragePath("test")] = NewTestConnector(b, repository.NewRepositoryTesterWithExistingChecker(repository.NewSimpleRepositoryTester(b.validator), b.VerifyAgainstExistingRepositories))
@@ -685,12 +649,6 @@ func (b *APIBuilder) Mutate(ctx context.Context, a admission.Attributes, o admis
// TODO: complete this as part of https://github.com/grafana/git-ui-sync-project/issues/700
c, ok := obj.(*provisioning.Connection)
if ok {
// Add finalizer on create to prevent deletion while repositories reference it
if len(c.Finalizers) == 0 && a.GetOperation() == admission.Create {
c.Finalizers = []string{
connectionvalidation.BlockDeletionFinalizer,
}
}
return connectionvalidation.MutateConnection(c)
}
@@ -726,13 +684,7 @@ func (b *APIBuilder) Mutate(ctx context.Context, a admission.Attributes, o admis
// TODO: move logic to a more appropriate place. Probably controller/validation.go
func (b *APIBuilder) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) (err error) {
obj := a.GetObject()
// Handle Connection deletion - check for connected repositories
if a.GetOperation() == admission.Delete {
return b.validateDelete(ctx, a)
}
if obj == nil || a.GetOperation() == admission.Connect {
if obj == nil || a.GetOperation() == admission.Connect || a.GetOperation() == admission.Delete {
return nil // This is normal for sub-resource
}
@@ -812,42 +764,6 @@ func invalidRepositoryError(name string, list field.ErrorList) error {
name, list)
}
// validateDelete handles validation for delete operations
func (b *APIBuilder) validateDelete(ctx context.Context, a admission.Attributes) error {
// Only validate Connection deletions
if a.GetResource().Resource != "connections" {
return nil
}
connectionName := a.GetName()
namespace := a.GetNamespace()
// Set namespace in context for the repository store query
ctx, _, err := identity.WithProvisioningIdentity(ctx, namespace)
if err != nil {
return apierrors.NewInternalError(fmt.Errorf("failed to set provisioning identity: %w", err))
}
repos, err := GetRepositoriesByConnection(ctx, b.store, connectionName)
if err != nil {
return apierrors.NewInternalError(fmt.Errorf("failed to check for connected repositories: %w", err))
}
if len(repos) > 0 {
repoNames := make([]string, 0, len(repos))
for _, repo := range repos {
repoNames = append(repoNames, repo.Name)
}
return apierrors.NewForbidden(
provisioning.ConnectionResourceInfo.GroupResource(),
connectionName,
fmt.Errorf("cannot delete connection while repositories are using it: %s", strings.Join(repoNames, ", ")),
)
}
return nil
}
func (b *APIBuilder) VerifyAgainstExistingRepositories(ctx context.Context, cfg *provisioning.Repository) *field.Error {
return VerifyAgainstExistingRepositories(ctx, b.store, cfg)
}
@@ -890,10 +806,8 @@ func (b *APIBuilder) GetPostStartHooks() (map[string]genericapiserver.PostStartH
sharedInformerFactory := informers.NewSharedInformerFactory(c, 60*time.Second)
repoInformer := sharedInformerFactory.Provisioning().V0alpha1().Repositories()
jobInformer := sharedInformerFactory.Provisioning().V0alpha1().Jobs()
connInformer := sharedInformerFactory.Provisioning().V0alpha1().Connections()
go repoInformer.Informer().Run(postStartHookCtx.Done())
go jobInformer.Informer().Run(postStartHookCtx.Done())
go connInformer.Informer().Run(postStartHookCtx.Done())
// Create the repository resources factory
repositoryListerWrapper := func(ctx context.Context) ([]provisioning.Repository, error) {
@@ -1014,19 +928,6 @@ func (b *APIBuilder) GetPostStartHooks() (map[string]genericapiserver.PostStartH
go repoController.Run(postStartHookCtx.Context, repoControllerWorkers)
// Create and run connection controller
connStatusPatcher := appcontroller.NewConnectionStatusPatcher(b.GetClient())
connController, err := controller.NewConnectionController(
b.GetClient(),
connInformer,
connStatusPatcher,
b.store,
)
if err != nil {
return err
}
go connController.Run(postStartHookCtx.Context, repoControllerWorkers)
// If Loki not used, initialize the API client-based history writer and start the controller for history jobs
if b.jobHistoryLoki == nil {
// Create HistoryJobController for cleanup of old job history entries
@@ -1346,23 +1247,6 @@ spec:
oas.Paths.Paths[repoprefix+"/jobs/{uid}"] = sub
}
// Document connection repositories endpoint
connectionprefix := root + "namespaces/{namespace}/connections/{name}"
sub = oas.Paths.Paths[connectionprefix+"/repositories"]
if sub != nil {
sub.Get.Description = "List repositories available from the external git provider through this connection"
sub.Get.Summary = "List external repositories"
sub.Get.Parameters = []*spec3.Parameter{}
sub.Post = nil
sub.Put = nil
sub.Delete = nil
// Replace the content type for this response
mt := sub.Get.Responses.StatusCodeResponses[200].Content
s := defs[defsBase+"ExternalRepositoryList"].Schema
mt["*/*"].Schema = &s
}
// Run all extra post-processors.
for _, extra := range b.extras {
if err := extra.PostProcessOpenAPI(oas); err != nil {
@@ -1,44 +0,0 @@
package provisioning
import (
"fmt"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/registry/generic"
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
)
// RepositoryToSelectableFields builds the field set used to evaluate field
// selectors against a Repository: the standard object metadata fields
// (namespace included) merged with the custom spec.connection.name field.
func RepositoryToSelectableFields(obj *provisioning.Repository) fields.Set {
	meta := generic.ObjectMetaFieldsSet(&obj.ObjectMeta, true)
	custom := fields.Set{"spec.connection.name": getConnectionName(obj)}
	return generic.MergeFieldsSets(meta, custom)
}
// getConnectionName extracts the connection name from a Repository without
// panicking: a nil repository or a repository with no connection configured
// yields the empty string.
func getConnectionName(obj *provisioning.Repository) string {
	if obj != nil && obj.Spec.Connection != nil {
		return obj.Spec.Connection.Name
	}
	return ""
}
// RepositoryGetAttrs returns the labels and selectable fields of a Repository
// object for the storage layer's filtering hooks. Any other object type is
// rejected with an error.
func RepositoryGetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
	switch repo := obj.(type) {
	case *provisioning.Repository:
		return labels.Set(repo.Labels), RepositoryToSelectableFields(repo), nil
	default:
		return nil, nil, fmt.Errorf("given object is not a Repository")
	}
}
@@ -1,184 +0,0 @@
package provisioning
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
)
// TestGetConnectionName exercises getConnectionName for a nil repository, a
// repository without a connection, and repositories with a set or explicitly
// empty connection name.
func TestGetConnectionName(t *testing.T) {
	tests := []struct {
		name     string
		repo     *provisioning.Repository
		expected string
	}{
		{
			// a nil pointer must not panic
			name:     "nil repository returns empty string",
			repo:     nil,
			expected: "",
		},
		{
			name: "repository without connection returns empty string",
			repo: &provisioning.Repository{
				Spec: provisioning.RepositorySpec{
					Title: "test-repo",
				},
			},
			expected: "",
		},
		{
			name: "repository with connection returns connection name",
			repo: &provisioning.Repository{
				Spec: provisioning.RepositorySpec{
					Title: "test-repo",
					Connection: &provisioning.ConnectionInfo{
						Name: "my-connection",
					},
				},
			},
			expected: "my-connection",
		},
		{
			// an explicitly empty name is passed through unchanged
			name: "repository with empty connection name returns empty string",
			repo: &provisioning.Repository{
				Spec: provisioning.RepositorySpec{
					Title: "test-repo",
					Connection: &provisioning.ConnectionInfo{
						Name: "",
					},
				},
			},
			expected: "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := getConnectionName(tt.repo)
			assert.Equal(t, tt.expected, result)
		})
	}
}
// TestRepositoryToSelectableFields checks that the selectable field set always
// contains metadata.name, metadata.namespace and spec.connection.name, the
// latter being empty when no connection is configured.
func TestRepositoryToSelectableFields(t *testing.T) {
	tests := []struct {
		name           string
		repo           *provisioning.Repository
		expectedFields map[string]string
	}{
		{
			name: "includes metadata.name and metadata.namespace",
			repo: &provisioning.Repository{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-repo",
					Namespace: "default",
				},
				Spec: provisioning.RepositorySpec{
					Title: "Test Repository",
				},
			},
			expectedFields: map[string]string{
				"metadata.name":        "test-repo",
				"metadata.namespace":   "default",
				"spec.connection.name": "",
			},
		},
		{
			name: "includes spec.connection.name when set",
			repo: &provisioning.Repository{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "repo-with-connection",
					Namespace: "org-1",
				},
				Spec: provisioning.RepositorySpec{
					Title: "Repo With Connection",
					Connection: &provisioning.ConnectionInfo{
						Name: "github-connection",
					},
				},
			},
			expectedFields: map[string]string{
				"metadata.name":        "repo-with-connection",
				"metadata.namespace":   "org-1",
				"spec.connection.name": "github-connection",
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fields := RepositoryToSelectableFields(tt.repo)
			// Only the listed keys are asserted; the set may carry additional
			// metadata fields that the test does not care about.
			for key, expectedValue := range tt.expectedFields {
				actualValue, exists := fields[key]
				assert.True(t, exists, "field %s should exist", key)
				assert.Equal(t, expectedValue, actualValue, "field %s should have correct value", key)
			}
		})
	}
}
// TestRepositoryGetAttrs covers the storage GetAttrs hook: non-Repository
// objects are rejected with an error, and valid Repositories yield their
// labels plus the selectable field set.
func TestRepositoryGetAttrs(t *testing.T) {
	t.Run("returns error for non-Repository object", func(t *testing.T) {
		// Pass a different runtime.Object type instead of a Repository
		connection := &provisioning.Connection{
			ObjectMeta: metav1.ObjectMeta{
				Name: "not-a-repository",
			},
		}
		_, _, err := RepositoryGetAttrs(connection)
		require.Error(t, err)
		assert.Contains(t, err.Error(), "not a Repository")
	})
	t.Run("returns labels and fields for valid Repository", func(t *testing.T) {
		repo := &provisioning.Repository{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "test-repo",
				Namespace: "default",
				Labels: map[string]string{
					"app": "grafana",
					"env": "test",
				},
			},
			Spec: provisioning.RepositorySpec{
				Title: "Test Repository",
				Connection: &provisioning.ConnectionInfo{
					Name: "my-connection",
				},
			},
		}
		labels, fields, err := RepositoryGetAttrs(repo)
		require.NoError(t, err)
		// Check labels
		assert.Equal(t, "grafana", labels["app"])
		assert.Equal(t, "test", labels["env"])
		// Check fields
		assert.Equal(t, "test-repo", fields["metadata.name"])
		assert.Equal(t, "default", fields["metadata.namespace"])
		assert.Equal(t, "my-connection", fields["spec.connection.name"])
	})
	t.Run("returns empty connection name when not set", func(t *testing.T) {
		repo := &provisioning.Repository{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "test-repo",
				Namespace: "default",
			},
			Spec: provisioning.RepositorySpec{
				Title: "Test Repository",
			},
		}
		_, fields, err := RepositoryGetAttrs(repo)
		require.NoError(t, err)
		assert.Equal(t, "", fields["spec.connection.name"])
	})
}
@@ -7,7 +7,6 @@ import (
"strings"
"k8s.io/apimachinery/pkg/apis/meta/internalversion"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apiserver/pkg/endpoints/request"
@@ -51,39 +50,6 @@ func GetRepositoriesInNamespace(ctx context.Context, store RepositoryLister) ([]
return allRepositories, nil
}
// GetRepositoriesByConnection lists every repository that references the given
// connection, using the spec.connection.name field selector and following
// continue tokens until the server reports no more pages (100 items per page).
func GetRepositoriesByConnection(ctx context.Context, store RepositoryLister, connectionName string) ([]provisioning.Repository, error) {
	selector := fields.OneTermEqualSelector("spec.connection.name", connectionName)
	var result []provisioning.Repository
	for token := ""; ; {
		obj, err := store.List(ctx, &internalversion.ListOptions{
			Limit:         100,
			Continue:      token,
			FieldSelector: selector,
		})
		if err != nil {
			return nil, err
		}
		list, ok := obj.(*provisioning.RepositoryList)
		if !ok {
			return nil, fmt.Errorf("expected repository list")
		}
		result = append(result, list.Items...)
		// An empty continue token means the final page has been consumed.
		if token = list.GetContinue(); token == "" {
			return result, nil
		}
	}
}
// VerifyAgainstExistingRepositories validates a repository configuration against existing repositories
func VerifyAgainstExistingRepositories(ctx context.Context, store RepositoryLister, cfg *provisioning.Repository) *field.Error {
ctx, _, err := identity.WithProvisioningIdentity(ctx, cfg.Namespace)
@@ -1,200 +0,0 @@
package provisioning
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
provisioning "github.com/grafana/grafana/apps/provisioning/pkg/apis/provisioning/v0alpha1"
)
// mockRepositoryLister is a mock implementation of RepositoryLister for testing.
type mockRepositoryLister struct {
	repositories []provisioning.Repository // canned items returned by List
	listErr      error                     // when set, List fails with this error
	// Track the field selector used in List calls
	lastFieldSelector fields.Selector
}

// List returns the canned repositories, applying any field selector from
// options by matching it against each repository's spec.connection.name.
// The returned list carries no continue token, so paginating callers stop
// after a single page. When listErr is set it is returned unconditionally.
func (m *mockRepositoryLister) List(ctx context.Context, options *internalversion.ListOptions) (runtime.Object, error) {
	if m.listErr != nil {
		return nil, m.listErr
	}
	// Store the field selector for verification
	m.lastFieldSelector = options.FieldSelector
	// Filter repositories based on field selector if present
	filteredRepos := m.repositories
	if options.FieldSelector != nil && !options.FieldSelector.Empty() {
		filteredRepos = make([]provisioning.Repository, 0)
		for _, repo := range m.repositories {
			// Simulate field selector matching for spec.connection.name
			repoFields := fields.Set{
				"spec.connection.name": getRepoConnectionName(&repo),
			}
			if options.FieldSelector.Matches(repoFields) {
				filteredRepos = append(filteredRepos, repo)
			}
		}
	}
	return &provisioning.RepositoryList{
		Items: filteredRepos,
	}, nil
}
// getRepoConnectionName returns the repository's connection name, or the
// empty string when no connection is configured.
func getRepoConnectionName(repo *provisioning.Repository) string {
	if conn := repo.Spec.Connection; conn != nil {
		return conn.Name
	}
	return ""
}
// TestGetRepositoriesByConnection checks that repositories are filtered by the
// spec.connection.name field selector, that the selector is actually passed to
// the lister, and that an empty connection name matches repositories without a
// connection configured.
func TestGetRepositoriesByConnection(t *testing.T) {
	tests := []struct {
		name           string
		repositories   []provisioning.Repository
		connectionName string
		expectedCount  int
		expectedNames  []string
		expectedErr    bool
	}{
		{
			name:           "empty repository list returns empty",
			repositories:   []provisioning.Repository{},
			connectionName: "test-conn",
			expectedCount:  0,
			expectedNames:  []string{},
		},
		{
			name: "finds single matching repository",
			repositories: []provisioning.Repository{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "repo-1"},
					Spec: provisioning.RepositorySpec{
						Connection: &provisioning.ConnectionInfo{Name: "conn-a"},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{Name: "repo-2"},
					Spec: provisioning.RepositorySpec{
						Connection: &provisioning.ConnectionInfo{Name: "conn-b"},
					},
				},
			},
			connectionName: "conn-a",
			expectedCount:  1,
			expectedNames:  []string{"repo-1"},
		},
		{
			name: "finds multiple matching repositories",
			repositories: []provisioning.Repository{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "repo-1"},
					Spec: provisioning.RepositorySpec{
						Connection: &provisioning.ConnectionInfo{Name: "shared-conn"},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{Name: "repo-2"},
					Spec: provisioning.RepositorySpec{
						Connection: &provisioning.ConnectionInfo{Name: "shared-conn"},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{Name: "repo-3"},
					Spec: provisioning.RepositorySpec{
						Connection: &provisioning.ConnectionInfo{Name: "different-conn"},
					},
				},
			},
			connectionName: "shared-conn",
			expectedCount:  2,
			expectedNames:  []string{"repo-1", "repo-2"},
		},
		{
			name: "no matches returns empty list",
			repositories: []provisioning.Repository{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "repo-1"},
					Spec: provisioning.RepositorySpec{
						Connection: &provisioning.ConnectionInfo{Name: "conn-a"},
					},
				},
			},
			connectionName: "non-existent",
			expectedCount:  0,
			expectedNames:  []string{},
		},
		{
			// The mock reports "" as spec.connection.name for repos with a
			// nil Connection, so an empty selector value matches only those.
			name: "empty connection name matches repos without connection",
			repositories: []provisioning.Repository{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "repo-with-conn"},
					Spec: provisioning.RepositorySpec{
						Connection: &provisioning.ConnectionInfo{Name: "some-conn"},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{Name: "repo-without-conn"},
					Spec: provisioning.RepositorySpec{
						Connection: nil,
					},
				},
			},
			connectionName: "",
			expectedCount:  1,
			expectedNames:  []string{"repo-without-conn"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			mock := &mockRepositoryLister{repositories: tt.repositories}
			ctx := context.Background()
			repos, err := GetRepositoriesByConnection(ctx, mock, tt.connectionName)
			if tt.expectedErr {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
			assert.Len(t, repos, tt.expectedCount)
			// Verify the field selector was used
			require.NotNil(t, mock.lastFieldSelector, "field selector should have been set")
			expectedSelector := fields.OneTermEqualSelector("spec.connection.name", tt.connectionName)
			assert.Equal(t, expectedSelector.String(), mock.lastFieldSelector.String())
			// Verify the correct repositories were returned
			actualNames := make([]string, len(repos))
			for i, repo := range repos {
				actualNames[i] = repo.Name
			}
			for _, expectedName := range tt.expectedNames {
				assert.Contains(t, actualNames, expectedName)
			}
		})
	}
}
// TestGetRepositoriesByConnection_ListError ensures that a List failure is
// surfaced to the caller and that no partial result is returned.
func TestGetRepositoriesByConnection_ListError(t *testing.T) {
	lister := &mockRepositoryLister{listErr: assert.AnError}
	repos, err := GetRepositoriesByConnection(context.Background(), lister, "any-conn")
	require.Error(t, err)
	assert.Nil(t, repos)
}
@@ -4,8 +4,6 @@ import (
"context"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/grafana/grafana/pkg/apimachinery/identity"
"github.com/grafana/grafana/pkg/infra/db"
@@ -437,11 +435,6 @@ func anonymousRoleBindingsCollector(cfg *setting.Cfg, store db.DB) legacyTupleCo
func zanzanaCollector(relations []string) zanzanaTupleCollector {
return func(ctx context.Context, client zanzana.Client, object string, namespace string) (map[string]*openfgav1.TupleKey, error) {
ctx, span := tracer.Start(ctx, "accesscontrol.dualwrite.resourceReconciler.zanzanaTupleCollector",
trace.WithAttributes(attribute.String("namespace", namespace)),
)
defer span.End()
// list will use continuation token to collect all tuples for object and relation
list := func(relation string) ([]*openfgav1.Tuple, error) {
first, err := client.Read(ctx, &authzextv1.ReadRequest{
@@ -6,8 +6,6 @@ import (
"strings"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
claims "github.com/grafana/authlib/types"
@@ -50,12 +48,6 @@ func newResourceReconciler(name string, legacy legacyTupleCollector, zanzanaColl
}
func (r resourceReconciler) reconcile(ctx context.Context, namespace string) error {
ctx, span := tracer.Start(ctx, "accesscontrol.dualwrite.resourceReconciler.reconcile",
trace.WithAttributes(attribute.String("namespace", namespace)),
trace.WithAttributes(attribute.String("reconciler", r.name)),
)
defer span.End()
info, err := claims.ParseNamespace(namespace)
if err != nil {
return err
@@ -71,12 +63,7 @@ func (r resourceReconciler) reconcile(ctx context.Context, namespace string) err
}
// 1. Fetch grafana resources stored in grafana db.
legacyCtx, legacySpan := tracer.Start(ctx, "accesscontrol.dualwrite.resourceReconciler.legacyCollector",
trace.WithAttributes(attribute.String("namespace", namespace)),
trace.WithAttributes(attribute.String("reconciler", r.name)),
)
res, err := r.legacy(legacyCtx, info.OrgID)
legacySpan.End()
res, err := r.legacy(ctx, info.OrgID)
if err != nil {
return fmt.Errorf("failed to collect legacy tuples for %s: %w", r.name, err)
}
@@ -224,12 +211,6 @@ func (r resourceReconciler) collectOrphanDeletes(
}
func (r resourceReconciler) readAllTuples(ctx context.Context, namespace string) ([]*authzextv1.Tuple, error) {
ctx, span := tracer.Start(ctx, "accesscontrol.dualwrite.resourceReconciler.zanzana.readAllTuples",
trace.WithAttributes(attribute.String("namespace", namespace)),
trace.WithAttributes(attribute.String("reconciler", r.name)),
)
defer span.End()
var (
out []*authzextv1.Tuple
continueToken string
+21
View File
@@ -182,6 +182,25 @@ func newFolderTranslation() translation {
return folderTranslation
}
// newExternalGroupMappingTranslation returns the verb-to-action translation
// for the externalgroupmappings resource, mapped onto teams.permissions:
// read-only verbs (get/list/watch) translate to teams.permissions:read, and
// all mutating verbs — including getPermissions/setPermissions — translate to
// teams.permissions:write. Folder scoping is disabled for this resource.
func newExternalGroupMappingTranslation() translation {
	return translation{
		resource:  "teams.permissions",
		attribute: "uid",
		verbMapping: map[string]string{
			utils.VerbGet:            "teams.permissions:read",
			utils.VerbList:           "teams.permissions:read",
			utils.VerbWatch:          "teams.permissions:read",
			utils.VerbCreate:         "teams.permissions:write",
			utils.VerbUpdate:         "teams.permissions:write",
			utils.VerbPatch:          "teams.permissions:write",
			utils.VerbDelete:         "teams.permissions:write",
			utils.VerbGetPermissions: "teams.permissions:write",
			utils.VerbSetPermissions: "teams.permissions:write",
		},
		folderSupport: false,
	}
}
func NewMapperRegistry() MapperRegistry {
skipScopeOnAllVerbs := map[string]bool{
utils.VerbCreate: true,
@@ -210,6 +229,8 @@ func NewMapperRegistry() MapperRegistry {
"serviceaccounts": newResourceTranslation("serviceaccounts", "uid", false, map[string]bool{utils.VerbCreate: true}),
// Teams is a special case. We translate user permissions from id to uid based.
"teams": newResourceTranslation("teams", "uid", false, map[string]bool{utils.VerbCreate: true}),
// ExternalGroupMappings is a special case. We translate team permissions from id to uid based.
"externalgroupmappings": newExternalGroupMappingTranslation(),
"coreroles": translation{
resource: "roles",
attribute: "uid",
+2 -2
View File
@@ -90,7 +90,7 @@ func ProvideZanzanaClient(cfg *setting.Cfg, db db.DB, tracer tracing.Tracer, fea
authzv1.RegisterAuthzServiceServer(channel, srv)
authzextv1.RegisterAuthzExtentionServiceServer(channel, srv)
client, err := zClient.New(channel, reg)
client, err := zClient.New(channel)
if err != nil {
return nil, fmt.Errorf("failed to initialize zanzana client: %w", err)
}
@@ -169,7 +169,7 @@ func NewRemoteZanzanaClient(cfg ZanzanaClientConfig, reg prometheus.Registerer)
return nil, fmt.Errorf("failed to create zanzana client to remote server: %w", err)
}
client, err := zClient.New(conn, reg)
client, err := zClient.New(conn)
if err != nil {
return nil, fmt.Errorf("failed to initialize zanzana client: %w", err)
}
+1 -22
View File
@@ -9,7 +9,6 @@ import (
authzlib "github.com/grafana/authlib/authz"
authzv1 "github.com/grafana/authlib/authz/proto/v1"
authlib "github.com/grafana/authlib/types"
"github.com/prometheus/client_golang/prometheus"
"github.com/grafana/grafana/pkg/infra/log"
authzextv1 "github.com/grafana/grafana/pkg/services/authz/proto/v1"
@@ -26,17 +25,15 @@ type Client struct {
authz authzv1.AuthzServiceClient
authzext authzextv1.AuthzExtentionServiceClient
authzlibclient *authzlib.ClientImpl
metrics *clientMetrics
}
func New(cc grpc.ClientConnInterface, reg prometheus.Registerer) (*Client, error) {
func New(cc grpc.ClientConnInterface) (*Client, error) {
authzlibclient := authzlib.NewClient(cc, authzlib.WithTracerClientOption(tracer))
c := &Client{
authzlibclient: authzlibclient,
authz: authzv1.NewAuthzServiceClient(cc),
authzext: authzextv1.NewAuthzExtentionServiceClient(cc),
logger: log.New("zanzana.client"),
metrics: newClientMetrics(reg),
}
return c, nil
@@ -46,9 +43,6 @@ func (c *Client) Check(ctx context.Context, id authlib.AuthInfo, req authlib.Che
ctx, span := tracer.Start(ctx, "authlib.zanzana.client.Check")
defer span.End()
timer := prometheus.NewTimer(c.metrics.requestDurationSeconds.WithLabelValues("Check", req.Namespace))
defer timer.ObserveDuration()
return c.authzlibclient.Check(ctx, id, req, folder)
}
@@ -56,9 +50,6 @@ func (c *Client) Compile(ctx context.Context, id authlib.AuthInfo, req authlib.L
ctx, span := tracer.Start(ctx, "authlib.zanzana.client.Compile")
defer span.End()
timer := prometheus.NewTimer(c.metrics.requestDurationSeconds.WithLabelValues("Compile", req.Namespace))
defer timer.ObserveDuration()
return c.authzlibclient.Compile(ctx, id, req)
}
@@ -73,9 +64,6 @@ func (c *Client) Write(ctx context.Context, req *authzextv1.WriteRequest) error
ctx, span := tracer.Start(ctx, "authlib.zanzana.client.Write")
defer span.End()
timer := prometheus.NewTimer(c.metrics.requestDurationSeconds.WithLabelValues("Write", req.Namespace))
defer timer.ObserveDuration()
_, err := c.authzext.Write(ctx, req)
return err
}
@@ -84,9 +72,6 @@ func (c *Client) BatchCheck(ctx context.Context, req *authzextv1.BatchCheckReque
ctx, span := tracer.Start(ctx, "authlib.zanzana.client.Check")
defer span.End()
timer := prometheus.NewTimer(c.metrics.requestDurationSeconds.WithLabelValues("BatchCheck", req.Namespace))
defer timer.ObserveDuration()
return c.authzext.BatchCheck(ctx, req)
}
@@ -102,9 +87,6 @@ func (c *Client) Mutate(ctx context.Context, req *authzextv1.MutateRequest) erro
ctx, span := tracer.Start(ctx, "authlib.zanzana.client.Mutate")
defer span.End()
timer := prometheus.NewTimer(c.metrics.requestDurationSeconds.WithLabelValues("Mutate", req.Namespace))
defer timer.ObserveDuration()
_, err := c.authzext.Mutate(ctx, req)
return err
}
@@ -113,8 +95,5 @@ func (c *Client) Query(ctx context.Context, req *authzextv1.QueryRequest) (*auth
ctx, span := tracer.Start(ctx, "authlib.zanzana.client.Query")
defer span.End()
timer := prometheus.NewTimer(c.metrics.requestDurationSeconds.WithLabelValues("Query", req.Namespace))
defer timer.ObserveDuration()
return c.authzext.Query(ctx, req)
}
+4 -24
View File
@@ -7,10 +7,10 @@ import (
const (
metricsNamespace = "iam"
metricsSubSystem = "authz_zanzana_client"
metricsSubSystem = "authz_zanzana"
)
type shadowClientMetrics struct {
type metrics struct {
// evaluationsSeconds is a summary for evaluating access for a specific engine (RBAC and zanzana)
evaluationsSeconds *prometheus.HistogramVec
// compileSeconds is a summary for compiling item checker for a specific engine (RBAC and zanzana)
@@ -19,13 +19,8 @@ type shadowClientMetrics struct {
evaluationStatusTotal *prometheus.CounterVec
}
type clientMetrics struct {
// requestDurationSeconds is a summary for zanzana client request duration
requestDurationSeconds *prometheus.HistogramVec
}
func newShadowClientMetrics(reg prometheus.Registerer) *shadowClientMetrics {
return &shadowClientMetrics{
func newShadowClientMetrics(reg prometheus.Registerer) *metrics {
return &metrics{
evaluationsSeconds: promauto.With(reg).NewHistogramVec(
prometheus.HistogramOpts{
Name: "engine_evaluations_seconds",
@@ -57,18 +52,3 @@ func newShadowClientMetrics(reg prometheus.Registerer) *shadowClientMetrics {
),
}
}
func newClientMetrics(reg prometheus.Registerer) *clientMetrics {
return &clientMetrics{
requestDurationSeconds: promauto.With(reg).NewHistogramVec(
prometheus.HistogramOpts{
Name: "request_duration_seconds",
Help: "Histogram for zanzana client request duration",
Namespace: metricsNamespace,
Subsystem: metricsSubSystem,
Buckets: prometheus.ExponentialBuckets(0.00001, 4, 10),
},
[]string{"method", "request_namespace"},
),
}
}
@@ -20,7 +20,7 @@ type ShadowClient struct {
logger log.Logger
accessClient authlib.AccessClient
zanzanaClient authlib.AccessClient
metrics *shadowClientMetrics
metrics *metrics
}
// WithShadowClient returns a new access client that runs zanzana checks in the background.
@@ -542,9 +542,6 @@ func (d *dashboardStore) saveDashboard(ctx context.Context, sess *db.Session, cm
tags := dash.GetTags()
if len(tags) > 0 {
for _, tag := range tags {
if len(tag) > 50 {
return nil, dashboards.ErrDashboardTagTooLong
}
if _, err := sess.Insert(dashboardTag{DashboardId: dash.ID, Term: tag, OrgID: dash.OrgID, DashboardUID: dash.UID}); err != nil {
return nil, err
}
-5
View File
@@ -79,11 +79,6 @@ var (
Reason: "message too long, max 500 characters",
StatusCode: 400,
}
ErrDashboardTagTooLong = dashboardaccess.DashboardErr{
Reason: "dashboard tag too long, max 50 characters",
StatusCode: 400,
Status: "tag-too-long",
}
ErrDashboardCannotSaveProvisionedDashboard = dashboardaccess.DashboardErr{
Reason: "Cannot save provisioned dashboard",
StatusCode: 400,
+7 -7
View File
@@ -322,13 +322,6 @@ var (
Owner: grafanaOperatorExperienceSquad,
RequiresRestart: true,
},
{
Name: "reportingCsvEncodingOptions",
Description: "Enables CSV encoding options in the reporting feature",
Stage: FeatureStageExperimental,
FrontendOnly: false,
Owner: grafanaOperatorExperienceSquad,
},
{
Name: "sseGroupByDatasource",
Description: "Send query to the same datasource in a single request when using server side expressions. The `cloudWatchBatchQueries` feature toggle should be enabled if this used with CloudWatch.",
@@ -1290,6 +1283,13 @@ var (
Owner: grafanaPartnerPluginsSquad,
Expression: "false",
},
{
Name: "unifiedHistory",
Description: "Displays the navigation history so the user can navigate back to previous pages",
Stage: FeatureStageExperimental,
Owner: grafanaFrontendSearchNavOrganise,
FrontendOnly: true,
},
{
// Remove this flag once Loki v4 is released and the min supported version is v3.0+,
// since users on v2.9 need it to disable the feature, as it doesn't work for them.
+1 -1
View File
@@ -43,7 +43,6 @@ configurableSchedulerTick,experimental,@grafana/alerting-squad,false,true,false
dashgpt,GA,@grafana/dashboards-squad,false,false,true
aiGeneratedDashboardChanges,experimental,@grafana/dashboards-squad,false,false,true
reportingRetries,preview,@grafana/grafana-operator-experience-squad,false,true,false
reportingCsvEncodingOptions,experimental,@grafana/grafana-operator-experience-squad,false,false,false
sseGroupByDatasource,experimental,@grafana/grafana-datasources-core-services,false,false,false
lokiRunQueriesInParallel,privatePreview,@grafana/observability-logs,false,false,false
externalServiceAccounts,preview,@grafana/identity-access-team,false,false,false
@@ -178,6 +177,7 @@ alertingAIAnalyzeCentralStateHistory,experimental,@grafana/alerting-squad,false,
alertingNotificationsStepMode,GA,@grafana/alerting-squad,false,false,true
unifiedStorageSearchUI,experimental,@grafana/search-and-storage,false,false,false
elasticsearchCrossClusterSearch,GA,@grafana/partner-datasources,false,false,false
unifiedHistory,experimental,@grafana/grafana-search-navigate-organise,false,false,true
lokiLabelNamesQueryApi,GA,@grafana/observability-logs,false,false,false
k8SFolderCounts,experimental,@grafana/search-and-storage,false,false,false
k8SFolderMove,experimental,@grafana/search-and-storage,false,false,false
1 Name Stage Owner requiresDevMode RequiresRestart FrontendOnly
43 dashgpt GA @grafana/dashboards-squad false false true
44 aiGeneratedDashboardChanges experimental @grafana/dashboards-squad false false true
45 reportingRetries preview @grafana/grafana-operator-experience-squad false true false
reportingCsvEncodingOptions experimental @grafana/grafana-operator-experience-squad false false false
46 sseGroupByDatasource experimental @grafana/grafana-datasources-core-services false false false
47 lokiRunQueriesInParallel privatePreview @grafana/observability-logs false false false
48 externalServiceAccounts preview @grafana/identity-access-team false false false
177 alertingNotificationsStepMode GA @grafana/alerting-squad false false true
178 unifiedStorageSearchUI experimental @grafana/search-and-storage false false false
179 elasticsearchCrossClusterSearch GA @grafana/partner-datasources false false false
180 unifiedHistory experimental @grafana/grafana-search-navigate-organise false false true
181 lokiLabelNamesQueryApi GA @grafana/observability-logs false false false
182 k8SFolderCounts experimental @grafana/search-and-storage false false false
183 k8SFolderMove experimental @grafana/search-and-storage false false false
-4
View File
@@ -135,10 +135,6 @@ const (
// Enables rendering retries for the reporting feature
FlagReportingRetries = "reportingRetries"
// FlagReportingCsvEncodingOptions
// Enables CSV encoding options in the reporting feature
FlagReportingCsvEncodingOptions = "reportingCsvEncodingOptions"
// FlagSseGroupByDatasource
// Send query to the same datasource in a single request when using server side expressions. The `cloudWatchBatchQueries` feature toggle should be enabled if this used with CloudWatch.
FlagSseGroupByDatasource = "sseGroupByDatasource"
+2 -18
View File
@@ -3137,18 +3137,6 @@
"hideFromDocs": true
}
},
{
"metadata": {
"name": "reportingCsvEncodingOptions",
"resourceVersion": "1766080709938",
"creationTimestamp": "2025-12-18T17:58:29Z"
},
"spec": {
"description": "Enables CSV encoding options in the reporting feature",
"stage": "experimental",
"codeowner": "@grafana/grafana-operator-experience-squad"
}
},
{
"metadata": {
"name": "reportingRetries",
@@ -3584,12 +3572,8 @@
{
"metadata": {
"name": "unifiedHistory",
"resourceVersion": "1762958248290",
"creationTimestamp": "2024-12-13T10:41:18Z",
"deletionTimestamp": "2025-11-13T16:25:53Z",
"annotations": {
"grafana.app/updatedTimestamp": "2025-11-12 14:37:28.29086 +0000 UTC"
}
"resourceVersion": "1764664939750",
"creationTimestamp": "2024-12-13T10:41:18Z"
},
"spec": {
"description": "Displays the navigation history so the user can navigate back to previous pages",
-6
View File
@@ -501,15 +501,9 @@ type GetLibraryElementsParams struct {
// required:false
ExcludeUID string `json:"excludeUid"`
// A comma separated list of folder ID(s) to filter the elements by.
// Deprecated: Use FolderFilterUIDs instead.
// in:query
// required:false
// deprecated:true
FolderFilter string `json:"folderFilter"`
// A comma separated list of folder UID(s) to filter the elements by.
// in:query
// required:false
FolderFilterUIDs string `json:"folderFilterUIDs"`
// The number of results per page.
// in:query
// required:false
+1 -1
View File
@@ -440,7 +440,7 @@ func (s *ServiceImpl) buildAlertNavLinks(c *contextmodel.ReqContext) *navtree.Na
if s.features.IsEnabled(c.Req.Context(), featuremgmt.FlagAlertingTriage) {
if hasAccess(ac.EvalAny(ac.EvalPermission(ac.ActionAlertingRuleRead), ac.EvalPermission(ac.ActionAlertingRuleExternalRead))) {
alertChildNavs = append(alertChildNavs, &navtree.NavLink{
Text: "Alert activity", SubTitle: "Visualize active and pending alerts", Id: "alert-alerts", Url: s.cfg.AppSubURL + "/alerting/alerts", Icon: "bell", IsNew: true,
Text: "Alerts", SubTitle: "Visualize active and pending alerts", Id: "alert-alerts", Url: s.cfg.AppSubURL + "/alerting/alerts", Icon: "bell", IsNew: true,
})
}
}
-4
View File
@@ -20,10 +20,6 @@ func UpdatePreferencesFor(ctx context.Context,
return response.Error(http.StatusBadRequest, "Invalid theme", nil)
}
if !pref.IsValidTimezone(dtoCmd.Timezone) {
return response.Error(http.StatusBadRequest, "Invalid timezone. Must be a valid IANA timezone (e.g., America/New_York), 'utc', 'browser', or empty string", nil)
}
// convert dashboard UID to ID in order to store internally if it exists in the query, otherwise take the id from query
// nolint:staticcheck
dashboardID := dtoCmd.HomeDashboardID
-21
View File
@@ -1,21 +0,0 @@
package pref
import (
"time"
)
// IsValidTimezone checks if the timezone string is valid.
// It accepts:
// - "" - uses default
// - "utc"
// - "browser"
// - Any valid IANA timezone (e.g., "America/New_York", "Europe/London")
func IsValidTimezone(timezone string) bool {
if timezone == "" || timezone == "utc" || timezone == "browser" {
return true
}
// try to load as IANA timezone
_, err := time.LoadLocation(timezone)
return err == nil
}
-38
View File
@@ -1,38 +0,0 @@
package pref
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestIsValidTimezone(t *testing.T) {
tests := []struct {
timezone string
valid bool
}{
{
timezone: "utc",
valid: true,
},
{
timezone: "browser",
valid: true,
},
{
timezone: "Europe/London",
valid: true,
},
{
timezone: "invalid",
valid: false,
},
{
timezone: "",
valid: true,
},
}
for _, test := range tests {
assert.Equal(t, test.valid, IsValidTimezone(test.timezone))
}
}
-2
View File
@@ -637,8 +637,6 @@ type UnifiedStorageConfig struct {
// EnableMigration indicates whether migration is enabled for the resource.
// If not set, will use the default from MigratedUnifiedResources.
EnableMigration bool
// AutoMigrationThreshold is the threshold below which a resource is automatically migrated.
AutoMigrationThreshold int
}
type InstallPlugin struct {
-3
View File
@@ -36,8 +36,6 @@ type SecretsManagerSettings struct {
// How long to wait for the process to clean up a secure value to complete.
GCWorkerPerSecureValueCleanupTimeout time.Duration
// Whether to register the MT CRUD API
RegisterAPIServer bool
// Whether to create the MT secrets management database
RunSecretsDBMigrations bool
// Whether to run the data key id migration. Requires that RunSecretsDBMigrations is also true.
@@ -66,7 +64,6 @@ func (cfg *Cfg) readSecretsManagerSettings() {
cfg.SecretsManagement.GCWorkerPollInterval = secretsMgmt.Key("gc_worker_poll_interval").MustDuration(1 * time.Minute)
cfg.SecretsManagement.GCWorkerPerSecureValueCleanupTimeout = secretsMgmt.Key("gc_worker_per_request_timeout").MustDuration(5 * time.Second)
cfg.SecretsManagement.RegisterAPIServer = secretsMgmt.Key("register_api_server").MustBool(true)
cfg.SecretsManagement.RunSecretsDBMigrations = secretsMgmt.Key("run_secrets_db_migrations").MustBool(true)
cfg.SecretsManagement.RunDataKeyMigration = secretsMgmt.Key("run_data_key_migration").MustBool(true)
@@ -171,28 +171,6 @@ domain = example.com
assert.Empty(t, cfg.SecretsManagement.ConfiguredKMSProviders)
})
t.Run("should handle configuration with register_api_server disabled", func(t *testing.T) {
iniContent := `
[secrets_manager]
register_api_server = false
`
cfg, err := NewCfgFromBytes([]byte(iniContent))
require.NoError(t, err)
assert.False(t, cfg.SecretsManagement.RegisterAPIServer)
})
t.Run("should handle configuration without register_api_server set", func(t *testing.T) {
iniContent := `
[secrets_manager]
encryption_provider = aws_kms
`
cfg, err := NewCfgFromBytes([]byte(iniContent))
require.NoError(t, err)
assert.True(t, cfg.SecretsManagement.RegisterAPIServer)
})
t.Run("should handle configuration with run_secrets_db_migrations disabled", func(t *testing.T) {
iniContent := `
[secrets_manager]
+4 -44
View File
@@ -8,10 +8,6 @@ import (
"github.com/grafana/grafana/pkg/util/osutil"
)
// DefaultAutoMigrationThreshold is the default threshold for auto migration switching.
// If a resource has entries at or below this count, it will be migrated.
const DefaultAutoMigrationThreshold = 10
const (
PlaylistResource = "playlists.playlist.grafana.app"
FolderResource = "folders.folder.grafana.app"
@@ -25,13 +21,6 @@ var MigratedUnifiedResources = map[string]bool{
DashboardResource: false,
}
// AutoMigratedUnifiedResources maps resources that support auto-migration
// TODO: remove this before Grafana 13 GA: https://github.com/grafana/search-and-storage-team/issues/613
var AutoMigratedUnifiedResources = map[string]bool{
FolderResource: true,
DashboardResource: true,
}
// read storage configs from ini file. They look like:
// [unified_storage.<group>.<resource>]
// <field> = <value>
@@ -70,13 +59,6 @@ func (cfg *Cfg) setUnifiedStorageConfig() {
enableMigration = section.Key("enableMigration").MustBool(MigratedUnifiedResources[resourceName])
}
// parse autoMigrationThreshold from resource section
autoMigrationThreshold := 0
autoMigrate := AutoMigratedUnifiedResources[resourceName]
if autoMigrate {
autoMigrationThreshold = section.Key("autoMigrationThreshold").MustInt(DefaultAutoMigrationThreshold)
}
storageConfig[resourceName] = UnifiedStorageConfig{
DualWriterMode: rest.DualWriterMode(dualWriterMode),
DualWriterPeriodicDataSyncJobEnabled: dualWriterPeriodicDataSyncJobEnabled,
@@ -84,7 +66,6 @@ func (cfg *Cfg) setUnifiedStorageConfig() {
DataSyncerRecordsLimit: dataSyncerRecordsLimit,
DataSyncerInterval: dataSyncerInterval,
EnableMigration: enableMigration,
AutoMigrationThreshold: autoMigrationThreshold,
}
}
cfg.UnifiedStorage = storageConfig
@@ -92,13 +73,13 @@ func (cfg *Cfg) setUnifiedStorageConfig() {
// Set indexer config for unified storage
section := cfg.Raw.Section("unified_storage")
cfg.DisableDataMigrations = section.Key("disable_data_migrations").MustBool(false)
if !cfg.DisableDataMigrations && cfg.UnifiedStorageType() == "unified" {
if !cfg.DisableDataMigrations && cfg.getUnifiedStorageType() == "unified" {
// Helper log to find instances running migrations in the future
cfg.Logger.Info("Unified migration configs enforced")
cfg.enforceMigrationToUnifiedConfigs()
} else {
// Helper log to find instances disabling migration
cfg.Logger.Info("Unified migration configs enforcement disabled", "storage_type", cfg.UnifiedStorageType(), "disable_data_migrations", cfg.DisableDataMigrations)
cfg.Logger.Info("Unified migration configs enforcement disabled", "storage_type", cfg.getUnifiedStorageType(), "disable_data_migrations", cfg.DisableDataMigrations)
}
cfg.EnableSearch = section.Key("enable_search").MustBool(false)
cfg.MaxPageSizeBytes = section.Key("max_page_size_bytes").MustInt(0)
@@ -166,15 +147,14 @@ func (cfg *Cfg) enforceMigrationToUnifiedConfigs() {
DualWriterMode: 5,
DualWriterMigrationDataSyncDisabled: true,
EnableMigration: true,
AutoMigrationThreshold: resourceCfg.AutoMigrationThreshold,
}
}
}
// UnifiedStorageType returns the configured storage type without creating or mutating keys.
// getUnifiedStorageType returns the configured storage type without creating or mutating keys.
// Precedence: env > ini > default ("unified").
// Used to decide unified storage behavior early without side effects.
func (cfg *Cfg) UnifiedStorageType() string {
func (cfg *Cfg) getUnifiedStorageType() string {
const (
grafanaAPIServerSectionName = "grafana-apiserver"
storageTypeKeyName = "storage_type"
@@ -188,23 +168,3 @@ func (cfg *Cfg) UnifiedStorageType() string {
}
return defaultStorageType
}
// UnifiedStorageConfig returns the UnifiedStorageConfig for a resource.
func (cfg *Cfg) UnifiedStorageConfig(resource string) UnifiedStorageConfig {
if cfg.UnifiedStorage == nil {
return UnifiedStorageConfig{}
}
return cfg.UnifiedStorage[resource]
}
// EnableMode5 enables migration and sets mode 5 for a resource.
func (cfg *Cfg) EnableMode5(resource string) {
if cfg.UnifiedStorage == nil {
cfg.UnifiedStorage = make(map[string]UnifiedStorageConfig)
}
config := cfg.UnifiedStorage[resource]
config.DualWriterMode = rest.Mode5
config.DualWriterMigrationDataSyncDisabled = true
config.EnableMigration = true
cfg.UnifiedStorage[resource] = config
}
@@ -43,16 +43,10 @@ func TestCfg_setUnifiedStorageConfig(t *testing.T) {
}
assert.Equal(t, exists, true, migratedResource)
expectedThreshold := 0
if AutoMigratedUnifiedResources[migratedResource] {
expectedThreshold = DefaultAutoMigrationThreshold
}
assert.Equal(t, UnifiedStorageConfig{
DualWriterMode: 5,
DualWriterMigrationDataSyncDisabled: true,
EnableMigration: isEnabled,
AutoMigrationThreshold: expectedThreshold,
}, resourceCfg, migratedResource)
}
}
@@ -77,7 +71,6 @@ func TestCfg_setUnifiedStorageConfig(t *testing.T) {
DualWriterPeriodicDataSyncJobEnabled: true,
DataSyncerRecordsLimit: 1001,
DataSyncerInterval: time.Minute * 10,
AutoMigrationThreshold: 0,
})
validateMigratedResources(false)
@@ -214,18 +214,8 @@ func runMigrationTestSuite(t *testing.T, testCases []resourceMigratorTestCase) {
for _, state := range testStates {
t.Run(state.tc.name(), func(t *testing.T) {
shouldExist := true
for _, gvr := range state.tc.resources() {
resourceKey := fmt.Sprintf("%s.%s", gvr.Resource, gvr.Group)
// Resources exist if they're either:
// 1. In MigratedUnifiedResources (enabled by default), OR
// 2. In AutoMigratedUnifiedResources (auto-migrated because count is below threshold)
if !setting.MigratedUnifiedResources[resourceKey] && !setting.AutoMigratedUnifiedResources[resourceKey] {
shouldExist = false
break
}
}
state.tc.verify(t, helper, shouldExist)
// Verify resources now exist in unified storage after migration
state.tc.verify(t, helper, true)
})
}
@@ -280,7 +270,7 @@ const (
var migrationIDsToDefault = map[string]bool{
playlistsID: true,
foldersAndDashboardsID: true, // Auto-migrated when resource count is below threshold
foldersAndDashboardsID: false,
}
func verifyRegisteredMigrations(t *testing.T, helper *apis.K8sTestHelper, onlyDefault bool, optOut bool) {
@@ -10,11 +10,9 @@ import (
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/registry/apis/dashboard/legacy"
"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/grafana/grafana/pkg/storage/unified/resourcepb"
"github.com/grafana/grafana/pkg/util/xorm"
"github.com/grafana/grafana/pkg/util/xorm/core"
"k8s.io/apimachinery/pkg/runtime/schema"
)
@@ -33,20 +31,6 @@ type ResourceMigration struct {
migrationID string
validators []Validator // Optional: custom validation logic for this migration
log log.Logger
cfg *setting.Cfg
autoMigrate bool // If true, auto-migrate resource if count is below threshold
hadErrors bool // Tracks if errors occurred during migration (used with ignoreErrors)
}
// ResourceMigrationOption is a functional option for configuring ResourceMigration.
type ResourceMigrationOption func(*ResourceMigration)
// WithAutoMigrate configures the migration to auto-migrate resource if count is below threshold.
func WithAutoMigrate(cfg *setting.Cfg) ResourceMigrationOption {
return func(m *ResourceMigration) {
m.cfg = cfg
m.autoMigrate = true
}
}
// NewResourceMigration creates a new migration for the specified resources.
@@ -55,24 +39,14 @@ func NewResourceMigration(
resources []schema.GroupResource,
migrationID string,
validators []Validator,
opts ...ResourceMigrationOption,
) *ResourceMigration {
m := &ResourceMigration{
return &ResourceMigration{
migrator: migrator,
resources: resources,
migrationID: migrationID,
validators: validators,
log: log.New("storage.unified.resource_migration." + migrationID),
}
for _, opt := range opts {
opt(m)
}
return m
}
func (m *ResourceMigration) SkipMigrationLog() bool {
// Skip populating the log table if auto-migrate is enabled and errors occurred
return m.autoMigrate && m.hadErrors
}
var _ migrator.CodeMigration = (*ResourceMigration)(nil)
@@ -83,23 +57,7 @@ func (m *ResourceMigration) SQL(_ migrator.Dialect) string {
}
// Exec implements migrator.CodeMigration interface. Executes the migration across all organizations.
func (m *ResourceMigration) Exec(sess *xorm.Session, mg *migrator.Migrator) (err error) {
// Track any errors that occur during migration
defer func() {
if err != nil {
if m.autoMigrate {
m.log.Warn(
`[WARN] Resource migration failed and is currently skipped.
This migration will be enforced in the next major Grafana release, where failures will block startup or resource loading.
This warning is intended to help you detect and report issues early.
Please investigate the failure and report it to the Grafana team so it can be addressed before the next major release.`,
"error", err)
}
m.hadErrors = true
}
}()
func (m *ResourceMigration) Exec(sess *xorm.Session, mg *migrator.Migrator) error {
ctx := context.Background()
orgs, err := m.getAllOrgs(sess)
@@ -117,8 +75,7 @@ Please investigate the failure and report it to the Grafana team so it can be ad
if mg.Dialect.DriverName() == migrator.SQLite {
// reuse transaction in SQLite to avoid "database is locked" errors
var tx *core.Tx
tx, err = sess.Tx()
tx, err := sess.Tx()
if err != nil {
m.log.Error("Failed to get transaction from session", "error", err)
return fmt.Errorf("failed to get transaction: %w", err)
@@ -128,22 +85,12 @@ Please investigate the failure and report it to the Grafana team so it can be ad
}
for _, org := range orgs {
if err = m.migrateOrg(ctx, sess, org); err != nil {
if err := m.migrateOrg(ctx, sess, org); err != nil {
return err
}
}
// Auto-enable mode 5 for resources after successful migration
// TODO: remove this before Grafana 13 GA: https://github.com/grafana/search-and-storage-team/issues/613
if m.autoMigrate {
for _, gr := range m.resources {
m.log.Info("Auto-enabling mode 5 for resource", "resource", gr.Resource+"."+gr.Group)
m.cfg.EnableMode5(gr.Resource + "." + gr.Group)
}
}
m.log.Info("Migration completed successfully for all organizations", "org_count", len(orgs))
return nil
}
+36 -222
View File
@@ -1,13 +1,11 @@
package migrations
import (
"context"
"fmt"
v1beta1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1"
folders "github.com/grafana/grafana/apps/folder/pkg/apis/folder/v1beta1"
playlists "github.com/grafana/grafana/apps/playlist/pkg/apis/playlist/v0alpha1"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/registry/apis/dashboard/legacy"
sqlstoremigrator "github.com/grafana/grafana/pkg/services/sqlstore/migrator"
"github.com/grafana/grafana/pkg/setting"
@@ -16,70 +14,69 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
)
type resourceDefinition struct {
groupResource schema.GroupResource
migratorFunc string // Name of the method: "MigrateFolders", "MigrateDashboards", etc.
type ResourceDefinition struct {
GroupResource schema.GroupResource
MigratorFunc string // Name of the method: "MigrateFolders", "MigrateDashboards", etc.
}
type migrationDefinition struct {
name string
migrationID string // The ID stored in the migration log table (e.g., "playlists migration")
resources []string
registerFunc func(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient, opts ...ResourceMigrationOption)
registerFunc func(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient)
}
var resourceRegistry = []resourceDefinition{
var resourceRegistry = []ResourceDefinition{
{
groupResource: schema.GroupResource{Group: folders.GROUP, Resource: folders.RESOURCE},
migratorFunc: "MigrateFolders",
GroupResource: schema.GroupResource{Group: folders.GROUP, Resource: folders.RESOURCE},
MigratorFunc: "MigrateFolders",
},
{
groupResource: schema.GroupResource{Group: v1beta1.GROUP, Resource: v1beta1.LIBRARY_PANEL_RESOURCE},
migratorFunc: "MigrateLibraryPanels",
GroupResource: schema.GroupResource{Group: v1beta1.GROUP, Resource: v1beta1.LIBRARY_PANEL_RESOURCE},
MigratorFunc: "MigrateLibraryPanels",
},
{
groupResource: schema.GroupResource{Group: v1beta1.GROUP, Resource: v1beta1.DASHBOARD_RESOURCE},
migratorFunc: "MigrateDashboards",
GroupResource: schema.GroupResource{Group: v1beta1.GROUP, Resource: v1beta1.DASHBOARD_RESOURCE},
MigratorFunc: "MigrateDashboards",
},
{
groupResource: schema.GroupResource{Group: playlists.APIGroup, Resource: "playlists"},
migratorFunc: "MigratePlaylists",
GroupResource: schema.GroupResource{Group: playlists.APIGroup, Resource: "playlists"},
MigratorFunc: "MigratePlaylists",
},
}
var migrationRegistry = []migrationDefinition{
{
name: "playlists",
migrationID: "playlists migration",
resources: []string{setting.PlaylistResource},
registerFunc: registerPlaylistMigration,
},
{
name: "folders and dashboards",
migrationID: "folders and dashboards migration",
resources: []string{setting.FolderResource, setting.DashboardResource},
registerFunc: registerDashboardAndFolderMigration,
},
}
func registerMigrations(ctx context.Context,
cfg *setting.Cfg,
mg *sqlstoremigrator.Migrator,
migrator UnifiedMigrator,
client resource.ResourceClient,
sqlStore db.DB,
) error {
func registerMigrations(cfg *setting.Cfg, mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient) error {
for _, migration := range migrationRegistry {
if shouldAutoMigrate(ctx, migration, cfg, sqlStore) {
migration.registerFunc(mg, migrator, client, WithAutoMigrate(cfg))
continue
var (
hasValue bool
allEnabled bool
)
for _, res := range migration.resources {
enabled := cfg.UnifiedStorage[res].EnableMigration
if !hasValue {
allEnabled = enabled
hasValue = true
continue
}
if enabled != allEnabled {
return fmt.Errorf("cannot migrate resources separately: %v migration must be either all enabled or all disabled", migration.resources)
}
}
enabled, err := isMigrationEnabled(migration, cfg)
if err != nil {
return err
}
if !enabled {
if !allEnabled {
logger.Info("Migration is disabled in config, skipping", "migration", migration.name)
continue
}
@@ -88,193 +85,10 @@ func registerMigrations(ctx context.Context,
return nil
}
func registerDashboardAndFolderMigration(mg *sqlstoremigrator.Migrator,
migrator UnifiedMigrator,
client resource.ResourceClient,
opts ...ResourceMigrationOption,
) {
foldersDef := getResourceDefinition("folder.grafana.app", "folders")
dashboardsDef := getResourceDefinition("dashboard.grafana.app", "dashboards")
driverName := mg.Dialect.DriverName()
folderCountValidator := NewCountValidator(
client,
foldersDef.groupResource,
"dashboard",
"org_id = ? and is_folder = true",
driverName,
)
dashboardCountValidator := NewCountValidator(
client,
dashboardsDef.groupResource,
"dashboard",
"org_id = ? and is_folder = false",
driverName,
)
folderTreeValidator := NewFolderTreeValidator(client, foldersDef.groupResource, driverName)
dashboardsAndFolders := NewResourceMigration(
migrator,
[]schema.GroupResource{foldersDef.groupResource, dashboardsDef.groupResource},
"folders-dashboards",
[]Validator{folderCountValidator, dashboardCountValidator, folderTreeValidator},
opts...,
)
mg.AddMigration("folders and dashboards migration", dashboardsAndFolders)
}
func registerPlaylistMigration(mg *sqlstoremigrator.Migrator,
migrator UnifiedMigrator,
client resource.ResourceClient,
opts ...ResourceMigrationOption,
) {
playlistsDef := getResourceDefinition("playlist.grafana.app", "playlists")
driverName := mg.Dialect.DriverName()
playlistCountValidator := NewCountValidator(
client,
playlistsDef.groupResource,
"playlist",
"org_id = ?",
driverName,
)
playlistsMigration := NewResourceMigration(
migrator,
[]schema.GroupResource{playlistsDef.groupResource},
"playlists",
[]Validator{playlistCountValidator},
opts...,
)
mg.AddMigration("playlists migration", playlistsMigration)
}
// TODO: remove this before Grafana 13 GA: https://github.com/grafana/search-and-storage-team/issues/613
func shouldAutoMigrate(ctx context.Context, migration migrationDefinition, cfg *setting.Cfg, sqlStore db.DB) bool {
autoMigrate := false
for _, res := range migration.resources {
config := cfg.UnifiedStorageConfig(res)
if config.DualWriterMode == 5 {
return false
}
if !setting.AutoMigratedUnifiedResources[res] {
continue
}
if checkIfAlreadyMigrated(ctx, migration, sqlStore) {
for _, res := range migration.resources {
cfg.EnableMode5(res)
}
logger.Info("Auto-migration already completed, enabling mode 5 for resources", "migration", migration.name)
return true
}
autoMigrate = true
threshold := int64(setting.DefaultAutoMigrationThreshold)
if config.AutoMigrationThreshold > 0 {
threshold = int64(config.AutoMigrationThreshold)
}
count, err := countResource(ctx, sqlStore, res)
if err != nil {
logger.Warn("Failed to count resource for auto migration check", "resource", res, "error", err)
return false
}
logger.Info("Resource count for auto migration check", "resource", res, "count", count, "threshold", threshold)
if count > threshold {
return false
}
}
if !autoMigrate {
return false
}
logger.Info("Auto-migration enabled for migration", "migration", migration.name)
return true
}
func checkIfAlreadyMigrated(ctx context.Context, migration migrationDefinition, sqlStore db.DB) bool {
if migration.migrationID == "" {
return false
}
exists, err := migrationExists(ctx, sqlStore, migration.migrationID)
if err != nil {
logger.Warn("Failed to check if migration exists", "migration", migration.name, "error", err)
return false
}
return exists
}
func isMigrationEnabled(migration migrationDefinition, cfg *setting.Cfg) (bool, error) {
var (
hasValue bool
allEnabled bool
)
for _, res := range migration.resources {
enabled := cfg.UnifiedStorage[res].EnableMigration
if !hasValue {
allEnabled = enabled
hasValue = true
continue
}
if enabled != allEnabled {
return false, fmt.Errorf("cannot migrate resources separately: %v migration must be either all enabled or all disabled", migration.resources)
}
}
return allEnabled, nil
}
// TODO: remove this before Grafana 13 GA: https://github.com/grafana/search-and-storage-team/issues/613
func countResource(ctx context.Context, sqlStore db.DB, resourceName string) (int64, error) {
var count int64
err := sqlStore.WithDbSession(ctx, func(sess *db.Session) error {
switch resourceName {
case setting.DashboardResource:
var err error
count, err = sess.Table("dashboard").Where("is_folder = ?", false).Count()
return err
case setting.FolderResource:
var err error
count, err = sess.Table("dashboard").Where("is_folder = ?", true).Count()
return err
default:
return fmt.Errorf("unknown resource: %s", resourceName)
}
})
return count, err
}
const migrationLogTableName = "unifiedstorage_migration_log"
func migrationExists(ctx context.Context, sqlStore db.DB, migrationID string) (bool, error) {
var count int64
err := sqlStore.WithDbSession(ctx, func(sess *db.Session) error {
var err error
count, err = sess.Table(migrationLogTableName).Where("migration_id = ?", migrationID).Count()
return err
})
if err != nil {
return false, fmt.Errorf("failed to check migration existence: %w", err)
}
return count > 0, nil
}
func getResourceDefinition(group, resource string) *resourceDefinition {
func getResourceDefinition(group, resource string) *ResourceDefinition {
for i := range resourceRegistry {
r := &resourceRegistry[i]
if r.groupResource.Group == group && r.groupResource.Resource == resource {
if r.GroupResource.Group == group && r.GroupResource.Resource == resource {
return r
}
}
@@ -288,8 +102,8 @@ func buildResourceKey(group, resource, namespace string) *resourcepb.ResourceKey
}
return &resourcepb.ResourceKey{
Namespace: namespace,
Group: def.groupResource.Group,
Resource: def.groupResource.Resource,
Group: def.GroupResource.Group,
Resource: def.GroupResource.Resource,
}
}
@@ -299,7 +113,7 @@ func getMigratorFunc(accessor legacy.MigrationDashboardAccessor, group, resource
return nil
}
switch def.migratorFunc {
switch def.MigratorFunc {
case "MigrateFolders":
return accessor.MigrateFolders
case "MigrateLibraryPanels":
@@ -316,7 +130,7 @@ func getMigratorFunc(accessor legacy.MigrationDashboardAccessor, group, resource
func validateRegisteredResources() error {
registeredMap := make(map[string]bool)
for _, gr := range resourceRegistry {
key := fmt.Sprintf("%s.%s", gr.groupResource.Resource, gr.groupResource.Group)
key := fmt.Sprintf("%s.%s", gr.GroupResource.Resource, gr.GroupResource.Group)
registeredMap[key] = true
}
@@ -1,15 +1,12 @@
package migrations
import (
"context"
"strings"
"testing"
sqlstoremigrator "github.com/grafana/grafana/pkg/services/sqlstore/migrator"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// TestRegisterMigrations exercises registerMigrations with various EnableMigration configs using a table-driven test.
@@ -17,28 +14,20 @@ func TestRegisterMigrations(t *testing.T) {
origRegistry := migrationRegistry
t.Cleanup(func() { migrationRegistry = origRegistry })
// Use fake resource names that are NOT in setting.AutoMigratedUnifiedResources
// to avoid triggering the auto-migrate code path which requires a non-nil sqlStore.
const (
fakePlaylistResource = "fake.playlists.resource"
fakeFolderResource = "fake.folders.resource"
fakeDashboardResource = "fake.dashboards.resource"
)
// helper to build a fake registry with custom register funcs that bump counters
makeFakeRegistry := func(migrationCalls map[string]int) []migrationDefinition {
return []migrationDefinition{
{
name: "playlists",
resources: []string{fakePlaylistResource},
registerFunc: func(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient, opts ...ResourceMigrationOption) {
resources: []string{setting.PlaylistResource},
registerFunc: func(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient) {
migrationCalls["playlists"]++
},
},
{
name: "folders and dashboards",
resources: []string{fakeFolderResource, fakeDashboardResource},
registerFunc: func(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient, opts ...ResourceMigrationOption) {
resources: []string{setting.FolderResource, setting.DashboardResource},
registerFunc: func(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient) {
migrationCalls["folders and dashboards"]++
},
},
@@ -49,9 +38,7 @@ func TestRegisterMigrations(t *testing.T) {
makeCfg := func(vals map[string]bool) *setting.Cfg {
cfg := &setting.Cfg{UnifiedStorage: make(map[string]setting.UnifiedStorageConfig)}
for k, v := range vals {
cfg.UnifiedStorage[k] = setting.UnifiedStorageConfig{
EnableMigration: v,
}
cfg.UnifiedStorage[k] = setting.UnifiedStorageConfig{EnableMigration: v}
}
return cfg
}
@@ -84,13 +71,13 @@ func TestRegisterMigrations(t *testing.T) {
migrationRegistry = makeFakeRegistry(migrationCalls)
cfg := makeCfg(map[string]bool{
fakePlaylistResource: tt.enablePlaylist,
fakeFolderResource: tt.enableFolder,
fakeDashboardResource: tt.enableDashboard,
setting.PlaylistResource: tt.enablePlaylist,
setting.FolderResource: tt.enableFolder,
setting.DashboardResource: tt.enableDashboard,
})
// We pass nils for migrator dependencies because our fake registerFuncs don't use them
err := registerMigrations(context.Background(), cfg, nil, nil, nil, nil)
err := registerMigrations(cfg, nil, nil, nil)
if tt.wantErr {
require.Error(t, err, "expected error for mismatched enablement")
@@ -103,176 +90,3 @@ func TestRegisterMigrations(t *testing.T) {
})
}
}
// TestResourceMigration_AutoMigrateEnablesMode5 verifies the autoMigrate behavior:
// - When autoMigrate=true AND cfg is set AND storage type is "unified", mode 5 should be enabled
// - In all other cases, mode 5 should NOT be enabled
func TestResourceMigration_AutoMigrateEnablesMode5(t *testing.T) {
// Helper to create a cfg with unified storage type
makeUnifiedCfg := func() *setting.Cfg {
cfg := setting.NewCfg()
cfg.Raw.Section("grafana-apiserver").Key("storage_type").SetValue("unified")
cfg.UnifiedStorage = make(map[string]setting.UnifiedStorageConfig)
return cfg
}
// Helper to create a cfg with legacy storage type
makeLegacyCfg := func() *setting.Cfg {
cfg := setting.NewCfg()
cfg.Raw.Section("grafana-apiserver").Key("storage_type").SetValue("legacy")
cfg.UnifiedStorage = make(map[string]setting.UnifiedStorageConfig)
return cfg
}
tests := []struct {
name string
autoMigrate bool
cfg *setting.Cfg
resources []string
wantMode5Enabled bool
description string
}{
{
name: "autoMigrate enabled with unified storage",
autoMigrate: true,
cfg: makeUnifiedCfg(),
resources: []string{setting.DashboardResource},
wantMode5Enabled: true,
description: "Should enable mode 5 when autoMigrate=true and storage type is unified",
},
{
name: "autoMigrate disabled with unified storage",
autoMigrate: false,
cfg: makeUnifiedCfg(),
resources: []string{setting.DashboardResource},
wantMode5Enabled: false,
description: "Should NOT enable mode 5 when autoMigrate=false",
},
{
name: "autoMigrate enabled with legacy storage",
autoMigrate: true,
cfg: makeLegacyCfg(),
resources: []string{setting.DashboardResource},
wantMode5Enabled: false,
description: "Should NOT enable mode 5 when storage type is legacy",
},
{
name: "autoMigrate enabled with nil cfg",
autoMigrate: true,
cfg: nil,
resources: []string{setting.DashboardResource},
wantMode5Enabled: false,
description: "Should NOT enable mode 5 when cfg is nil",
},
{
name: "autoMigrate enabled with multiple resources",
autoMigrate: true,
cfg: makeUnifiedCfg(),
resources: []string{setting.FolderResource, setting.DashboardResource},
wantMode5Enabled: true,
description: "Should enable mode 5 for all resources when autoMigrate=true",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Build schema.GroupResource from resource strings
resources := make([]schema.GroupResource, 0, len(tt.resources))
for _, r := range tt.resources {
parts := strings.SplitN(r, ".", 2)
resources = append(resources, schema.GroupResource{
Resource: parts[0],
Group: parts[1],
})
}
// Create the migration with options
var opts []ResourceMigrationOption
if tt.autoMigrate {
opts = append(opts, WithAutoMigrate(tt.cfg))
}
m := NewResourceMigration(nil, resources, "test-auto-migrate", nil, opts...)
// Simulate what happens at the end of a successful migration
// This is the logic from Exec() that we're testing
if m.autoMigrate && m.cfg != nil && m.cfg.UnifiedStorageType() == "unified" {
for _, gr := range m.resources {
m.cfg.EnableMode5(gr.Resource + "." + gr.Group)
}
}
// Verify mode 5 was enabled (or not) for each resource
for _, resourceName := range tt.resources {
if tt.cfg == nil {
// If cfg is nil, we can't check - just verify we didn't panic
continue
}
config := tt.cfg.UnifiedStorageConfig(resourceName)
if tt.wantMode5Enabled {
require.Equal(t, 5, int(config.DualWriterMode), "%s: %s", tt.description, resourceName)
require.True(t, config.EnableMigration, "%s: EnableMigration should be true for %s", tt.description, resourceName)
require.True(t, config.DualWriterMigrationDataSyncDisabled, "%s: DualWriterMigrationDataSyncDisabled should be true for %s", tt.description, resourceName)
} else {
require.Equal(t, 0, int(config.DualWriterMode), "%s: mode should be 0 for %s", tt.description, resourceName)
}
}
})
}
}
// TestResourceMigration_SkipMigrationLog verifies the SkipMigrationLog behavior:
// - When ignoreErrors=true AND errors occurred (hadErrors=true), skip writing to migration log
// This allows the migration to be re-run on the next startup
// - In all other cases, write to migration log normally
//
// This is important for the folders/dashboards migration which uses WithIgnoreErrors() to handle
// partial failures gracefully while still allowing retry on next startup.
func TestResourceMigration_SkipMigrationLog(t *testing.T) {
tests := []struct {
name string
autoMigrate bool
hadErrors bool
want bool
description string
}{
{
name: "normal migration success",
autoMigrate: false,
hadErrors: false,
want: false,
description: "Normal successful migration should write to log",
},
{
name: "ignoreErrors migration success",
autoMigrate: true,
hadErrors: false,
want: false,
description: "Migration with ignoreErrors that succeeds should still write to log",
},
{
name: "normal migration with errors",
autoMigrate: false,
hadErrors: true,
want: false,
description: "Migration that fails without ignoreErrors should write error to log",
},
{
name: "ignoreErrors migration with errors - skip log",
autoMigrate: true,
hadErrors: true,
want: true,
description: "Migration with ignoreErrors that has errors should SKIP log to allow retry",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
m := &ResourceMigration{
autoMigrate: tt.autoMigrate,
hadErrors: tt.hadErrors,
}
require.Equal(t, tt.want, m.SkipMigrationLog(), tt.description)
})
}
}
+57 -5
View File
@@ -14,6 +14,7 @@ import (
"github.com/grafana/grafana/pkg/storage/unified/resource"
"github.com/prometheus/client_golang/prometheus"
"go.opentelemetry.io/otel"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var tracer = otel.Tracer("github.com/grafana/grafana/pkg/storage/unified/migrations")
@@ -53,7 +54,6 @@ func (p *UnifiedStorageMigrationServiceImpl) Run(ctx context.Context) error {
logger.Info("Data migrations are disabled, skipping")
return nil
}
logger.Info("Running migrations for unified storage")
metrics.MUnifiedStorageMigrationStatus.Set(3)
return RegisterMigrations(ctx, p.migrator, p.cfg, p.sqlStore, p.client)
@@ -79,7 +79,7 @@ func RegisterMigrations(
return err
}
if err := registerMigrations(ctx, cfg, mg, migrator, client, sqlStore); err != nil {
if err := registerMigrations(cfg, mg, migrator, client); err != nil {
return err
}
@@ -92,13 +92,65 @@ func RegisterMigrations(
db.SetMaxOpenConns(3)
defer db.SetMaxOpenConns(maxOpenConns)
}
err := mg.RunMigrations(ctx,
if err := mg.RunMigrations(ctx,
sec.Key("migration_locking").MustBool(true),
sec.Key("locking_attempt_timeout_sec").MustInt())
if err != nil {
sec.Key("locking_attempt_timeout_sec").MustInt()); err != nil {
return fmt.Errorf("unified storage data migration failed: %w", err)
}
logger.Info("Unified storage migrations completed successfully")
return nil
}
func registerDashboardAndFolderMigration(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient) {
foldersDef := getResourceDefinition("folder.grafana.app", "folders")
dashboardsDef := getResourceDefinition("dashboard.grafana.app", "dashboards")
driverName := mg.Dialect.DriverName()
folderCountValidator := NewCountValidator(
client,
foldersDef.GroupResource,
"dashboard",
"org_id = ? and is_folder = true",
driverName,
)
dashboardCountValidator := NewCountValidator(
client,
dashboardsDef.GroupResource,
"dashboard",
"org_id = ? and is_folder = false",
driverName,
)
folderTreeValidator := NewFolderTreeValidator(client, foldersDef.GroupResource, driverName)
dashboardsAndFolders := NewResourceMigration(
migrator,
[]schema.GroupResource{foldersDef.GroupResource, dashboardsDef.GroupResource},
"folders-dashboards",
[]Validator{folderCountValidator, dashboardCountValidator, folderTreeValidator},
)
mg.AddMigration("folders and dashboards migration", dashboardsAndFolders)
}
func registerPlaylistMigration(mg *sqlstoremigrator.Migrator, migrator UnifiedMigrator, client resource.ResourceClient) {
playlistsDef := getResourceDefinition("playlist.grafana.app", "playlists")
driverName := mg.Dialect.DriverName()
playlistCountValidator := NewCountValidator(
client,
playlistsDef.GroupResource,
"playlist",
"org_id = ?",
driverName,
)
playlistsMigration := NewResourceMigration(
migrator,
[]schema.GroupResource{playlistsDef.GroupResource},
"playlists",
[]Validator{playlistCountValidator},
)
mg.AddMigration("playlists migration", playlistsMigration)
}
@@ -1,211 +0,0 @@
package threshold
import (
"context"
"fmt"
"net/http"
"os"
"testing"
authlib "github.com/grafana/authlib/types"
"github.com/grafana/grafana/pkg/infra/db"
"github.com/grafana/grafana/pkg/services/folder"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/tests/apis"
"github.com/grafana/grafana/pkg/tests/testinfra"
"github.com/grafana/grafana/pkg/tests/testsuite"
"github.com/grafana/grafana/pkg/util/testutil"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// TODO: remove this test before Grafana 13 GA
func TestMain(m *testing.M) {
testsuite.Run(m)
}
// TestIntegrationAutoMigrateThresholdExceeded verifies that auto-migration is skipped when
// resource count exceeds the configured threshold.
// TODO: remove this test before Grafana 13 GA
func TestIntegrationAutoMigrateThresholdExceeded(t *testing.T) {
testutil.SkipIntegrationTestInShortMode(t)
if db.IsTestDbSQLite() {
// Share the same SQLite DB file between steps
tmpDir := t.TempDir()
dbPath := tmpDir + "/shared-threshold-test.db"
oldVal := os.Getenv("SQLITE_TEST_DB")
require.NoError(t, os.Setenv("SQLITE_TEST_DB", dbPath))
t.Cleanup(func() {
if oldVal == "" {
_ = os.Unsetenv("SQLITE_TEST_DB")
} else {
_ = os.Setenv("SQLITE_TEST_DB", oldVal)
}
})
t.Logf("Using shared database path: %s", dbPath)
}
var org1 *apis.OrgUsers
var orgB *apis.OrgUsers
dashboardGVR := schema.GroupVersionResource{
Group: "dashboard.grafana.app",
Version: "v1beta1",
Resource: "dashboards",
}
folderGVR := schema.GroupVersionResource{
Group: "folder.grafana.app",
Version: "v1beta1",
Resource: "folders",
}
dashboardKey := fmt.Sprintf("%s.%s", dashboardGVR.Resource, dashboardGVR.Group)
folderKey := fmt.Sprintf("%s.%s", folderGVR.Resource, folderGVR.Group)
playlistKey := "playlists.playlist.grafana.app"
// Step 1: Create resources exceeding the threshold (3 resources, threshold=1)
t.Run("Step 1: Create resources exceeding threshold", func(t *testing.T) {
unifiedConfig := map[string]setting.UnifiedStorageConfig{}
helper := apis.NewK8sTestHelper(t, testinfra.GrafanaOpts{
AppModeProduction: true,
DisableAnonymous: true,
DisableDataMigrations: true,
DisableDBCleanup: true,
APIServerStorageType: "unified",
UnifiedStorageConfig: unifiedConfig,
})
org1 = &helper.Org1
orgB = &helper.OrgB
// Create 3 dashboards
for i := 1; i <= 3; i++ {
createTestDashboard(t, helper, fmt.Sprintf("Threshold Dashboard %d", i))
}
// Create 3 folders
for i := 1; i <= 3; i++ {
createTestFolder(t, helper, fmt.Sprintf("folder-%d", i), fmt.Sprintf("Threshold Folder %d", i), "")
}
// Explicitly shutdown helper before Step 1 ends to ensure database is properly closed
helper.Shutdown()
})
// Set SKIP_DB_TRUNCATE to prevent truncation in subsequent steps
oldSkipTruncate := os.Getenv("SKIP_DB_TRUNCATE")
require.NoError(t, os.Setenv("SKIP_DB_TRUNCATE", "true"))
t.Cleanup(func() {
if oldSkipTruncate == "" {
_ = os.Unsetenv("SKIP_DB_TRUNCATE")
} else {
_ = os.Setenv("SKIP_DB_TRUNCATE", oldSkipTruncate)
}
})
// Step 2: Verify auto-migration is skipped due to threshold
t.Run("Step 2: Verify auto-migration skipped (threshold exceeded)", func(t *testing.T) {
// Set threshold=1, but we have 3 resources of each type, so migration should be skipped
// Disable playlists migration since we're only testing dashboard/folder threshold behavior
unifiedConfig := map[string]setting.UnifiedStorageConfig{
dashboardKey: {AutoMigrationThreshold: 1, EnableMigration: false},
folderKey: {AutoMigrationThreshold: 1, EnableMigration: false},
playlistKey: {EnableMigration: false},
}
helper := apis.NewK8sTestHelperWithOpts(t, apis.K8sTestHelperOpts{
GrafanaOpts: testinfra.GrafanaOpts{
AppModeProduction: true,
DisableAnonymous: true,
DisableDataMigrations: false, // Allow migration system to run
APIServerStorageType: "unified",
UnifiedStorageConfig: unifiedConfig,
},
Org1Users: org1,
OrgBUsers: orgB,
})
t.Cleanup(helper.Shutdown)
namespace := authlib.OrgNamespaceFormatter(helper.Org1.OrgID)
dashCli := helper.GetResourceClient(apis.ResourceClientArgs{
User: helper.Org1.Admin,
Namespace: namespace,
GVR: dashboardGVR,
})
verifyResourceCount(t, dashCli, 3)
folderCli := helper.GetResourceClient(apis.ResourceClientArgs{
User: helper.Org1.Admin,
Namespace: namespace,
GVR: folderGVR,
})
verifyResourceCount(t, folderCli, 3)
// Verify migration did NOT run by checking the migration log
count, err := helper.GetEnv().SQLStore.GetEngine().Table("unifiedstorage_migration_log").
Where("migration_id = ?", "folders and dashboards migration").
Count()
require.NoError(t, err)
require.Equal(t, int64(0), count, "Migration should not have run")
})
}
func createTestDashboard(t *testing.T, helper *apis.K8sTestHelper, title string) string {
t.Helper()
payload := fmt.Sprintf(`{"dashboard": {"title": "%s", "panels": []}, "overwrite": false}`, title)
result := apis.DoRequest(helper, apis.RequestParams{
User: helper.Org1.Admin,
Method: "POST",
Path: "/api/dashboards/db",
Body: []byte(payload),
}, &map[string]interface{}{})
require.NotNil(t, result.Response)
require.Equal(t, 200, result.Response.StatusCode)
uid := (*result.Result)["uid"].(string)
require.NotEmpty(t, uid)
return uid
}
func createTestFolder(t *testing.T, helper *apis.K8sTestHelper, uid, title, parentUID string) *folder.Folder {
t.Helper()
payload := fmt.Sprintf(`{
"title": "%s",
"uid": "%s"`, title, uid)
if parentUID != "" {
payload += fmt.Sprintf(`,
"parentUid": "%s"`, parentUID)
}
payload += "}"
folderCreate := apis.DoRequest(helper, apis.RequestParams{
User: helper.Org1.Admin,
Method: http.MethodPost,
Path: "/api/folders",
Body: []byte(payload),
}, &folder.Folder{})
require.NotNil(t, folderCreate.Result)
return folderCreate.Result
}
// verifyResourceCount verifies that the expected number of resources exist in K8s storage
func verifyResourceCount(t *testing.T, client *apis.K8sResourceClient, expectedCount int) {
t.Helper()
l, err := client.Resource.List(context.Background(), metav1.ListOptions{})
require.NoError(t, err)
resources, err := meta.ExtractList(l)
require.NoError(t, err)
require.Equal(t, expectedCount, len(resources))
}
@@ -12,7 +12,7 @@ INSERT INTO {{ .Ident .TableName }}
VALUES (
{{ .Arg .GUID }},
{{ .Arg .KeyPath }},
{{ .Arg .Value }},
COALESCE({{ .Arg .Value }}, ""),
{{ .Arg .Group }},
{{ .Arg .Resource }},
{{ .Arg .Namespace }},
@@ -10,7 +10,7 @@ INSERT INTO {{ .Ident "resource_history" }}
{{ .Ident "folder" }}
)
VALUES (
{{ .Arg .Value }},
COALESCE({{ .Arg .Value }}, ""),
{{ .Arg .GUID }},
{{ .Arg .Group }},
{{ .Arg .Resource }},
@@ -5,7 +5,7 @@ INSERT INTO {{ .Ident .TableName }}
)
VALUES (
{{ .Arg .KeyPath }},
{{ .Arg .Value }}
COALESCE({{ .Arg .Value }}, "")
)
{{- if eq .DialectName "mysql" }}
ON DUPLICATE KEY UPDATE {{ .Ident "value" }} = {{ .Arg .Value }}
+1 -7
View File
@@ -864,15 +864,11 @@ func (d *dataStore) applyBackwardsCompatibleChanges(ctx context.Context, tx db.T
return nil
}
generation := event.Object.GetGeneration()
if key.Action == DataActionDeleted {
generation = 0
}
_, err := dbutil.Exec(ctx, tx, sqlKVUpdateLegacyResourceHistory, sqlKVLegacyUpdateHistoryRequest{
SQLTemplate: sqltemplate.New(kv.dialect),
GUID: key.GUID,
PreviousRV: event.PreviousRV,
Generation: generation,
Generation: event.Object.GetGeneration(),
})
if err != nil {
@@ -914,7 +910,6 @@ func (d *dataStore) applyBackwardsCompatibleChanges(ctx context.Context, tx db.T
Resource: key.Resource,
Namespace: key.Namespace,
Name: key.Name,
Action: action,
Folder: key.Folder,
PreviousRV: event.PreviousRV,
})
@@ -925,7 +920,6 @@ func (d *dataStore) applyBackwardsCompatibleChanges(ctx context.Context, tx db.T
case DataActionDeleted:
_, err := dbutil.Exec(ctx, tx, sqlKVDeleteLegacyResource, sqlKVLegacySaveRequest{
SQLTemplate: sqltemplate.New(kv.dialect),
Group: key.Group,
Resource: key.Resource,
Namespace: key.Namespace,
Name: key.Name,

Some files were not shown because too many files have changed in this diff Show More